Diffstat (limited to 'docs')
-rw-r--r--  docs/AliasAnalysis.rst  702
-rw-r--r--  docs/Atomics.rst  441
-rw-r--r--  docs/BitCodeFormat.rst  1045
-rw-r--r--  docs/BranchWeightMetadata.rst  118
-rw-r--r--  docs/Bugpoint.rst  218
-rw-r--r--  docs/CMake.rst  423
-rw-r--r--  docs/CodeGenerator.rst  2428
-rw-r--r--  docs/CodingStandards.rst  1175
-rw-r--r--  docs/CommandGuide/FileCheck.rst  284
-rw-r--r--  docs/CommandGuide/bugpoint.rst  247
-rw-r--r--  docs/CommandGuide/index.rst  53
-rw-r--r--  docs/CommandGuide/lit.rst  474
-rw-r--r--  docs/CommandGuide/llc.rst  251
-rw-r--r--  docs/CommandGuide/lli.rst  300
-rw-r--r--  docs/CommandGuide/llvm-ar.rst  458
-rw-r--r--  docs/CommandGuide/llvm-as.rst  56
-rw-r--r--  docs/CommandGuide/llvm-bcanalyzer.rst  424
-rw-r--r--  docs/CommandGuide/llvm-build.rst  102
-rw-r--r--  docs/CommandGuide/llvm-config.rst  176
-rw-r--r--  docs/CommandGuide/llvm-cov.rst  51
-rw-r--r--  docs/CommandGuide/llvm-diff.rst  56
-rw-r--r--  docs/CommandGuide/llvm-dis.rst  69
-rw-r--r--  docs/CommandGuide/llvm-extract.rst  104
-rw-r--r--  docs/CommandGuide/llvm-link.rst  96
-rw-r--r--  docs/CommandGuide/llvm-nm.rst  189
-rw-r--r--  docs/CommandGuide/llvm-prof.rst  63
-rw-r--r--  docs/CommandGuide/llvm-ranlib.rst  61
-rw-r--r--  docs/CommandGuide/llvm-stress.rst  48
-rw-r--r--  docs/CommandGuide/opt.rst  183
-rw-r--r--  docs/CommandGuide/tblgen.rst  186
-rw-r--r--  docs/CommandLine.rst  1615
-rw-r--r--  docs/CompilerWriterInfo.html  267
-rw-r--r--  docs/DebuggingJITedCode.html  184
-rw-r--r--  docs/DeveloperPolicy.rst  508
-rw-r--r--  docs/ExceptionHandling.rst  367
-rw-r--r--  docs/ExtendedIntegerResults.txt  133
-rw-r--r--  docs/ExtendingLLVM.html  379
-rw-r--r--  docs/FAQ.rst  464
-rw-r--r--  docs/GCCFEBuildInstrs.html  279
-rw-r--r--  docs/GarbageCollection.html  1389
-rw-r--r--  docs/GetElementPtr.rst  538
-rw-r--r--  docs/GettingStarted.html  1760
-rw-r--r--  docs/GettingStartedVS.rst  234
-rw-r--r--  docs/GoldPlugin.html  227
-rw-r--r--  docs/HistoricalNotes/2000-11-18-EarlyDesignIdeas.txt  74
-rw-r--r--  docs/HistoricalNotes/2000-11-18-EarlyDesignIdeasResp.txt  199
-rw-r--r--  docs/HistoricalNotes/2000-12-06-EncodingIdea.txt  30
-rw-r--r--  docs/HistoricalNotes/2000-12-06-MeetingSummary.txt  83
-rw-r--r--  docs/HistoricalNotes/2001-01-31-UniversalIRIdea.txt  39
-rw-r--r--  docs/HistoricalNotes/2001-02-06-TypeNotationDebate.txt  67
-rw-r--r--  docs/HistoricalNotes/2001-02-06-TypeNotationDebateResp1.txt  75
-rw-r--r--  docs/HistoricalNotes/2001-02-06-TypeNotationDebateResp2.txt  53
-rw-r--r--  docs/HistoricalNotes/2001-02-06-TypeNotationDebateResp4.txt  89
-rw-r--r--  docs/HistoricalNotes/2001-02-09-AdveComments.txt  120
-rw-r--r--  docs/HistoricalNotes/2001-02-09-AdveCommentsResponse.txt  245
-rw-r--r--  docs/HistoricalNotes/2001-02-13-Reference-Memory.txt  39
-rw-r--r--  docs/HistoricalNotes/2001-02-13-Reference-MemoryResponse.txt  47
-rw-r--r--  docs/HistoricalNotes/2001-04-16-DynamicCompilation.txt  49
-rw-r--r--  docs/HistoricalNotes/2001-05-18-ExceptionHandling.txt  202
-rw-r--r--  docs/HistoricalNotes/2001-05-19-ExceptionResponse.txt  45
-rw-r--r--  docs/HistoricalNotes/2001-06-01-GCCOptimizations.txt  63
-rw-r--r--  docs/HistoricalNotes/2001-06-01-GCCOptimizations2.txt  71
-rw-r--r--  docs/HistoricalNotes/2001-06-20-.NET-Differences.txt  30
-rw-r--r--  docs/HistoricalNotes/2001-07-06-LoweringIRForCodeGen.txt  31
-rw-r--r--  docs/HistoricalNotes/2001-09-18-OptimizeExceptions.txt  56
-rw-r--r--  docs/HistoricalNotes/2002-05-12-InstListChange.txt  55
-rw-r--r--  docs/HistoricalNotes/2002-06-25-MegaPatchInfo.txt  72
-rw-r--r--  docs/HistoricalNotes/2003-01-23-CygwinNotes.txt  28
-rw-r--r--  docs/HistoricalNotes/2003-06-25-Reoptimizer1.txt  137
-rw-r--r--  docs/HistoricalNotes/2003-06-26-Reoptimizer2.txt  110
-rw-r--r--  docs/HistoricalNotes/2007-OriginalClangReadme.txt  178
-rw-r--r--  docs/HowToAddABuilder.rst  90
-rw-r--r--  docs/HowToReleaseLLVM.html  581
-rw-r--r--  docs/HowToSubmitABug.html  345
-rw-r--r--  docs/LLVMBuild.html  368
-rw-r--r--  docs/LLVMBuild.txt  21
-rw-r--r--  docs/LangRef.html  8731
-rw-r--r--  docs/Lexicon.rst  194
-rw-r--r--  docs/LinkTimeOptimization.rst  298
-rw-r--r--  docs/Makefile  127
-rw-r--r--  docs/Makefile.sphinx  159
-rw-r--r--  docs/MakefileGuide.rst  956
-rw-r--r--  docs/Packaging.rst  75
-rw-r--r--  docs/Passes.html  2066
-rw-r--r--  docs/ProgrammersManual.html  4137
-rw-r--r--  docs/Projects.rst  327
-rw-r--r--  docs/README.txt  12
-rw-r--r--  docs/ReleaseNotes.html  755
-rw-r--r--  docs/SegmentedStacks.rst  80
-rw-r--r--  docs/SourceLevelDebugging.html  2858
-rw-r--r--  docs/SystemLibrary.html  316
-rw-r--r--  docs/TableGenFundamentals.rst  799
-rw-r--r--  docs/TestSuiteMakefileGuide.html  351
-rw-r--r--  docs/TestingGuide.html  915
-rw-r--r--  docs/WritingAnLLVMBackend.html  2533
-rw-r--r--  docs/WritingAnLLVMPass.html  1954
-rw-r--r--  docs/_static/lines.gif  bin 0 -> 91 bytes
-rw-r--r--  docs/_static/llvm.css  112
-rw-r--r--  docs/_templates/indexsidebar.html  7
-rw-r--r--  docs/_templates/layout.html  13
-rw-r--r--  docs/_themes/llvm-theme/layout.html  23
-rw-r--r--  docs/_themes/llvm-theme/static/contents.png  bin 0 -> 202 bytes
-rw-r--r--  docs/_themes/llvm-theme/static/llvm-theme.css  374
-rw-r--r--  docs/_themes/llvm-theme/static/logo.png  bin 0 -> 9864 bytes
-rw-r--r--  docs/_themes/llvm-theme/static/navigation.png  bin 0 -> 218 bytes
-rw-r--r--  docs/_themes/llvm-theme/theme.conf  4
-rw-r--r--  docs/conf.py  252
-rw-r--r--  docs/design_and_overview.rst  36
-rw-r--r--  docs/development_process.rst  30
-rw-r--r--  docs/doxygen.cfg.in  1632
-rw-r--r--  docs/doxygen.css  408
-rw-r--r--  docs/doxygen.footer  13
-rw-r--r--  docs/doxygen.header  9
-rw-r--r--  docs/doxygen.intro  18
-rw-r--r--  docs/index.rst  70
-rw-r--r--  docs/mailing_lists.rst  35
-rw-r--r--  docs/make.bat  190
-rw-r--r--  docs/programming.rst  40
-rw-r--r--  docs/re_format.7  756
-rw-r--r--  docs/subsystems.rst  91
-rw-r--r--  docs/tutorial/LangImpl1.html  348
-rw-r--r--  docs/tutorial/LangImpl2.html  1231
-rw-r--r--  docs/tutorial/LangImpl3.html  1268
-rw-r--r--  docs/tutorial/LangImpl4.html  1152
-rw-r--r--  docs/tutorial/LangImpl5-cfg.png  bin 0 -> 38586 bytes
-rw-r--r--  docs/tutorial/LangImpl5.html  1772
-rw-r--r--  docs/tutorial/LangImpl6.html  1829
-rw-r--r--  docs/tutorial/LangImpl7.html  2164
-rw-r--r--  docs/tutorial/LangImpl8.html  359
-rw-r--r--  docs/tutorial/OCamlLangImpl1.html  365
-rw-r--r--  docs/tutorial/OCamlLangImpl2.html  1043
-rw-r--r--  docs/tutorial/OCamlLangImpl3.html  1093
-rw-r--r--  docs/tutorial/OCamlLangImpl4.html  1026
-rw-r--r--  docs/tutorial/OCamlLangImpl5.html  1560
-rw-r--r--  docs/tutorial/OCamlLangImpl6.html  1574
-rw-r--r--  docs/tutorial/OCamlLangImpl7.html  1904
-rw-r--r--  docs/tutorial/OCamlLangImpl8.html  359
-rw-r--r--  docs/tutorial/index.html  48
-rw-r--r--  docs/userguides.rst  90
-rw-r--r--  docs/yaml2obj.rst  222
140 files changed, 73680 insertions, 0 deletions
diff --git a/docs/AliasAnalysis.rst b/docs/AliasAnalysis.rst
new file mode 100644
index 00000000000..2d4f2914ee0
--- /dev/null
+++ b/docs/AliasAnalysis.rst
@@ -0,0 +1,702 @@
+.. _alias_analysis:
+
+==================================
+LLVM Alias Analysis Infrastructure
+==================================
+
+.. contents::
+ :local:
+
+Introduction
+============
+
+Alias Analysis (aka Pointer Analysis) is a class of techniques which attempt to
+determine whether or not two pointers ever can point to the same object in
+memory. There are many different algorithms for alias analysis and many
+different ways of classifying them: flow-sensitive vs. flow-insensitive,
+context-sensitive vs. context-insensitive, field-sensitive
+vs. field-insensitive, unification-based vs. subset-based, etc. Traditionally,
+alias analyses respond to a query with a `Must, May, or No`_ alias response,
+indicating that two pointers always point to the same object, might point to the
+same object, or are known to never point to the same object.
+
+The LLVM `AliasAnalysis
+<http://llvm.org/doxygen/classllvm_1_1AliasAnalysis.html>`__ class is the
+primary interface used by clients and implementations of alias analyses in the
+LLVM system. This class is the common interface between clients of alias
+analysis information and the implementations providing it, and is designed to
+support a wide range of implementations and clients (but currently all clients
+are assumed to be flow-insensitive). In addition to simple alias analysis
+information, this class exposes Mod/Ref information from those implementations
+which can provide it, allowing for powerful analyses and transformations to work
+well together.
+
+This document contains information necessary to successfully implement this
+interface, use it, and to test both sides. It also explains some of the finer
+points about what exactly results mean. If you feel that something is unclear
+or should be added, please `let me know <mailto:sabre@nondot.org>`_.
+
+``AliasAnalysis`` Class Overview
+================================
+
+The `AliasAnalysis <http://llvm.org/doxygen/classllvm_1_1AliasAnalysis.html>`__
+class defines the interface that the various alias analysis implementations
+should support. This class exports two important enums: ``AliasResult`` and
+``ModRefResult`` which represent the result of an alias query or a mod/ref
+query, respectively.
+
+The ``AliasAnalysis`` interface exposes information about memory, represented in
+several different ways. In particular, memory objects are represented as a
+starting address and size, and function calls are represented as the actual
+``call`` or ``invoke`` instruction that performs the call. The
+``AliasAnalysis`` interface also exposes some helper methods which allow you to
+get mod/ref information for arbitrary instructions.
+
+All ``AliasAnalysis`` interfaces require that in queries involving multiple
+values, values which are not `constants <LangRef.html#constants>`_ are all
+defined within the same function.
+
+Representation of Pointers
+--------------------------
+
+Most importantly, the ``AliasAnalysis`` class provides several methods which are
+used to query whether or not two memory objects alias, whether function calls
+can modify or read a memory object, etc. For all of these queries, memory
+objects are represented as a pair of their starting address (a symbolic LLVM
+``Value*``) and a static size.
+
+Representing memory objects as a starting address and a size is critically
+important for correct Alias Analyses. For example, consider this (silly, but
+possible) C code:
+
+.. code-block:: c++
+
+ int i;
+ char C[2];
+ char A[10];
+ /* ... */
+ for (i = 0; i != 10; ++i) {
+ C[0] = A[i]; /* One byte store */
+ C[1] = A[9-i]; /* One byte store */
+ }
+
+In this case, the ``basicaa`` pass will disambiguate the stores to ``C[0]`` and
+``C[1]`` because they are accesses to two distinct locations one byte apart, and
+the accesses are each one byte. In this case, the Loop Invariant Code Motion
+(LICM) pass can use store motion to remove the stores from the loop. In
+contrast, the following code:
+
+.. code-block:: c++
+
+ int i;
+ char C[2];
+ char A[10];
+ /* ... */
+ for (i = 0; i != 10; ++i) {
+ ((short*)C)[0] = A[i]; /* Two byte store! */
+ C[1] = A[9-i]; /* One byte store */
+ }
+
+In this case, the two stores to C do alias each other, because the access to the
+``&C[0]`` element is a two byte access. If size information wasn't available in
+the query, even the first case would have to conservatively assume that the
+accesses alias.
+
+.. _alias:
+
+The ``alias`` method
+--------------------
+
+The ``alias`` method is the primary interface used to determine whether or not
+two memory objects alias each other. It takes two memory objects as input and
+returns MustAlias, PartialAlias, MayAlias, or NoAlias as appropriate.
+
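+For example, a client pass might use it like this (a minimal sketch, assuming
+``AA`` is a reference to the ``AliasAnalysis`` it acquired and that the access
+sizes are known; the variable names are illustrative, not part of the
+interface):
+
+.. code-block:: c++
+
+  // Query whether the two accesses can possibly overlap.
+  if (AA.alias(P1, P1Size, P2, P2Size) == AliasAnalysis::NoAlias) {
+    // The two memory references are independent; as far as aliasing is
+    // concerned, it is safe to reorder them.
+  }
+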
+Like all ``AliasAnalysis`` interfaces, the ``alias`` method requires that either
+the two pointer values be defined within the same function, or that at least
+one of the values be a `constant <LangRef.html#constants>`_.
+
+.. _Must, May, or No:
+
+Must, May, and No Alias Responses
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The ``NoAlias`` response may be used when there is never an immediate dependence
+between any memory reference *based* on one pointer and any memory reference
+*based* on the other. The most obvious example is when the two pointers point to
+non-overlapping memory ranges. Another is when the two pointers are only ever
+used for reading memory. Another is when the memory is freed and reallocated
+between accesses through one pointer and accesses through the other --- in this
+case, there is a dependence, but it's mediated by the free and reallocation.
+
+An exception to this is the `noalias <LangRef.html#noalias>`_ keyword;
+the "irrelevant" dependencies are ignored.
+
+The ``MayAlias`` response is used whenever the two pointers might refer to the
+same object.
+
+The ``PartialAlias`` response is used when the two memory objects are known to
+be overlapping in some way, but do not start at the same address.
+
+The ``MustAlias`` response may only be returned if the two memory objects are
+guaranteed to always start at exactly the same location. A ``MustAlias``
+response implies that the pointers compare equal.
+
+The ``getModRefInfo`` methods
+-----------------------------
+
+The ``getModRefInfo`` methods return information about whether the execution of
+an instruction can read or modify a memory location. Mod/Ref information is
+always conservative: if an instruction **might** read or write a location,
+``ModRef`` is returned.
+
+The ``AliasAnalysis`` class also provides a ``getModRefInfo`` method for testing
+dependencies between function calls. This method takes two call sites (``CS1``
+& ``CS2``), returns ``NoModRef`` if neither call writes to memory read or
+written by the other, ``Ref`` if ``CS1`` reads memory written by ``CS2``,
+``Mod`` if ``CS1`` writes to memory read or written by ``CS2``, or ``ModRef`` if
+``CS1`` might read or write memory written to by ``CS2``. Note that this
+relation is not commutative.
+
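+As a minimal sketch (client-side code with illustrative variable names; the
+exact overloads are described in the class header), a pass deciding whether a
+load can be moved across a call might check:
+
+.. code-block:: c++
+
+  // CS is the CallSite for the call; Ptr/Size describe the loaded location.
+  if ((AA.getModRefInfo(CS, Ptr, Size) & AliasAnalysis::Mod) == 0) {
+    // The call never writes that location, so moving the load across the
+    // call is safe from the aliasing point of view.
+  }
+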
+Other useful ``AliasAnalysis`` methods
+--------------------------------------
+
+Several other tidbits of information are often collected by various alias
+analysis implementations and can be put to good use by various clients.
+
+The ``pointsToConstantMemory`` method
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The ``pointsToConstantMemory`` method returns true if and only if the analysis
+can prove that the pointer only points to unchanging memory locations
+(functions, constant global variables, and the null pointer). This information
+can be used to refine mod/ref information: it is impossible for an unchanging
+memory location to be modified.
+
+.. _never access memory or only read memory:
+
+The ``doesNotAccessMemory`` and ``onlyReadsMemory`` methods
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+These methods are used to provide very simple mod/ref information for function
+calls. The ``doesNotAccessMemory`` method returns true for a function if the
+analysis can prove that the function never reads or writes to memory, or if the
+function only reads from constant memory. Functions with this property are
+side-effect free and only depend on their input arguments, allowing them to be
+eliminated if they form common subexpressions or be hoisted out of loops. Many
+common functions behave this way (e.g., ``sin`` and ``cos``) but many others do
+not (e.g., ``acos``, which modifies the ``errno`` variable).
+
+The ``onlyReadsMemory`` method returns true for a function if analysis can prove
+that (at most) the function only reads from non-volatile memory. Functions with
+this property are side-effect free, only depending on their input arguments and
+the state of memory when they are called. This property allows calls to these
+functions to be eliminated and moved around, as long as there is no store
+instruction that changes the contents of memory. Note that all functions that
+satisfy the ``doesNotAccessMemory`` method also satisfy ``onlyReadsMemory``.
+
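+A minimal sketch of a client using these predicates (``F`` being the callee of
+a call that a pass would like to hoist out of a loop; illustrative only):
+
+.. code-block:: c++
+
+  if (AA.doesNotAccessMemory(F) || AA.onlyReadsMemory(F)) {
+    // The call has no visible side effects on memory; subject to the usual
+    // safety checks, it may be moved or eliminated.
+  }
+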
+Writing a new ``AliasAnalysis`` Implementation
+==============================================
+
+Writing a new alias analysis implementation for LLVM is quite straightforward.
+There are already several implementations that you can use for examples, and the
+following information should help fill in any details. For examples, take a
+look at the `various alias analysis implementations`_ included with LLVM.
+
+Different Pass styles
+---------------------
+
+The first step is to determine what type of `LLVM pass <WritingAnLLVMPass.html>`_
+you need to use for your Alias Analysis. As is the case with most other
+analyses and transformations, the answer should be fairly obvious from what type
+of problem you are trying to solve:
+
+#. If you require interprocedural analysis, it should be a ``Pass``.
+#. If your analysis is function-local, subclass ``FunctionPass``.
+#. If you don't need to look at the program at all, subclass ``ImmutablePass``.
+
+In addition to the pass that you subclass, you should also inherit from the
+``AliasAnalysis`` interface, of course, and use the ``RegisterAnalysisGroup``
+template to register as an implementation of ``AliasAnalysis``.
+
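+As a rough sketch only (the registration machinery has changed over time, so
+check `WritingAnLLVMPass.html <WritingAnLLVMPass.html>`_ and an existing
+implementation such as ``BasicAliasAnalysis`` for the exact incantation to
+use), this looks something like:
+
+.. code-block:: c++
+
+  namespace {
+    // A hypothetical analysis that does not need to inspect the program.
+    struct MyAA : public ImmutablePass, public AliasAnalysis {
+      static char ID;
+      MyAA() : ImmutablePass(ID) {}
+      // ... override the AliasAnalysis interfaces you can improve ...
+    };
+  }
+  char MyAA::ID = 0;
+
+  static RegisterPass<MyAA> X("my-aa", "My alias analysis");
+  static RegisterAnalysisGroup<AliasAnalysis> Y(X);
+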
+Required initialization calls
+-----------------------------
+
+Your subclass of ``AliasAnalysis`` is required to invoke two methods on the
+``AliasAnalysis`` base class: ``getAnalysisUsage`` and
+``InitializeAliasAnalysis``. In particular, your implementation of
+``getAnalysisUsage`` should explicitly call into the
+``AliasAnalysis::getAnalysisUsage`` method in addition to declaring any pass
+dependencies your pass has. Thus you should have something like this:
+
+.. code-block:: c++
+
+  void getAnalysisUsage(AnalysisUsage &AU) const {
+ AliasAnalysis::getAnalysisUsage(AU);
+ // declare your dependencies here.
+ }
+
+Additionally, you must invoke the ``InitializeAliasAnalysis`` method from your
+analysis run method (``run`` for a ``Pass``, ``runOnFunction`` for a
+``FunctionPass``, or ``InitializePass`` for an ``ImmutablePass``). For example
+(as part of a ``Pass``):
+
+.. code-block:: c++
+
+ bool run(Module &M) {
+ InitializeAliasAnalysis(this);
+ // Perform analysis here...
+ return false;
+ }
+
+Interfaces which may be specified
+---------------------------------
+
+All of the `AliasAnalysis
+<http://llvm.org/doxygen/classllvm_1_1AliasAnalysis.html>`__ virtual methods
+default to providing `chaining`_ to another alias analysis implementation, which
+ends up returning conservatively correct information (returning "May" Alias and
+"Mod/Ref" for alias and mod/ref queries respectively). Depending on the
+capabilities of the analysis you are implementing, you only need to override
+the interfaces you can improve.
+
+.. _chaining:
+.. _chain:
+
+``AliasAnalysis`` chaining behavior
+-----------------------------------
+
+With only one special exception (the `no-aa`_ pass) every alias analysis pass
+chains to another alias analysis implementation (for example, the user can
+specify "``-basicaa -ds-aa -licm``" to get the maximum benefit from both alias
+analyses). The alias analysis class automatically takes care of most of this
+for methods that you don't override. For methods that you do override, in code
+paths that return a conservative MayAlias or Mod/Ref result, simply return
+whatever the superclass computes. For example:
+
+.. code-block:: c++
+
+ AliasAnalysis::AliasResult alias(const Value *V1, unsigned V1Size,
+ const Value *V2, unsigned V2Size) {
+ if (...)
+ return NoAlias;
+ ...
+
+ // Couldn't determine a must or no-alias result.
+ return AliasAnalysis::alias(V1, V1Size, V2, V2Size);
+ }
+
+In addition to analysis queries, you must make sure to unconditionally pass LLVM
+`update notification`_ methods to the superclass as well if you override them,
+which allows all alias analyses in a chain to be updated.
+
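+For example, a sketch of forwarding one such notification (assuming your
+implementation keeps per-value state in some map; ``MyPerValueInfo`` is a
+hypothetical member):
+
+.. code-block:: c++
+
+  virtual void deleteValue(Value *V) {
+    MyPerValueInfo.erase(V);          // drop our own state for V
+    AliasAnalysis::deleteValue(V);    // keep the rest of the chain updated
+  }
+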
+.. _update notification:
+
+Updating analysis results for transformations
+---------------------------------------------
+
+Alias analysis information is initially computed for a static snapshot of the
+program, but clients will use this information to make transformations to the
+code. All but the most trivial forms of alias analysis will need to have their
+analysis results updated to reflect the changes made by these transformations.
+
+The ``AliasAnalysis`` interface exposes four methods which are used to
+communicate program changes from the clients to the analysis implementations.
+Various alias analysis implementations should use these methods to ensure that
+their internal data structures are kept up-to-date as the program changes (for
+example, when an instruction is deleted), and clients of alias analysis must be
+sure to call these interfaces appropriately.
+
+The ``deleteValue`` method
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The ``deleteValue`` method is called by transformations when they remove an
+instruction or any other value from the program (including values that do not
+use pointers). Typically alias analyses keep data structures that have entries
+for each value in the program. When this method is called, they should remove
+any entries for the specified value, if they exist.
+
+The ``copyValue`` method
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+The ``copyValue`` method is used when a new value is introduced into the
+program. There is no way to introduce a value into the program that did not
+exist before (this doesn't make sense for a safe compiler transformation), so
+this is the only way to introduce a new value. This method indicates that the
+new value has exactly the same properties as the value being copied.
+
+The ``replaceWithNewValue`` method
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+This method is a simple helper that is provided to make clients easier to
+write. It is implemented by copying the old analysis information to the new
+value, then deleting the old value. This method cannot be overridden by alias
+analysis implementations.
+
+The ``addEscapingUse`` method
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The ``addEscapingUse`` method is used when the uses of a pointer value have
+changed in ways that may invalidate precomputed analysis information.
+Implementations may either use this callback to provide conservative responses
+for pointers whose uses have changed since analysis time, or may recompute some
+all of their internal state to continue providing accurate responses.
+
+In general, any new use of a pointer value is considered an escaping use, and
+must be reported through this callback, *except* for the uses below:
+
+* A ``bitcast`` or ``getelementptr`` of the pointer
+* A ``store`` through the pointer (but not a ``store`` *of* the pointer)
+* A ``load`` through the pointer
+
+Efficiency Issues
+-----------------
+
+From the LLVM perspective, the only thing you need to do to provide an efficient
+alias analysis is to make sure that alias analysis **queries** are serviced
+quickly. The actual calculation of the alias analysis results (the "run"
+method) is only performed once, but many (perhaps duplicate) queries may be
+performed. Because of this, try to move as much computation to the run method
+as possible (within reason).
+
+Limitations
+-----------
+
+The AliasAnalysis infrastructure has several limitations which make writing a
+new ``AliasAnalysis`` implementation difficult.
+
+There is no way to override the default alias analysis. It would be very useful
+to be able to do something like "``opt -my-aa -O2``" and have it use ``-my-aa``
+for all passes which need AliasAnalysis, but there is currently no support for
+that, short of changing the source code and recompiling. Similarly, there is
+also no way of setting a chain of analyses as the default.
+
+There is no way for transform passes to declare that they preserve
+``AliasAnalysis`` implementations. The ``AliasAnalysis`` interface includes
+``deleteValue`` and ``copyValue`` methods which are intended to allow a pass to
+keep an AliasAnalysis consistent, however there's no way for a pass to declare
+in its ``getAnalysisUsage`` that it does so. Some passes attempt to use
+``AU.addPreserved<AliasAnalysis>``, however this doesn't actually have any
+effect.
+
+``AliasAnalysisCounter`` (``-count-aa``) and ``AliasDebugger`` (``-debug-aa``)
+are implemented as ``ModulePass`` classes, so if your alias analysis uses
+``FunctionPass``, it won't be able to use these utilities. If you try to use
+them, the pass manager will silently route alias analysis queries directly to
+``BasicAliasAnalysis`` instead.
+
+Similarly, the ``opt -p`` option introduces ``ModulePass`` passes between each
+pass, which prevents the use of ``FunctionPass`` alias analysis passes.
+
+The ``AliasAnalysis`` API does have functions for notifying implementations when
+values are deleted or copied, however these aren't sufficient. There are many
+other ways that LLVM IR can be modified which could be relevant to
+``AliasAnalysis`` implementations which can not be expressed.
+
+The ``AliasAnalysisDebugger`` utility seems to suggest that ``AliasAnalysis``
+implementations can expect that they will be informed of any relevant ``Value``
+before it appears in an alias query. However, popular clients such as ``GVN``
+don't support this, and are known to trigger errors when run with the
+``AliasAnalysisDebugger``.
+
+Due to several of the above limitations, the most obvious use for the
+``AliasAnalysisCounter`` utility, collecting stats on all alias queries in a
+compilation, doesn't work, even if the ``AliasAnalysis`` implementations don't
+use ``FunctionPass``. There's no way to set a default, much less a default
+sequence, and there's no way to preserve it.
+
+The ``AliasSetTracker`` class (which is used by ``LICM``) makes a
+non-deterministic number of alias queries. This can cause stats collected by
+``AliasAnalysisCounter`` to have fluctuations among identical runs, for
+example. Another consequence is that debugging techniques involving pausing
+execution after a predetermined number of queries can be unreliable.
+
+Many alias queries can be reformulated in terms of other alias queries. When
+multiple ``AliasAnalysis`` queries are chained together, it would make sense to
+start those queries from the beginning of the chain, with care taken to avoid
+infinite looping, however currently an implementation which wants to do this can
+only start such queries from itself.
+
+Using alias analysis results
+============================
+
+There are several different ways to use alias analysis results. In order of
+preference, these are:
+
+Using the ``MemoryDependenceAnalysis`` Pass
+-------------------------------------------
+
+The ``memdep`` pass uses alias analysis to provide high-level dependence
+information about memory-using instructions. This will tell you which store
+feeds into a load, for example. It uses caching and other techniques to be
+efficient, and is used by Dead Store Elimination, GVN, and memcpy optimizations.
+
+.. _AliasSetTracker:
+
+Using the ``AliasSetTracker`` class
+-----------------------------------
+
+Many transformations need information about alias **sets** that are active in
+some scope, rather than information about pairwise aliasing. The
+`AliasSetTracker <http://llvm.org/doxygen/classllvm_1_1AliasSetTracker.html>`__
+class is used to efficiently build these Alias Sets from the pairwise alias
+analysis information provided by the ``AliasAnalysis`` interface.
+
+First you initialize the AliasSetTracker by using the "``add``" methods to add
+information about various potentially aliasing instructions in the scope you are
+interested in. Once all of the alias sets are completed, your pass should
+simply iterate through the constructed alias sets, using the ``AliasSetTracker``
+``begin()``/``end()`` methods.
+
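+A minimal sketch of that pattern (iterator-style, with ``AA`` and ``F`` being
+the analysis and function at hand; see the class headers for the exact API):
+
+.. code-block:: c++
+
+  AliasSetTracker Tracker(AA);
+  for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
+    Tracker.add(*BB);                 // add every instruction in the block
+
+  for (AliasSetTracker::iterator I = Tracker.begin(), E = Tracker.end();
+       I != E; ++I) {
+    AliasSet &AS = *I;
+    // Inspect AS: e.g. AS.isMustAlias(), whether it is modified, and the
+    // pointers it contains.
+  }
+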
+The ``AliasSet``\s formed by the ``AliasSetTracker`` are guaranteed to be
+disjoint, calculate mod/ref information and volatility for the set, and keep
+track of whether or not all of the pointers in the set are Must aliases. The
+AliasSetTracker also makes sure that sets are properly folded due to call
+instructions, and can provide a list of pointers in each set.
+
+As an example user of this, the `Loop Invariant Code Motion
+<doxygen/structLICM.html>`_ pass uses ``AliasSetTracker``\s to calculate alias
+sets for each loop nest. If an ``AliasSet`` in a loop is not modified, then all
+load instructions from that set may be hoisted out of the loop. If any alias
+sets are stored to **and** are must alias sets, then the stores may be sunk
+to outside of the loop, promoting the memory location to a register for the
+duration of the loop nest. Both of these transformations only apply if the
+pointer argument is loop-invariant.
+
+The AliasSetTracker implementation
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The AliasSetTracker class is implemented to be as efficient as possible. It
+uses the union-find algorithm to efficiently merge AliasSets when a pointer is
+inserted into the AliasSetTracker that aliases multiple sets. The primary data
+structure is a hash table mapping pointers to the AliasSet they are in.
+
+The AliasSetTracker class must maintain a list of all of the LLVM ``Value*``\s
+that are in each AliasSet. Since the hash table already has entries for each
+LLVM ``Value*`` of interest, the AliasSets thread the linked list through
+these hash-table nodes to avoid having to allocate memory unnecessarily, and to
+make merging alias sets extremely efficient (the linked list merge is constant
+time).
+
+You shouldn't need to understand these details if you are just a client of the
+AliasSetTracker, but if you look at the code, hopefully this brief description
+will help make sense of why things are designed the way they are.
+
+Using the ``AliasAnalysis`` interface directly
+----------------------------------------------
+
+If neither of these utility classes are what your pass needs, you should use the
+interfaces exposed by the ``AliasAnalysis`` class directly. Try to use the
+higher-level methods when possible (e.g., use mod/ref information instead of the
+`alias`_ method directly if possible) to get the best precision and efficiency.
+
+Existing alias analysis implementations and clients
+===================================================
+
+If you're going to be working with the LLVM alias analysis infrastructure, you
+should know what clients and implementations of alias analysis are available.
+In particular, if you are implementing an alias analysis, you should be aware of
+the `the clients`_ that are useful for monitoring and evaluating different
+implementations.
+
+.. _various alias analysis implementations:
+
+Available ``AliasAnalysis`` implementations
+-------------------------------------------
+
+This section lists the various implementations of the ``AliasAnalysis``
+interface. With the exception of the `-no-aa`_ implementation, all of these
+`chain`_ to other alias analysis implementations.
+
+.. _no-aa:
+.. _-no-aa:
+
+The ``-no-aa`` pass
+^^^^^^^^^^^^^^^^^^^
+
+The ``-no-aa`` pass is just like what it sounds: an alias analysis that never
+returns any useful information. This pass can be useful if you think that alias
+analysis is doing something wrong and are trying to narrow down a problem.
+
+The ``-basicaa`` pass
+^^^^^^^^^^^^^^^^^^^^^
+
+The ``-basicaa`` pass is an aggressive local analysis that *knows* many
+important facts:
+
+* Distinct globals, stack allocations, and heap allocations can never alias.
+* Globals, stack allocations, and heap allocations never alias the null pointer.
+* Different fields of a structure do not alias.
+* Indexes into arrays with statically differing subscripts cannot alias.
+* Many common standard C library functions `never access memory or only read
+ memory`_.
+* Pointers that obviously point to constant globals are reported by
+  ``pointsToConstantMemory``.
+* Function calls cannot modify or reference stack allocations if they never
+  escape from the function that allocates them (a common case for automatic
+  arrays).
+
+The ``-globalsmodref-aa`` pass
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+This pass implements a simple context-sensitive mod/ref and alias analysis for
+internal global variables that don't "have their address taken". If a global
+does not have its address taken, the pass knows that no pointers alias the
+global. This pass also keeps track of functions that it knows never access
+memory or never read memory. This allows certain optimizations (e.g. GVN) to
+eliminate call instructions entirely.
+
+The real power of this pass is that it provides context-sensitive mod/ref
+information for call instructions. This allows the optimizer to know that calls
+to a function do not clobber or read the value of the global, allowing loads and
+stores to be eliminated.
+
+.. note::
+
+  This pass is somewhat limited in its scope (it only supports non-address-taken
+  globals), but it is a very quick analysis.
+
+The ``-steens-aa`` pass
+^^^^^^^^^^^^^^^^^^^^^^^
+
+The ``-steens-aa`` pass implements a variation on the well-known "Steensgaard's
+algorithm" for interprocedural alias analysis. Steensgaard's algorithm is a
+unification-based, flow-insensitive, context-insensitive, and field-insensitive
+alias analysis that is also very scalable (effectively linear time).
+
+The LLVM ``-steens-aa`` pass implements a "speculatively field-**sensitive**"
+version of Steensgaard's algorithm using the Data Structure Analysis framework.
+This gives it substantially more precision than the standard algorithm while
+maintaining excellent analysis scalability.
+
+.. note::
+
+ ``-steens-aa`` is available in the optional "poolalloc" module. It is not part
+ of the LLVM core.
+
+The ``-ds-aa`` pass
+^^^^^^^^^^^^^^^^^^^
+
+The ``-ds-aa`` pass implements the full Data Structure Analysis algorithm. Data
+Structure Analysis is a modular unification-based, flow-insensitive,
+context-**sensitive**, and speculatively field-**sensitive** alias
+analysis that is also quite scalable, usually at ``O(n * log(n))``.
+
+This algorithm is capable of responding to a full variety of alias analysis
+queries, and can provide context-sensitive mod/ref information as well. The
+only major facility not implemented so far is support for must-alias
+information.
+
+.. note::
+
+ ``-ds-aa`` is available in the optional "poolalloc" module. It is not part of
+ the LLVM core.
+
+The ``-scev-aa`` pass
+^^^^^^^^^^^^^^^^^^^^^
+
+The ``-scev-aa`` pass implements AliasAnalysis queries by translating them into
+ScalarEvolution queries. This gives it a more complete understanding of
+``getelementptr`` instructions and loop induction variables than other alias
+analyses have.
+
+Alias analysis driven transformations
+-------------------------------------
+
+LLVM includes several alias-analysis driven transformations which can be used
+with any of the implementations above.
+
+The ``-adce`` pass
+^^^^^^^^^^^^^^^^^^
+
+The ``-adce`` pass, which implements Aggressive Dead Code Elimination, uses the
+``AliasAnalysis`` interface to delete calls to functions that do not have
+side-effects and are not used.
+
+The ``-licm`` pass
+^^^^^^^^^^^^^^^^^^
+
+The ``-licm`` pass implements various Loop Invariant Code Motion related
+transformations. It uses the ``AliasAnalysis`` interface for several different
+transformations:
+
+* It uses mod/ref information to hoist or sink load instructions out of loops if
+  there are no instructions in the loop that modify the memory loaded.
+
+* It uses mod/ref information to hoist function calls out of loops that do not
+ write to memory and are loop-invariant.
+
+* It uses alias information to promote memory objects that are loaded and stored
+ to in loops to live in a register instead. It can do this if there are no may
+ aliases to the loaded/stored memory location.
+
+The ``-argpromotion`` pass
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The ``-argpromotion`` pass promotes by-reference arguments to be passed in
+by-value instead. In particular, if a pointer argument is only loaded from, it
+passes the loaded value into the function instead of the address. This pass
+uses alias information to make sure that the value loaded from the argument
+pointer is not modified between the entry of the function and any load of the
+pointer.
+
+The ``-gvn``, ``-memcpyopt``, and ``-dse`` passes
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+These passes use AliasAnalysis information to reason about loads and stores.
+
+.. _the clients:
+
+Clients for debugging and evaluation of implementations
+-------------------------------------------------------
+
+These passes are useful for evaluating the various alias analysis
+implementations. You can use them with commands like:
+
+.. code-block:: bash
+
+ % opt -ds-aa -aa-eval foo.bc -disable-output -stats
+
+The ``-print-alias-sets`` pass
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The ``-print-alias-sets`` pass is exposed as part of the ``opt`` tool to print
+out the Alias Sets formed by the `AliasSetTracker`_ class. This is useful if
+you're using the ``AliasSetTracker`` class. To use it, use something like:
+
+.. code-block:: bash
+
+ % opt -ds-aa -print-alias-sets -disable-output
+
+The ``-count-aa`` pass
+^^^^^^^^^^^^^^^^^^^^^^
+
+The ``-count-aa`` pass is useful to see how many queries a particular pass is
+making and what responses are returned by the alias analysis. As an example:
+
+.. code-block:: bash
+
+ % opt -basicaa -count-aa -ds-aa -count-aa -licm
+
+will print out how many queries the ``-licm`` pass makes of the ``-ds-aa`` pass
+(and what responses are returned), and how many queries the ``-ds-aa`` pass
+makes of the ``-basicaa`` pass. This can be useful when debugging a
+transformation or an alias analysis implementation.
+
+The ``-aa-eval`` pass
+^^^^^^^^^^^^^^^^^^^^^
+
+The ``-aa-eval`` pass simply iterates through all pairs of pointers in a
+function and asks an alias analysis whether or not the pointers alias. This
+gives an indication of the precision of the alias analysis. Statistics are
+printed indicating the percent of no/may/must aliases found (a more precise
+algorithm will have a lower number of may aliases).
+
+Memory Dependence Analysis
+==========================
+
+If you're just looking to be a client of alias analysis information, consider
+using the Memory Dependence Analysis interface instead. MemDep is a lazy,
+caching layer on top of alias analysis that is able to answer the question of
+what preceding memory operations a given instruction depends on, either at an
+intra- or inter-block level. Because of its laziness and caching policy, using
+MemDep can be a significant performance win over accessing alias analysis
+directly.
diff --git a/docs/Atomics.rst b/docs/Atomics.rst
new file mode 100644
index 00000000000..1bca53e2b17
--- /dev/null
+++ b/docs/Atomics.rst
@@ -0,0 +1,441 @@
+.. _atomics:
+
+==============================================
+LLVM Atomic Instructions and Concurrency Guide
+==============================================
+
+.. contents::
+ :local:
+
+Introduction
+============
+
+Historically, LLVM has not had very strong support for concurrency; some minimal
+intrinsics were provided, and ``volatile`` was used in some cases to achieve
+rough semantics in the presence of concurrency. However, this is changing;
+there are now new instructions which are well-defined in the presence of threads
+and asynchronous signals, and the model for existing instructions has been
+clarified in the IR.
+
+The atomic instructions are designed specifically to provide readable IR and
+optimized code generation for the following:
+
+* The new C++0x ``<atomic>`` header. (`C++0x draft available here
+ <http://www.open-std.org/jtc1/sc22/wg21/>`_.) (`C1x draft available here
+ <http://www.open-std.org/jtc1/sc22/wg14/>`_.)
+
+* Proper semantics for Java-style memory, for both ``volatile`` and regular
+ shared variables. (`Java Specification
+ <http://java.sun.com/docs/books/jls/third_edition/html/memory.html>`_)
+
+* gcc-compatible ``__sync_*`` builtins. (`Description
+ <http://gcc.gnu.org/onlinedocs/gcc/Atomic-Builtins.html>`_)
+
+* Other scenarios with atomic semantics, including ``static`` variables with
+ non-trivial constructors in C++.
+
+Atomic and volatile in the IR are orthogonal; "volatile" is the C/C++ volatile,
+which ensures that every volatile load and store happens and is performed in the
+stated order. A couple of examples: if a SequentiallyConsistent store is
+immediately followed by another SequentiallyConsistent store to the same
+address, the first store can be erased. This transformation is not allowed for a
+pair of volatile stores. On the other hand, a non-volatile non-atomic load can
+be moved across a volatile load freely, but not an Acquire load.
+
+This document provides a guide for anyone writing a frontend for LLVM or
+working on optimization passes for LLVM on how to deal with instructions with
+special semantics in the presence of concurrency. This
+is not intended to be a precise guide to the semantics; the details can get
+extremely complicated and unreadable, and are not usually necessary.
+
+.. _Optimization outside atomic:
+
+Optimization outside atomic
+===========================
+
+The basic ``'load'`` and ``'store'`` allow a variety of optimizations, but can
+lead to undefined results in a concurrent environment; see `NotAtomic`_. This
+section specifically goes into the one optimizer restriction which applies in
+concurrent environments, which gets a bit more of an extended description
+because any optimization dealing with stores needs to be aware of it.
+
+From the optimizer's point of view, the rule is that if there are not any
+instructions with atomic ordering involved, concurrency does not matter, with
+one exception: if a variable might be visible to another thread or signal
+handler, a store cannot be inserted along a path where it might not execute
+otherwise. Take the following example:
+
+.. code-block:: c
+
+ /* C code, for readability; run through clang -O2 -S -emit-llvm to get
+ equivalent IR */
+ int x;
+ void f(int* a) {
+ for (int i = 0; i < 100; i++) {
+ if (a[i])
+ x += 1;
+ }
+ }
+
+The following is equivalent in non-concurrent situations:
+
+.. code-block:: c
+
+ int x;
+ void f(int* a) {
+ int xtemp = x;
+ for (int i = 0; i < 100; i++) {
+ if (a[i])
+ xtemp += 1;
+ }
+ x = xtemp;
+ }
+
+However, LLVM is not allowed to transform the former to the latter: it could
+indirectly introduce undefined behavior if another thread can access ``x`` at
+the same time. (This example is particularly of interest because before the
+concurrency model was implemented, LLVM would perform this transformation.)
+
+Note that speculative loads are allowed; a load which is part of a race returns
+``undef``, but does not have undefined behavior.
+
+Atomic instructions
+===================
+
+For cases where simple loads and stores are not sufficient, LLVM provides
+various atomic instructions. The exact guarantees provided depend on the
+ordering; see `Atomic orderings`_.
+
+``load atomic`` and ``store atomic`` provide the same basic functionality as
+non-atomic loads and stores, but provide additional guarantees in situations
+where threads and signals are involved.
+
+``cmpxchg`` and ``atomicrmw`` are essentially like an atomic load followed by an
+atomic store (where the store is conditional for ``cmpxchg``), but no other
+memory operation can happen on any thread between the load and store. Note that
+LLVM's cmpxchg does not provide quite as many options as the C++0x version.
+
+A ``fence`` provides Acquire and/or Release ordering which is not part of
+another operation; it is normally used along with Monotonic memory operations.
+A Monotonic load followed by an Acquire fence is roughly equivalent to an
+Acquire load.
+
+Frontends generating atomic instructions generally need to be aware of the
+target to some degree; atomic instructions are guaranteed to be lock-free, and
+therefore an instruction which is wider than the target natively supports can be
+impossible to generate.
+
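+As a minimal sketch (assuming an ``llvm::IRBuilder<>`` named ``Builder`` and a
+pointer operand ``Ptr``; check the IRBuilder headers for the exact signatures),
+a frontend might emit an atomic increment like this:
+
+.. code-block:: c++
+
+  // Roughly the equivalent of __sync_fetch_and_add(Ptr, 1), using the
+  // strongest ordering.
+  Builder.CreateAtomicRMW(AtomicRMWInst::Add, Ptr, Builder.getInt32(1),
+                          SequentiallyConsistent);
+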
+.. _Atomic orderings:
+
+Atomic orderings
+================
+
+In order to achieve a balance between performance and necessary guarantees,
+there are six levels of atomicity. They are listed in order of strength; each
+level includes all the guarantees of the previous level except for
+Acquire/Release. (See also `LangRef Ordering <LangRef.html#ordering>`_.)
+
+.. _NotAtomic:
+
+NotAtomic
+---------
+
+NotAtomic is the obvious, a load or store which is not atomic. (This isn't
+really a level of atomicity, but is listed here for comparison.) This is
+essentially a regular load or store. If there is a race on a given memory
+location, loads from that location return undef.
+
+Relevant standard
+ This is intended to match shared variables in C/C++, and to be used in any
+ other context where memory access is necessary, and a race is impossible. (The
+ precise definition is in `LangRef Memory Model <LangRef.html#memmodel>`_.)
+
+Notes for frontends
+ The rule is essentially that all memory accessed with basic loads and stores
+ by multiple threads should be protected by a lock or other synchronization;
+ otherwise, you are likely to run into undefined behavior. If your frontend is
+ for a "safe" language like Java, use Unordered to load and store any shared
+ variable. Note that NotAtomic volatile loads and stores are not properly
+ atomic; do not try to use them as a substitute. (Per the C/C++ standards,
+ volatile does provide some limited guarantees around asynchronous signals, but
+ atomics are generally a better solution.)
+
+Notes for optimizers
+ Introducing loads to shared variables along a codepath where they would not
+ otherwise exist is allowed; introducing stores to shared variables is not. See
+ `Optimization outside atomic`_.
+
+Notes for code generation
+ The one interesting restriction here is that it is not allowed to write to
+ bytes outside of the bytes relevant to a store. This is mostly relevant to
+ unaligned stores: it is not allowed in general to convert an unaligned store
+ into two aligned stores of the same width as the unaligned store. Backends are
+ also expected to generate an i8 store as an i8 store, and not an instruction
+ which writes to surrounding bytes. (If you are writing a backend for an
+ architecture which cannot satisfy these restrictions and cares about
+ concurrency, please send an email to llvmdev.)
+
+Unordered
+---------
+
+Unordered is the lowest level of atomicity. It essentially guarantees that races
+produce somewhat sane results instead of having undefined behavior. It also
+guarantees the operation to be lock-free, so it does not depend on the data being
+part of a special atomic structure or depend on a separate per-process global
+lock. Note that code generation will fail for unsupported atomic operations; if
+you need such an operation, use explicit locking.
+
+Relevant standard
+ This is intended to match the Java memory model for shared variables.
+
+Notes for frontends
+ This cannot be used for synchronization, but is useful for Java and other
+ "safe" languages which need to guarantee that the generated code never
+ exhibits undefined behavior. Note that this guarantee is cheap on common
+  platforms for accesses of a native width, but can be expensive or unavailable
+  for wider accesses, like a 64-bit store on ARM. (A frontend for Java or other
+  "safe" languages would normally split a 64-bit store on ARM into two 32-bit
+  unordered stores.)
+
+Notes for optimizers
+ In terms of the optimizer, this prohibits any transformation that transforms a
+ single load into multiple loads, transforms a store into multiple stores,
+ narrows a store, or stores a value which would not be stored otherwise. Some
+ examples of unsafe optimizations are narrowing an assignment into a bitfield,
+ rematerializing a load, and turning loads and stores into a memcpy
+ call. Reordering unordered operations is safe, though, and optimizers should
+ take advantage of that because unordered operations are common in languages
+ that need them.
+
+Notes for code generation
+ These operations are required to be atomic in the sense that if you use
+ unordered loads and unordered stores, a load cannot see a value which was
+ never stored. A normal load or store instruction is usually sufficient, but
+ note that an unordered load or store cannot be split into multiple
+ instructions (or an instruction which does multiple memory operations, like
+ ``LDRD`` on ARM).
+
+Monotonic
+---------
+
+Monotonic is the weakest level of atomicity that can be used in synchronization
+primitives, although it does not provide any general synchronization. It
+essentially guarantees that if you take all the operations affecting a specific
+address, a consistent ordering exists.
+
+Relevant standard
+ This corresponds to the C++0x/C1x ``memory_order_relaxed``; see those
+ standards for the exact definition.
+
+Notes for frontends
+ If you are writing a frontend which uses this directly, use with caution. The
+ guarantees in terms of synchronization are very weak, so make sure these are
+ only used in a pattern which you know is correct. Generally, these would
+ either be used for atomic operations which do not protect other memory (like
+ an atomic counter), or along with a ``fence``.
+
+Notes for optimizers
+ In terms of the optimizer, this can be treated as a read+write on the relevant
+ memory location (and alias analysis will take advantage of that). In addition,
+ it is legal to reorder non-atomic and Unordered loads around Monotonic
+ loads. CSE/DSE and a few other optimizations are allowed, but Monotonic
+ operations are unlikely to be used in ways which would make those
+ optimizations useful.
+
+Notes for code generation
+ Code generation is essentially the same as that for unordered for loads and
+ stores. No fences are required. ``cmpxchg`` and ``atomicrmw`` are required
+ to appear as a single operation.
+
+Acquire
+-------
+
+Acquire provides a barrier of the sort necessary to acquire a lock to access
+other memory with normal loads and stores.
+
+Relevant standard
+ This corresponds to the C++0x/C1x ``memory_order_acquire``. It should also be
+ used for C++0x/C1x ``memory_order_consume``.
+
+Notes for frontends
+ If you are writing a frontend which uses this directly, use with caution.
+ Acquire only provides a semantic guarantee when paired with a Release
+ operation.
+
+Notes for optimizers
+ Optimizers not aware of atomics can treat this like a nothrow call. It is
+ also possible to move stores from before an Acquire load or read-modify-write
+ operation to after it, and move non-Acquire loads from before an Acquire
+ operation to after it.
+
+Notes for code generation
+ Architectures with weak memory ordering (essentially everything relevant today
+ except x86 and SPARC) require some sort of fence to maintain the Acquire
+ semantics. The precise fences required varies widely by architecture, but for
+ a simple implementation, most architectures provide a barrier which is strong
+ enough for everything (``dmb`` on ARM, ``sync`` on PowerPC, etc.). Putting
+ such a fence after the equivalent Monotonic operation is sufficient to
+ maintain Acquire semantics for a memory operation.
+
+Release
+-------
+
+Release is similar to Acquire, but with a barrier of the sort necessary to
+release a lock.
+
+Relevant standard
+ This corresponds to the C++0x/C1x ``memory_order_release``.
+
+Notes for frontends
+ If you are writing a frontend which uses this directly, use with caution.
+  Release only provides a semantic guarantee when paired with an Acquire
+ operation.
+
+Notes for optimizers
+ Optimizers not aware of atomics can treat this like a nothrow call. It is
+ also possible to move loads from after a Release store or read-modify-write
+  operation to before it, and move non-Release stores from after a Release
+ operation to before it.
+
+Notes for code generation
+ See the section on Acquire; a fence before the relevant operation is usually
+ sufficient for Release. Note that a store-store fence is not sufficient to
+ implement Release semantics; store-store fences are generally not exposed to
+ IR because they are extremely difficult to use correctly.
+
+AcquireRelease
+--------------
+
+AcquireRelease (``acq_rel`` in IR) provides both an Acquire and a Release
+barrier (for fences and operations which both read and write memory).
+
+Relevant standard
+ This corresponds to the C++0x/C1x ``memory_order_acq_rel``.
+
+Notes for frontends
+ If you are writing a frontend which uses this directly, use with caution.
+ Acquire only provides a semantic guarantee when paired with a Release
+ operation, and vice versa.
+
+Notes for optimizers
+ In general, optimizers should treat this like a nothrow call; the possible
+ optimizations are usually not interesting.
+
+Notes for code generation
+ This operation has Acquire and Release semantics; see the sections on Acquire
+ and Release.
+
+SequentiallyConsistent
+----------------------
+
+SequentiallyConsistent (``seq_cst`` in IR) provides Acquire semantics for loads
+and Release semantics for stores. Additionally, it guarantees that a total
+ordering exists between all SequentiallyConsistent operations.
+
+Relevant standard
+ This corresponds to the C++0x/C1x ``memory_order_seq_cst``, Java volatile, and
+ the gcc-compatible ``__sync_*`` builtins which do not specify otherwise.
+
+Notes for frontends
+ If a frontend is exposing atomic operations, these are much easier to reason
+ about for the programmer than other kinds of operations, and using them is
+ generally a practical performance tradeoff.
+
+Notes for optimizers
+ Optimizers not aware of atomics can treat this like a nothrow call. For
+ SequentiallyConsistent loads and stores, the same reorderings are allowed as
+ for Acquire loads and Release stores, except that SequentiallyConsistent
+ operations may not be reordered.
+
+Notes for code generation
+ SequentiallyConsistent loads minimally require the same barriers as Acquire
+ operations and SequentiallyConsistent stores require Release
+ barriers. Additionally, the code generator must enforce ordering between
+ SequentiallyConsistent stores followed by SequentiallyConsistent loads. This
+ is usually done by emitting either a full fence before the loads or a full
+ fence after the stores; which is preferred varies by architecture.
+
+Atomics and IR optimization
+===========================
+
+Predicates for optimizer writers to query:
+
+* ``isSimple()``: A load or store which is not volatile or atomic. This is
+ what, for example, memcpyopt would check for operations it might transform.
+
+* ``isUnordered()``: A load or store which is not volatile and at most
+ Unordered. This would be checked, for example, by LICM before hoisting an
+ operation.
+
+* ``mayReadFromMemory()``/``mayWriteToMemory()``: Existing predicate, but note
+ that they return true for any operation which is volatile or at least
+ Monotonic.
+
+* Alias analysis: Note that AA will return ModRef for anything Acquire or
+ Release, and for the address accessed by any Monotonic operation.
+
+To support optimizing around atomic operations, make sure you are using the
+right predicates; everything should work if that is done. If your pass should
+optimize some atomic operations (Unordered operations in particular), make sure
+it doesn't replace an atomic load or store with a non-atomic operation.
+
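+For instance, a pass that hoists loads might guard itself like this (a sketch
+with an illustrative ``LoadInst *LI``):
+
+.. code-block:: c++
+
+  if (!LI->isUnordered())
+    return false;   // volatile, or stronger than Unordered: leave it alone
+  // ... the load is at most Unordered; hoisting it is fine as far as the
+  // atomicity rules above are concerned ...
+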
+Some examples of how optimizations interact with various kinds of atomic
+operations:
+
+* ``memcpyopt``: An atomic operation cannot be optimized into part of a
+ memcpy/memset, including unordered loads/stores. It can pull operations
+ across some atomic operations.
+
+* LICM: Unordered loads/stores can be moved out of a loop. It just treats
+ monotonic operations like a read+write to a memory location, and anything
+ stricter than that like a nothrow call.
+
+* DSE: Unordered stores can be DSE'ed like normal stores. Monotonic stores can
+ be DSE'ed in some cases, but it's tricky to reason about, and not especially
+ important.
+
+* Folding a load: Any atomic load from a constant global can be constant-folded,
+ because it cannot be observed. Similar reasoning allows scalarrepl with
+ atomic loads and stores.
+
+Atomics and Codegen
+===================
+
+Atomic operations are represented in the SelectionDAG with ``ATOMIC_*`` opcodes.
+On architectures which use barrier instructions for all atomic ordering (like
+ARM), appropriate fences are split out as the DAG is built.
+
+The MachineMemOperand for all atomic operations is currently marked as volatile;
+this is not correct in the IR sense of volatile, but CodeGen handles anything
+marked volatile very conservatively. This should get fixed at some point.
+
+Common architectures have some way of representing at least a pointer-sized
+lock-free ``cmpxchg``; such an operation can be used to implement all the other
+atomic operations which can be represented in IR up to that size. Backends are
+expected to implement all those operations, but not operations which cannot be
+implemented in a lock-free manner. It is expected that backends will give an
+error when given an operation which cannot be implemented. (The LLVM code
+generator is not very helpful here at the moment, but hopefully that will
+change.)
+
+The implementation of atomics on LL/SC architectures (like ARM) is currently a
+bit of a mess; there is a lot of copy-pasted code across targets, and the
+representation is relatively unsuited to optimization (it would be nice to be
+able to optimize loops involving cmpxchg etc.).
+
+On x86, all atomic loads generate a ``MOV``. SequentiallyConsistent stores
+generate an ``XCHG``, other stores generate a ``MOV``. SequentiallyConsistent
+fences generate an ``MFENCE``, other fences do not cause any code to be
+generated. cmpxchg uses the ``LOCK CMPXCHG`` instruction. ``atomicrmw xchg``
+uses ``XCHG``, ``atomicrmw add`` and ``atomicrmw sub`` use ``XADD``, and all
+other ``atomicrmw`` operations generate a loop with ``LOCK CMPXCHG``. Depending
+on the users of the result, some ``atomicrmw`` operations can be translated into
+operations like ``LOCK AND``, but that does not work in general.
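+
+As a rough source-level illustration of the mapping just described, the C++11
+operations below would typically lower as annotated (a sketch; the exact
+instruction sequence depends on the compiler version and optimization level):
+
+.. code-block:: c++
+
+   #include <atomic>
+
+   std::atomic<int> X;
+
+   void x86AtomicExamples(int v) {
+     (void)X.load(std::memory_order_seq_cst);             // MOV
+     X.store(v, std::memory_order_release);               // MOV
+     X.store(v, std::memory_order_seq_cst);               // XCHG
+     X.fetch_add(1, std::memory_order_seq_cst);           // LOCK XADD
+     std::atomic_thread_fence(std::memory_order_seq_cst); // MFENCE
+   }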
+
+On ARM, MIPS, and many other RISC architectures, Acquire, Release, and
+SequentiallyConsistent semantics require barrier instructions for every such
+operation. Loads and stores generate normal instructions. ``cmpxchg`` and
+``atomicrmw`` can be represented using a loop with LL/SC-style instructions
+which take some sort of exclusive lock on a cache line (``LDREX`` and ``STREX``
+on ARM, etc.). At the moment, the IR does not provide any way to represent a
+weak ``cmpxchg`` which would not require a loop.
diff --git a/docs/BitCodeFormat.rst b/docs/BitCodeFormat.rst
new file mode 100644
index 00000000000..d3995e7036b
--- /dev/null
+++ b/docs/BitCodeFormat.rst
@@ -0,0 +1,1045 @@
+.. _bitcode_format:
+
+.. role:: raw-html(raw)
+ :format: html
+
+========================
+LLVM Bitcode File Format
+========================
+
+.. contents::
+ :local:
+
+Abstract
+========
+
+This document describes the LLVM bitstream file format and the encoding of the
+LLVM IR into it.
+
+Overview
+========
+
+What is commonly known as the LLVM bitcode file format (also, sometimes
+anachronistically known as bytecode) is actually two things: a `bitstream
+container format`_ and an `encoding of LLVM IR`_ into the container format.
+
+The bitstream format is an abstract encoding of structured data, very similar to
+XML in some ways. Like XML, bitstream files contain tags, and nested
+structures, and you can parse the file without having to understand the tags.
+Unlike XML, the bitstream format is a binary encoding, and unlike XML it
+provides a mechanism for the file to self-describe "abbreviations", which are
+effectively size optimizations for the content.
+
+LLVM IR files may be optionally embedded into a `wrapper`_ structure that makes
+it easy to embed extra data along with LLVM IR files.
+
+This document first describes the LLVM bitstream format, describes the wrapper
+format, then describes the record structure used by LLVM IR files.
+
+.. _bitstream container format:
+
+Bitstream Format
+================
+
+The bitstream format is literally a stream of bits, with a very simple
+structure. This structure consists of the following concepts:
+
+* A "`magic number`_" that identifies the contents of the stream.
+
+* Encoding `primitives`_ like variable bit-rate integers.
+
+* `Blocks`_, which define nested content.
+
+* `Data Records`_, which describe entities within the file.
+
+* Abbreviations, which specify compression optimizations for the file.
+
+Note that the `llvm-bcanalyzer <CommandGuide/html/llvm-bcanalyzer.html>`_ tool
+can be used to dump and inspect arbitrary bitstreams, which is very useful for
+understanding the encoding.
+
+.. _magic number:
+
+Magic Numbers
+-------------
+
+The first two bytes of a bitcode file are 'BC' (``0x42``, ``0x43``). The second
+two bytes are an application-specific magic number. Generic bitcode tools can
+look at only the first two bytes to verify the file is bitcode, while
+application-specific programs will want to look at all four.
+
+.. _primitives:
+
+Primitives
+----------
+
+A bitstream literally consists of a stream of bits, which are read in order
+starting with the least significant bit of each byte. The stream is made up of
+a number of primitive values that encode a stream of unsigned integer values.
+These integers are encoded in two ways: either as `Fixed Width Integers`_ or as
+`Variable Width Integers`_.
+
+.. _Fixed Width Integers:
+.. _fixed-width value:
+
+Fixed Width Integers
+^^^^^^^^^^^^^^^^^^^^
+
+Fixed-width integer values have their low bits emitted directly to the file.
+For example, a 3-bit integer value encodes 1 as 001. Fixed width integers are
+used when there are a well-known number of options for a field. For example,
+boolean values are usually encoded with a 1-bit wide integer.
+
+.. _Variable Width Integers:
+.. _Variable Width Integer:
+.. _variable-width value:
+
+Variable Width Integers
+^^^^^^^^^^^^^^^^^^^^^^^
+
+Variable-width integer (VBR) values encode values of arbitrary size, optimizing
+for the case where the values are small. Given a 4-bit VBR field, any 3-bit
+value (0 through 7) is encoded directly, with the high bit set to zero. Values
+larger than N-1 bits emit their bits in a series of N-1 bit chunks, where all
+but the last set the high bit.
+
+For example, the value 27 (0x1B) is encoded as 1011 0011 when emitted as a vbr4
+value. The first set of four bits indicates the value 3 (011) with a
+continuation piece (indicated by a high bit of 1). The next word indicates a
+value of 24 (011 << 3) with no continuation. The sum (3+24) yields the value
+27.
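+
+The chunking can be sketched in a few lines of C++ (illustrative only; the real
+encoder lives in LLVM's ``BitstreamWriter``):
+
+.. code-block:: c++
+
+   #include <cstdint>
+   #include <vector>
+
+   // Split Value into (N-1)-bit chunks, least significant first, setting the
+   // high bit of every chunk except the last to signal continuation.
+   static std::vector<uint32_t> encodeVBR(uint64_t Value, unsigned N) {
+     const uint64_t Mask = (1ULL << (N - 1)) - 1;  // low N-1 bits
+     std::vector<uint32_t> Chunks;
+     do {
+       uint32_t Chunk = Value & Mask;
+       Value >>= (N - 1);
+       if (Value != 0)
+         Chunk |= (1u << (N - 1));                 // continuation bit
+       Chunks.push_back(Chunk);
+     } while (Value != 0);
+     return Chunks;  // e.g. 27 as vbr4 yields {0b1011, 0b0011}
+   }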
+
+.. _char6-encoded value:
+
+6-bit characters
+^^^^^^^^^^^^^^^^
+
+6-bit characters encode common characters into a fixed 6-bit field. They
+represent the following characters with the following 6-bit values:
+
+::
+
+ 'a' .. 'z' --- 0 .. 25
+ 'A' .. 'Z' --- 26 .. 51
+ '0' .. '9' --- 52 .. 61
+ '.' --- 62
+ '_' --- 63
+
+This encoding is only suitable for encoding characters and strings that consist
+only of the above characters. It is completely incapable of encoding characters
+not in the set.
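+
+The mapping is mechanical (a sketch; assumes the caller only passes characters
+from the legal set):
+
+.. code-block:: c++
+
+   // Map one character from the set [a-zA-Z0-9._] to its 6-bit code.
+   static unsigned encodeChar6(char C) {
+     if (C >= 'a' && C <= 'z') return C - 'a';       //  0 .. 25
+     if (C >= 'A' && C <= 'Z') return C - 'A' + 26;  // 26 .. 51
+     if (C >= '0' && C <= '9') return C - '0' + 52;  // 52 .. 61
+     if (C == '.') return 62;
+     if (C == '_') return 63;
+     return ~0u;  // not representable in char6
+   }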
+
+Word Alignment
+^^^^^^^^^^^^^^
+
+Occasionally, it is useful to emit zero bits until the bitstream is a multiple
+of 32 bits. This ensures that the bit position in the stream can be represented
+as a multiple of 32-bit words.
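+
+In a reader or writer this is simply rounding the current bit position up to
+the next multiple of 32 (a sketch):
+
+.. code-block:: c++
+
+   #include <cstdint>
+
+   // Number of zero bits to emit so that BitPos becomes 32-bit aligned.
+   static uint64_t paddingBitsTo32(uint64_t BitPos) {
+     return (32 - (BitPos % 32)) % 32;
+   }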
+
+Abbreviation IDs
+----------------
+
+A bitstream is a sequential series of `Blocks`_ and `Data Records`_. Both of
+these start with an abbreviation ID encoded as a fixed-bitwidth field. The
+width is specified by the current block, as described below. The value of the
+abbreviation ID specifies either a builtin ID (which have special meanings,
+defined below) or one of the abbreviation IDs defined for the current block by
+the stream itself.
+
+The set of builtin abbrev IDs is:
+
+* 0 - `END_BLOCK`_ --- This abbrev ID marks the end of the current block.
+
+* 1 - `ENTER_SUBBLOCK`_ --- This abbrev ID marks the beginning of a new
+ block.
+
+* 2 - `DEFINE_ABBREV`_ --- This defines a new abbreviation.
+
+* 3 - `UNABBREV_RECORD`_ --- This ID specifies the definition of an
+ unabbreviated record.
+
+Abbreviation IDs 4 and above are defined by the stream itself, and specify an
+`abbreviated record encoding`_.
+
+.. _Blocks:
+
+Blocks
+------
+
+Blocks in a bitstream denote nested regions of the stream, and are identified by
+a content-specific id number (for example, LLVM IR uses an ID of 12 to represent
+function bodies). Block IDs 0-7 are reserved for `standard blocks`_ whose
+meaning is defined by Bitcode; block IDs 8 and greater are application
+specific. Nested blocks capture the hierarchical structure of the data encoded
+in it, and various properties are associated with blocks as the file is parsed.
+Block definitions allow the reader to efficiently skip blocks in constant time
+if the reader wants a summary of blocks, or if it wants to efficiently skip data
+it does not understand. The LLVM IR reader uses this mechanism to skip function
+bodies, lazily reading them on demand.
+
+When reading and encoding the stream, several properties are maintained for the
+block. In particular, each block maintains:
+
+#. A current abbrev id width. This value starts at 2 at the beginning of the
+ stream, and is set every time a block record is entered. The block entry
+ specifies the abbrev id width for the body of the block.
+
+#. A set of abbreviations. Abbreviations may be defined within a block, in
+ which case they are only defined in that block (neither subblocks nor
+ enclosing blocks see the abbreviation). Abbreviations can also be defined
+ inside a `BLOCKINFO`_ block, in which case they are defined in all blocks
+ that match the ID that the ``BLOCKINFO`` block is describing.
+
+As sub blocks are entered, these properties are saved and the new sub-block has
+its own set of abbreviations, and its own abbrev id width. When a sub-block is
+popped, the saved values are restored.
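+
+A reader might model this as a stack of per-block scopes, roughly as follows
+(an illustrative sketch, not LLVM's actual ``BitstreamReader``):
+
+.. code-block:: c++
+
+   #include <vector>
+
+   struct AbbrevDef { /* operand descriptions, elided */ };
+
+   struct BlockScope {
+     unsigned AbbrevIDWidth;          // width used to read abbrev IDs here
+     std::vector<AbbrevDef> Abbrevs;  // abbreviations visible in this block
+   };
+
+   // The stream starts with an abbrev ID width of 2; ENTER_SUBBLOCK pushes a
+   // new scope with its own width and abbreviation list, and END_BLOCK pops
+   // it, restoring the enclosing scope.
+   std::vector<BlockScope> ScopeStack = {{2, {}}};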
+
+.. _ENTER_SUBBLOCK:
+
+ENTER_SUBBLOCK Encoding
+^^^^^^^^^^^^^^^^^^^^^^^
+
+:raw-html:`<tt>`
+[ENTER_SUBBLOCK, blockid\ :sub:`vbr8`, newabbrevlen\ :sub:`vbr4`, <align32bits>, blocklen_32]
+:raw-html:`</tt>`
+
+The ``ENTER_SUBBLOCK`` abbreviation ID specifies the start of a new block
+record. The ``blockid`` value is encoded as an 8-bit VBR identifier, and
+indicates the type of block being entered, which can be a `standard block`_ or
+an application-specific block. The ``newabbrevlen`` value is a 4-bit VBR, which
+specifies the abbrev id width for the sub-block. The ``blocklen`` value is a
+32-bit aligned value that specifies the size of the subblock in 32-bit
+words. This value allows the reader to skip over the entire block in one jump.
+
+.. _END_BLOCK:
+
+END_BLOCK Encoding
+^^^^^^^^^^^^^^^^^^
+
+``[END_BLOCK, <align32bits>]``
+
+The ``END_BLOCK`` abbreviation ID specifies the end of the current block record.
+Its end is aligned to 32-bits to ensure that the size of the block is an even
+multiple of 32-bits.
+
+.. _Data Records:
+
+Data Records
+------------
+
+Data records consist of a record code and a number of (up to) 64-bit integer
+values. The interpretation of the code and values is application specific and
+may vary between different block types. Records can be encoded either using an
+unabbrev record, or with an abbreviation. In the LLVM IR format, for example,
+there is a record which encodes the target triple of a module. The code is
+``MODULE_CODE_TRIPLE``, and the values of the record are the ASCII codes for the
+characters in the string.
+
+.. _UNABBREV_RECORD:
+
+UNABBREV_RECORD Encoding
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+:raw-html:`<tt>`
+[UNABBREV_RECORD, code\ :sub:`vbr6`, numops\ :sub:`vbr6`, op0\ :sub:`vbr6`, op1\ :sub:`vbr6`, ...]
+:raw-html:`</tt>`
+
+An ``UNABBREV_RECORD`` provides a default fallback encoding, which is both
+completely general and extremely inefficient. It can describe an arbitrary
+record by emitting the code and operands as VBRs.
+
+For example, emitting an LLVM IR target triple as an unabbreviated record
+requires emitting the ``UNABBREV_RECORD`` abbrevid, a vbr6 for the
+``MODULE_CODE_TRIPLE`` code, a vbr6 for the length of the string, which is equal
+to the number of operands, and a vbr6 for each character. Because there are no
+letters with values less than 32, each letter would need to be emitted as at
+least a two-part VBR, which means that each letter would require at least 12
+bits. This is not an efficient encoding, but it is fully general.
+
+.. _abbreviated record encoding:
+
+Abbreviated Record Encoding
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+``[<abbrevid>, fields...]``
+
+An abbreviated record is an abbreviation id followed by a set of fields that are
+encoded according to the `abbreviation definition`_. This allows records to be
+encoded significantly more densely than records encoded with the
+`UNABBREV_RECORD`_ type, and allows the abbreviation types to be specified in
+the stream itself, which allows the files to be completely self describing. The
+actual encoding of abbreviations is defined below.
+
+The record code, which is the first field of an abbreviated record, may be
+encoded in the abbreviation definition (as a literal operand) or supplied in the
+abbreviated record (as a Fixed or VBR operand value).
+
+.. _abbreviation definition:
+
+Abbreviations
+-------------
+
+Abbreviations are an important form of compression for bitstreams. The idea is
+to specify a dense encoding for a class of records once, then use that encoding
+to emit many records. It takes space to emit the encoding into the file, but
+the space is recouped (hopefully plus some) when the records that use it are
+emitted.
+
+Abbreviations can be determined dynamically per client, per file. Because the
+abbreviations are stored in the bitstream itself, different streams of the same
+format can contain different sets of abbreviations according to the needs of the
+specific stream. As a concrete example, LLVM IR files usually emit an
+abbreviation for binary operators. If a specific LLVM module contains no or
+only a few binary operators, the abbreviation does not need to be emitted.
+
+.. _DEFINE_ABBREV:
+
+DEFINE_ABBREV Encoding
+^^^^^^^^^^^^^^^^^^^^^^
+
+:raw-html:`<tt>`
+[DEFINE_ABBREV, numabbrevops\ :sub:`vbr5`, abbrevop0, abbrevop1, ...]
+:raw-html:`</tt>`
+
+A ``DEFINE_ABBREV`` record adds an abbreviation to the list of currently defined
+abbreviations in the scope of this block. This definition only exists inside
+this immediate block --- it is not visible in subblocks or enclosing blocks.
+Abbreviations are implicitly assigned IDs sequentially starting from 4 (the
+first application-defined abbreviation ID). Any abbreviations defined in a
+``BLOCKINFO`` record for the particular block type receive IDs first, in order,
+followed by any abbreviations defined within the block itself. Abbreviated data
+records reference this ID to indicate what abbreviation they are invoking.
+
+An abbreviation definition consists of the ``DEFINE_ABBREV`` abbrevid followed
+by a VBR that specifies the number of abbrev operands, then the abbrev operands
+themselves. Abbreviation operands come in three forms. They all start with a
+single bit that indicates whether the abbrev operand is a literal operand (when
+the bit is 1) or an encoding operand (when the bit is 0).
+
+#. Literal operands --- :raw-html:`<tt>` [1\ :sub:`1`, litvalue\
+ :sub:`vbr8`] :raw-html:`</tt>` --- Literal operands specify that the value in
+ the result is always a single specific value. This specific value is emitted
+ as a vbr8 after the bit indicating that it is a literal operand.
+
+#. Encoding info without data --- :raw-html:`<tt>` [0\ :sub:`1`, encoding\
+ :sub:`3`] :raw-html:`</tt>` --- Operand encodings that do not have extra data
+ are just emitted as their code.
+
+#. Encoding info with data --- :raw-html:`<tt>` [0\ :sub:`1`, encoding\
+ :sub:`3`, value\ :sub:`vbr5`] :raw-html:`</tt>` --- Operand encodings that do
+ have extra data are emitted as their code, followed by the extra data.
+
+The possible operand encodings are:
+
+* Fixed (code 1): The field should be emitted as a `fixed-width value`_, whose
+ width is specified by the operand's extra data.
+
+* VBR (code 2): The field should be emitted as a `variable-width value`_, whose
+ width is specified by the operand's extra data.
+
+* Array (code 3): This field is an array of values. The array operand has no
+ extra data, but expects another operand to follow it, indicating the element
+ type of the array. When reading an array in an abbreviated record, the first
+ integer is a vbr6 that indicates the array length, followed by the encoded
+ elements of the array. An array may only occur as the last operand of an
+ abbreviation (except for the one final operand that gives the array's
+ type).
+
+* Char6 (code 4): This field should be emitted as a `char6-encoded value`_.
+ This operand type takes no extra data. Char6 encoding is normally used as an
+ array element type.
+
+* Blob (code 5): This field is emitted as a vbr6, followed by padding to a
+ 32-bit boundary (for alignment) and an array of 8-bit objects. The array of
+ bytes is further followed by tail padding to ensure that its total length is a
+ multiple of 4 bytes. This makes it very efficient for the reader to decode
+ the data without having to make a copy of it: it can use a pointer to the data
+ in the mapped-in file and poke directly at it. A blob may only occur as the
+ last operand of an abbreviation.
+
+For example, target triples in LLVM modules are encoded as a record of the form
+``[TRIPLE, 'a', 'b', 'c', 'd']``. Consider if the bitstream emitted the
+following abbrev entry:
+
+::
+
+ [0, Fixed, 4]
+ [0, Array]
+ [0, Char6]
+
+When emitting a record with this abbreviation, the above entry would be emitted
+as:
+
+:raw-html:`<tt><blockquote>`
+[4\ :sub:`abbrevwidth`, 2\ :sub:`4`, 4\ :sub:`vbr6`, 0\ :sub:`6`, 1\ :sub:`6`, 2\ :sub:`6`, 3\ :sub:`6`]
+:raw-html:`</blockquote></tt>`
+
+These values are:
+
+#. The first value, 4, is the abbreviation ID for this abbreviation.
+
+#. The second value, 2, is the record code for ``TRIPLE`` records within LLVM IR
+ file ``MODULE_BLOCK`` blocks.
+
+#. The third value, 4, is the length of the array.
+
+#. The rest of the values are the char6 encoded values for ``"abcd"``.
+
+With this abbreviation, the triple is emitted with only 37 bits (assuming an
+abbrev id width of 3): 3 bits for the abbreviation ID, 4 bits for the fixed-width
+record code, 6 bits for the vbr6 array length, and 6 bits for each of the four
+char6 characters. Without the abbreviation, significantly more space would
+be required to emit the target triple. Also, because the ``TRIPLE`` value is
+not emitted as a literal in the abbreviation, the abbreviation can also be used
+for any other string value.
+
+.. _standard blocks:
+.. _standard block:
+
+Standard Blocks
+---------------
+
+In addition to the basic block structure and record encodings, the bitstream
+also defines specific built-in block types. These block types specify how the
+stream is to be decoded or other metadata. In the future, new standard blocks
+may be added. Block IDs 0-7 are reserved for standard blocks.
+
+.. _BLOCKINFO:
+
+#0 - BLOCKINFO Block
+^^^^^^^^^^^^^^^^^^^^
+
+The ``BLOCKINFO`` block allows the description of metadata for other blocks.
+The currently specified records are:
+
+::
+
+ [SETBID (#1), blockid]
+ [DEFINE_ABBREV, ...]
+ [BLOCKNAME, ...name...]
+ [SETRECORDNAME, RecordID, ...name...]
+
+The ``SETBID`` record (code 1) indicates which block ID is being described.
+``SETBID`` records can occur multiple times throughout the block to change which
+block ID is being described. There must be a ``SETBID`` record prior to any
+other records.
+
+Standard ``DEFINE_ABBREV`` records can occur inside ``BLOCKINFO`` blocks, but
+unlike their occurrence in normal blocks, the abbreviation is defined for blocks
+matching the block ID we are describing, *not* the ``BLOCKINFO`` block
+itself. The abbreviations defined in ``BLOCKINFO`` blocks receive abbreviation
+IDs as described in `DEFINE_ABBREV`_.
+
+The ``BLOCKNAME`` record (code 2) can optionally occur in this block. The
+elements of the record are the bytes of the string name of the block.
+llvm-bcanalyzer can use this to dump out bitcode files symbolically.
+
+The ``SETRECORDNAME`` record (code 3) can also optionally occur in this block.
+The first operand value is a record ID number, and the rest of the elements of
+the record are the bytes for the string name of the record. llvm-bcanalyzer can
+use this to dump out bitcode files symbolically.
+
+Note that although the data in ``BLOCKINFO`` blocks is described as "metadata,"
+the abbreviations they contain are essential for parsing records from the
+corresponding blocks. It is not safe to skip them.
+
+.. _wrapper:
+
+Bitcode Wrapper Format
+======================
+
+Bitcode files for LLVM IR may optionally be wrapped in a simple wrapper
+structure. This structure contains a simple header that indicates the offset
+and size of the embedded BC file. This allows additional information to be
+stored alongside the BC file. The structure of this file header is:
+
+:raw-html:`<tt><blockquote>`
+[Magic\ :sub:`32`, Version\ :sub:`32`, Offset\ :sub:`32`, Size\ :sub:`32`, CPUType\ :sub:`32`]
+:raw-html:`</blockquote></tt>`
+
+Each of the fields are 32-bit fields stored in little endian form (as with the
+rest of the bitcode file fields). The Magic number is always ``0x0B17C0DE`` and
+the version is currently always ``0``. The Offset field is the offset in bytes
+to the start of the bitcode stream in the file, and the Size field is the size
+in bytes of the stream. CPUType is a target-specific value that can be used to
+encode the CPU of the target.
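+
+A reader can detect and skip the wrapper with a handful of little-endian reads
+(an illustrative sketch, not LLVM's actual reader; a little-endian host is
+assumed for brevity):
+
+.. code-block:: c++
+
+   #include <cstddef>
+   #include <cstdint>
+   #include <cstring>
+
+   struct BitcodeWrapperHeader {
+     uint32_t Magic;    // always 0x0B17C0DE
+     uint32_t Version;  // currently 0
+     uint32_t Offset;   // byte offset of the embedded bitcode stream
+     uint32_t Size;     // byte size of the embedded bitcode stream
+     uint32_t CPUType;  // target-specific
+   };
+
+   // Returns the start of the raw bitcode stream: either the buffer itself
+   // (no wrapper present) or the embedded stream inside the wrapper.
+   static const uint8_t *skipWrapper(const uint8_t *Buf, size_t Len) {
+     BitcodeWrapperHeader H;
+     if (Len < sizeof H)
+       return Buf;
+     std::memcpy(&H, Buf, sizeof H);
+     if (H.Magic != 0x0B17C0DE)
+       return Buf;               // not wrapped
+     return Buf + H.Offset;      // caller should also check H.Size
+   }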
+
+.. _encoding of LLVM IR:
+
+LLVM IR Encoding
+================
+
+LLVM IR is encoded into a bitstream by defining blocks and records. It uses
+blocks for things like constant pools, functions, symbol tables, etc. It uses
+records for things like instructions, global variable descriptors, type
+descriptions, etc. This document does not describe the set of abbreviations
+that the writer uses, as these are fully self-described in the file, and the
+reader is not allowed to build in any knowledge of this.
+
+Basics
+------
+
+LLVM IR Magic Number
+^^^^^^^^^^^^^^^^^^^^
+
+The magic number for LLVM IR files is:
+
+:raw-html:`<tt><blockquote>`
+[0x0\ :sub:`4`, 0xC\ :sub:`4`, 0xE\ :sub:`4`, 0xD\ :sub:`4`]
+:raw-html:`</blockquote></tt>`
+
+When combined with the bitcode magic number and viewed as bytes, this is
+``"BC 0xC0DE"``.
+
+Signed VBRs
+^^^^^^^^^^^
+
+`Variable Width Integer`_ encoding is an efficient way to encode arbitrary sized
+unsigned values, but is an extremely inefficient for encoding signed values, as
+signed values are otherwise treated as maximally large unsigned values.
+
+As such, signed VBR values of a specific width are emitted as follows:
+
+* Positive values are emitted as VBRs of the specified width, but with their
+ value shifted left by one.
+
+* Negative values are emitted as VBRs of the specified width, but the negated
+ value is shifted left by one, and the low bit is set.
+
+With this encoding, small positive and small negative values can both be emitted
+efficiently. Signed VBR encoding is used in ``CST_CODE_INTEGER`` and
+``CST_CODE_WIDE_INTEGER`` records within ``CONSTANTS_BLOCK`` blocks.
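+
+The transformation between the signed value and the unsigned payload that is
+actually emitted as a VBR is simply (a sketch of the rules above; the
+``INT64_MIN`` corner case is ignored for brevity):
+
+.. code-block:: c++
+
+   #include <cstdint>
+
+   // Signed value -> unsigned payload emitted as an ordinary VBR.
+   static uint64_t encodeSignedVBRValue(int64_t V) {
+     return V >= 0 ? uint64_t(V) << 1          // positive: shifted left by one
+                   : (uint64_t(-V) << 1) | 1;  // negative: negate, shift, set low bit
+   }
+
+   // Inverse transformation used by the reader.
+   static int64_t decodeSignedVBRValue(uint64_t U) {
+     return (U & 1) ? -int64_t(U >> 1) : int64_t(U >> 1);
+   }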
+
+LLVM IR Blocks
+^^^^^^^^^^^^^^
+
+LLVM IR is defined with the following blocks:
+
+* 8 --- `MODULE_BLOCK`_ --- This is the top-level block that contains the entire
+ module, and describes a variety of per-module information.
+
+* 9 --- `PARAMATTR_BLOCK`_ --- This enumerates the parameter attributes.
+
+* 10 --- `TYPE_BLOCK`_ --- This describes all of the types in the module.
+
+* 11 --- `CONSTANTS_BLOCK`_ --- This describes constants for a module or
+ function.
+
+* 12 --- `FUNCTION_BLOCK`_ --- This describes a function body.
+
+* 13 --- `TYPE_SYMTAB_BLOCK`_ --- This describes the type symbol table.
+
+* 14 --- `VALUE_SYMTAB_BLOCK`_ --- This describes a value symbol table.
+
+* 15 --- `METADATA_BLOCK`_ --- This describes metadata items.
+
+* 16 --- `METADATA_ATTACHMENT`_ --- This contains records associating metadata
+ with function instruction values.
+
+.. _MODULE_BLOCK:
+
+MODULE_BLOCK Contents
+---------------------
+
+The ``MODULE_BLOCK`` block (id 8) is the top-level block for LLVM bitcode files,
+and each bitcode file must contain exactly one. In addition to records
+(described below) containing information about the module, a ``MODULE_BLOCK``
+block may contain the following sub-blocks:
+
+* `BLOCKINFO`_
+* `PARAMATTR_BLOCK`_
+* `TYPE_BLOCK`_
+* `TYPE_SYMTAB_BLOCK`_
+* `VALUE_SYMTAB_BLOCK`_
+* `CONSTANTS_BLOCK`_
+* `FUNCTION_BLOCK`_
+* `METADATA_BLOCK`_
+
+MODULE_CODE_VERSION Record
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+``[VERSION, version#]``
+
+The ``VERSION`` record (code 1) contains a single value indicating the format
+version. Only version 0 is supported at this time.
+
+MODULE_CODE_TRIPLE Record
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+``[TRIPLE, ...string...]``
+
+The ``TRIPLE`` record (code 2) contains a variable number of values representing
+the bytes of the ``target triple`` specification string.
+
+MODULE_CODE_DATALAYOUT Record
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+``[DATALAYOUT, ...string...]``
+
+The ``DATALAYOUT`` record (code 3) contains a variable number of values
+representing the bytes of the ``target datalayout`` specification string.
+
+MODULE_CODE_ASM Record
+^^^^^^^^^^^^^^^^^^^^^^
+
+``[ASM, ...string...]``
+
+The ``ASM`` record (code 4) contains a variable number of values representing
+the bytes of ``module asm`` strings, with individual assembly blocks separated
+by newline (ASCII 10) characters.
+
+.. _MODULE_CODE_SECTIONNAME:
+
+MODULE_CODE_SECTIONNAME Record
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+``[SECTIONNAME, ...string...]``
+
+The ``SECTIONNAME`` record (code 5) contains a variable number of values
+representing the bytes of a single section name string. There should be one
+``SECTIONNAME`` record for each section name referenced (e.g., in global
+variable or function ``section`` attributes) within the module. These records
+can be referenced by the 1-based index in the *section* fields of ``GLOBALVAR``
+or ``FUNCTION`` records.
+
+MODULE_CODE_DEPLIB Record
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+``[DEPLIB, ...string...]``
+
+The ``DEPLIB`` record (code 6) contains a variable number of values representing
+the bytes of a single dependent library name string, one of the libraries
+mentioned in a ``deplibs`` declaration. There should be one ``DEPLIB`` record
+for each library name referenced.
+
+MODULE_CODE_GLOBALVAR Record
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+``[GLOBALVAR, pointer type, isconst, initid, linkage, alignment, section, visibility, threadlocal, unnamed_addr]``
+
+The ``GLOBALVAR`` record (code 7) marks the declaration or definition of a
+global variable. The operand fields are:
+
+* *pointer type*: The type index of the pointer type used to point to this
+ global variable
+
+* *isconst*: Non-zero if the variable is treated as constant within the module,
+ or zero if it is not
+
+* *initid*: If non-zero, the value index of the initializer for this variable,
+ plus 1.
+
+.. _linkage type:
+
+* *linkage*: An encoding of the linkage type for this variable:
+ * ``external``: code 0
+ * ``weak``: code 1
+ * ``appending``: code 2
+ * ``internal``: code 3
+ * ``linkonce``: code 4
+ * ``dllimport``: code 5
+ * ``dllexport``: code 6
+ * ``extern_weak``: code 7
+ * ``common``: code 8
+ * ``private``: code 9
+ * ``weak_odr``: code 10
+ * ``linkonce_odr``: code 11
+ * ``available_externally``: code 12
+ * ``linker_private``: code 13
+
+* *alignment*: The logarithm base 2 of the variable's requested alignment, plus
+  1 (so a value of 0 means no alignment was specified; for example, ``align 8``
+  is encoded as 4)
+
+* *section*: If non-zero, the 1-based section index in the table of
+ `MODULE_CODE_SECTIONNAME`_ entries.
+
+.. _visibility:
+
+* *visibility*: If present, an encoding of the visibility of this variable:
+ * ``default``: code 0
+ * ``hidden``: code 1
+ * ``protected``: code 2
+
+* *threadlocal*: If present, an encoding of the thread local storage mode of the
+ variable:
+ * ``not thread local``: code 0
+ * ``thread local; default TLS model``: code 1
+ * ``localdynamic``: code 2
+ * ``initialexec``: code 3
+ * ``localexec``: code 4
+
+* *unnamed_addr*: If present and non-zero, indicates that the variable has
+ ``unnamed_addr``
+
+.. _FUNCTION:
+
+MODULE_CODE_FUNCTION Record
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+``[FUNCTION, type, callingconv, isproto, linkage, paramattr, alignment, section, visibility, gc]``
+
+The ``FUNCTION`` record (code 8) marks the declaration or definition of a
+function. The operand fields are:
+
+* *type*: The type index of the function type describing this function
+
+* *callingconv*: The calling convention number:
+ * ``ccc``: code 0
+ * ``fastcc``: code 8
+ * ``coldcc``: code 9
+ * ``x86_stdcallcc``: code 64
+ * ``x86_fastcallcc``: code 65
+ * ``arm_apcscc``: code 66
+ * ``arm_aapcscc``: code 67
+ * ``arm_aapcs_vfpcc``: code 68
+
+* *isproto*: Non-zero if this entry represents a declaration rather than a
+ definition
+
+* *linkage*: An encoding of the `linkage type`_ for this function
+
+* *paramattr*: If nonzero, the 1-based parameter attribute index into the table
+ of `PARAMATTR_CODE_ENTRY`_ entries.
+
+* *alignment*: The logarithm base 2 of the function's requested alignment, plus
+ 1
+
+* *section*: If non-zero, the 1-based section index in the table of
+ `MODULE_CODE_SECTIONNAME`_ entries.
+
+* *visibility*: An encoding of the `visibility`_ of this function
+
+* *gc*: If present and nonzero, the 1-based garbage collector index in the table
+ of `MODULE_CODE_GCNAME`_ entries.
+
+* *unnamed_addr*: If present and non-zero, indicates that the function has
+ ``unnamed_addr``
+
+MODULE_CODE_ALIAS Record
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+``[ALIAS, alias type, aliasee val#, linkage, visibility]``
+
+The ``ALIAS`` record (code 9) marks the definition of an alias. The operand
+fields are
+
+* *alias type*: The type index of the alias
+
+* *aliasee val#*: The value index of the aliased value
+
+* *linkage*: An encoding of the `linkage type`_ for this alias
+
+* *visibility*: If present, an encoding of the `visibility`_ of the alias
+
+MODULE_CODE_PURGEVALS Record
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+``[PURGEVALS, numvals]``
+
+The ``PURGEVALS`` record (code 10) resets the module-level value list to the
+size given by the single operand value. Module-level value list items are added
+by ``GLOBALVAR``, ``FUNCTION``, and ``ALIAS`` records. After a ``PURGEVALS``
+record is seen, new value indices will start from the given *numvals* value.
+
+.. _MODULE_CODE_GCNAME:
+
+MODULE_CODE_GCNAME Record
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+``[GCNAME, ...string...]``
+
+The ``GCNAME`` record (code 11) contains a variable number of values
+representing the bytes of a single garbage collector name string. There should
+be one ``GCNAME`` record for each garbage collector name referenced in function
+``gc`` attributes within the module. These records can be referenced by 1-based
+index in the *gc* fields of ``FUNCTION`` records.
+
+.. _PARAMATTR_BLOCK:
+
+PARAMATTR_BLOCK Contents
+------------------------
+
+The ``PARAMATTR_BLOCK`` block (id 9) contains a table of entries describing the
+attributes of function parameters. These entries are referenced by 1-based index
+in the *paramattr* field of module block `FUNCTION`_ records, or within the
+*attr* field of function block ``INST_INVOKE`` and ``INST_CALL`` records.
+
+Entries within ``PARAMATTR_BLOCK`` are constructed to ensure that each is unique
+(i.e., no two indices represent equivalent attribute lists).
+
+.. _PARAMATTR_CODE_ENTRY:
+
+PARAMATTR_CODE_ENTRY Record
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+``[ENTRY, paramidx0, attr0, paramidx1, attr1...]``
+
+The ``ENTRY`` record (code 1) contains an even number of values describing a
+unique set of function parameter attributes. Each *paramidx* value indicates
+which set of attributes is represented, with 0 representing the return value
+attributes, 0xFFFFFFFF representing function attributes, and other values
+representing 1-based function parameters. Each *attr* value is a bitmap with the
+following interpretation:
+
+* bit 0: ``zeroext``
+* bit 1: ``signext``
+* bit 2: ``noreturn``
+* bit 3: ``inreg``
+* bit 4: ``sret``
+* bit 5: ``nounwind``
+* bit 6: ``noalias``
+* bit 7: ``byval``
+* bit 8: ``nest``
+* bit 9: ``readnone``
+* bit 10: ``readonly``
+* bit 11: ``noinline``
+* bit 12: ``alwaysinline``
+* bit 13: ``optsize``
+* bit 14: ``ssp``
+* bit 15: ``sspreq``
+* bits 16-31: ``align n``
+* bit 32: ``nocapture``
+* bit 33: ``noredzone``
+* bit 34: ``noimplicitfloat``
+* bit 35: ``naked``
+* bit 36: ``inlinehint``
+* bits 37-39: ``alignstack n``, represented as the logarithm
+ base 2 of the requested alignment, plus 1
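+
+As an illustration of the layout above, a few of these fields could be unpacked
+from a single *attr* value like this (a decoding sketch, not LLVM's attribute
+machinery; names are hypothetical):
+
+.. code-block:: c++
+
+   #include <cstdint>
+
+   struct DecodedParamAttrs {
+     bool ZeroExt, SRet, ByVal;
+     uint64_t AlignField;       // raw bits 16-31 ('align n')
+     uint64_t AlignStackField;  // raw bits 37-39 ('alignstack n', log2 plus 1)
+   };
+
+   static DecodedParamAttrs decodeParamAttrs(uint64_t Attr) {
+     DecodedParamAttrs D;
+     D.ZeroExt         = Attr & (1ULL << 0);
+     D.SRet            = Attr & (1ULL << 4);
+     D.ByVal           = Attr & (1ULL << 7);
+     D.AlignField      = (Attr >> 16) & 0xFFFF;
+     D.AlignStackField = (Attr >> 37) & 0x7;
+     return D;
+   }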
+
+.. _TYPE_BLOCK:
+
+TYPE_BLOCK Contents
+-------------------
+
+The ``TYPE_BLOCK`` block (id 10) contains records which constitute a table of
+type operator entries used to represent types referenced within an LLVM
+module. Each record (with the exception of `NUMENTRY`_) generates a single type
+table entry, which may be referenced by 0-based index from instructions,
+constants, metadata, type symbol table entries, or other type operator records.
+
+Entries within ``TYPE_BLOCK`` are constructed to ensure that each entry is
+unique (i.e., no two indices represent structurally equivalent types).
+
+.. _TYPE_CODE_NUMENTRY:
+.. _NUMENTRY:
+
+TYPE_CODE_NUMENTRY Record
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+``[NUMENTRY, numentries]``
+
+The ``NUMENTRY`` record (code 1) contains a single value which indicates the
+total number of type code entries in the type table of the module. If present,
+``NUMENTRY`` should be the first record in the block.
+
+TYPE_CODE_VOID Record
+^^^^^^^^^^^^^^^^^^^^^
+
+``[VOID]``
+
+The ``VOID`` record (code 2) adds a ``void`` type to the type table.
+
+TYPE_CODE_HALF Record
+^^^^^^^^^^^^^^^^^^^^^
+
+``[HALF]``
+
+The ``HALF`` record (code 10) adds a ``half`` (16-bit floating point) type to
+the type table.
+
+TYPE_CODE_FLOAT Record
+^^^^^^^^^^^^^^^^^^^^^^
+
+``[FLOAT]``
+
+The ``FLOAT`` record (code 3) adds a ``float`` (32-bit floating point) type to
+the type table.
+
+TYPE_CODE_DOUBLE Record
+^^^^^^^^^^^^^^^^^^^^^^^
+
+``[DOUBLE]``
+
+The ``DOUBLE`` record (code 4) adds a ``double`` (64-bit floating point) type to
+the type table.
+
+TYPE_CODE_LABEL Record
+^^^^^^^^^^^^^^^^^^^^^^
+
+``[LABEL]``
+
+The ``LABEL`` record (code 5) adds a ``label`` type to the type table.
+
+TYPE_CODE_OPAQUE Record
+^^^^^^^^^^^^^^^^^^^^^^^
+
+``[OPAQUE]``
+
+The ``OPAQUE`` record (code 6) adds an ``opaque`` type to the type table. Note
+that distinct ``opaque`` types are not unified.
+
+TYPE_CODE_INTEGER Record
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+``[INTEGER, width]``
+
+The ``INTEGER`` record (code 7) adds an integer type to the type table. The
+single *width* field indicates the width of the integer type.
+
+TYPE_CODE_POINTER Record
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+``[POINTER, pointee type, address space]``
+
+The ``POINTER`` record (code 8) adds a pointer type to the type table. The
+operand fields are
+
+* *pointee type*: The type index of the pointed-to type
+
+* *address space*: If supplied, the target-specific numbered address space where
+ the pointed-to object resides. Otherwise, the default address space is zero.
+
+TYPE_CODE_FUNCTION Record
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+``[FUNCTION, vararg, ignored, retty, ...paramty... ]``
+
+The ``FUNCTION`` record (code 9) adds a function type to the type table. The
+operand fields are
+
+* *vararg*: Non-zero if the type represents a varargs function
+
+* *ignored*: This value field is present for backward compatibility only, and is
+ ignored
+
+* *retty*: The type index of the function's return type
+
+* *paramty*: Zero or more type indices representing the parameter types of the
+ function
+
+TYPE_CODE_STRUCT Record
+^^^^^^^^^^^^^^^^^^^^^^^
+
+``[STRUCT, ispacked, ...eltty...]``
+
+The ``STRUCT`` record (code 10) adds a struct type to the type table. The
+operand fields are
+
+* *ispacked*: Non-zero if the type represents a packed structure
+
+* *eltty*: Zero or more type indices representing the element types of the
+ structure
+
+TYPE_CODE_ARRAY Record
+^^^^^^^^^^^^^^^^^^^^^^
+
+``[ARRAY, numelts, eltty]``
+
+The ``ARRAY`` record (code 11) adds an array type to the type table. The
+operand fields are
+
+* *numelts*: The number of elements in arrays of this type
+
+* *eltty*: The type index of the array element type
+
+TYPE_CODE_VECTOR Record
+^^^^^^^^^^^^^^^^^^^^^^^
+
+``[VECTOR, numelts, eltty]``
+
+The ``VECTOR`` record (code 12) adds a vector type to the type table. The
+operand fields are
+
+* *numelts*: The number of elements in vectors of this type
+
+* *eltty*: The type index of the vector element type
+
+TYPE_CODE_X86_FP80 Record
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+``[X86_FP80]``
+
+The ``X86_FP80`` record (code 13) adds an ``x86_fp80`` (80-bit floating point)
+type to the type table.
+
+TYPE_CODE_FP128 Record
+^^^^^^^^^^^^^^^^^^^^^^
+
+``[FP128]``
+
+The ``FP128`` record (code 14) adds an ``fp128`` (128-bit floating point) type
+to the type table.
+
+TYPE_CODE_PPC_FP128 Record
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+``[PPC_FP128]``
+
+The ``PPC_FP128`` record (code 15) adds a ``ppc_fp128`` (128-bit floating point)
+type to the type table.
+
+TYPE_CODE_METADATA Record
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+``[METADATA]``
+
+The ``METADATA`` record (code 16) adds a ``metadata`` type to the type table.
+
+.. _CONSTANTS_BLOCK:
+
+CONSTANTS_BLOCK Contents
+------------------------
+
+The ``CONSTANTS_BLOCK`` block (id 11) ...
+
+.. _FUNCTION_BLOCK:
+
+FUNCTION_BLOCK Contents
+-----------------------
+
+The ``FUNCTION_BLOCK`` block (id 12) ...
+
+In addition to the record types described below, a ``FUNCTION_BLOCK`` block may
+contain the following sub-blocks:
+
+* `CONSTANTS_BLOCK`_
+* `VALUE_SYMTAB_BLOCK`_
+* `METADATA_ATTACHMENT`_
+
+.. _TYPE_SYMTAB_BLOCK:
+
+TYPE_SYMTAB_BLOCK Contents
+--------------------------
+
+The ``TYPE_SYMTAB_BLOCK`` block (id 13) contains entries which map between
+module-level named types and their corresponding type indices.
+
+.. _TST_CODE_ENTRY:
+
+TST_CODE_ENTRY Record
+^^^^^^^^^^^^^^^^^^^^^
+
+``[ENTRY, typeid, ...string...]``
+
+The ``ENTRY`` record (code 1) contains a variable number of values, with the
+first giving the type index of the designated type, and the remaining values
+giving the character codes of the type name. Each entry corresponds to a single
+named type.
+
+.. _VALUE_SYMTAB_BLOCK:
+
+VALUE_SYMTAB_BLOCK Contents
+---------------------------
+
+The ``VALUE_SYMTAB_BLOCK`` block (id 14) ...
+
+.. _METADATA_BLOCK:
+
+METADATA_BLOCK Contents
+-----------------------
+
+The ``METADATA_BLOCK`` block (id 15) ...
+
+.. _METADATA_ATTACHMENT:
+
+METADATA_ATTACHMENT Contents
+----------------------------
+
+The ``METADATA_ATTACHMENT`` block (id 16) ...
diff --git a/docs/BranchWeightMetadata.rst b/docs/BranchWeightMetadata.rst
new file mode 100644
index 00000000000..f0df971f874
--- /dev/null
+++ b/docs/BranchWeightMetadata.rst
@@ -0,0 +1,118 @@
+.. _branch_weight:
+
+===========================
+LLVM Branch Weight Metadata
+===========================
+
+.. contents::
+ :local:
+
+Introduction
+============
+
+Branch Weight Metadata encodes the likelihood of each branch successor being
+taken. Metadata is assigned to a ``TerminatorInst`` as an ``MDNode`` of the
+``MD_prof`` kind. The first operand is always an ``MDString`` node with the
+string "branch_weights". The number of remaining operands depends on the
+terminator type.
+
+Branch weights may be fetched from a profiling file, or generated based on the
+`__builtin_expect`_ instruction.
+
+All weights are represented as unsigned 32-bit values, where a higher value
+indicates a greater chance of the branch being taken.
+
+Supported Instructions
+======================
+
+``BranchInst``
+^^^^^^^^^^^^^^
+
+Metadata is only assigned to conditional branches. There are two extra
+operands, one for the true branch and one for the false branch.
+
+.. code-block:: llvm
+
+ !0 = metadata !{
+ metadata !"branch_weights",
+ i32 <TRUE_BRANCH_WEIGHT>,
+ i32 <FALSE_BRANCH_WEIGHT>
+ }
+
+``SwitchInst``
+^^^^^^^^^^^^^^
+
+Branch weights are assigned to every case (including the ``default`` case,
+which is always case #0).
+
+.. code-block:: llvm
+
+ !0 = metadata !{
+ metadata !"branch_weights",
+ i32 <DEFAULT_BRANCH_WEIGHT>
+ [ , i32 <CASE_BRANCH_WEIGHT> ... ]
+ }
+
+``IndirectBrInst``
+^^^^^^^^^^^^^^^^^^
+
+Branch weights are assigned to every destination.
+
+.. code-block:: llvm
+
+ !0 = metadata !{
+ metadata !"branch_weights",
+ i32 <LABEL_BRANCH_WEIGHT>
+ [ , i32 <LABEL_BRANCH_WEIGHT> ... ]
+ }
+
+Other
+^^^^^
+
+Other terminator instructions are not allowed to contain Branch Weight Metadata.
+
+.. _\__builtin_expect:
+
+Built-in ``expect`` Instructions
+================================
+
+The ``__builtin_expect(long exp, long c)`` instruction provides branch
+prediction information. The return value is the value of ``exp``.
+
+It is especially useful in conditional statements. Currently Clang supports two
+conditional statements:
+
+``if`` statement
+^^^^^^^^^^^^^^^^
+
+The ``exp`` parameter is the condition. The ``c`` parameter is the expected
+comparison value. If it is equal to 1 (true), the condition is likely to be
+true; otherwise, the condition is likely to be false. For example:
+
+.. code-block:: c++
+
+ if (__builtin_expect(x > 0, 1)) {
+ // This block is likely to be taken.
+ }
+
+``switch`` statement
+^^^^^^^^^^^^^^^^^^^^
+
+The ``exp`` parameter is the value. The ``c`` parameter is the expected
+value. If the expected value does not appear in the case list, the ``default``
+case is assumed to be the one likely taken.
+
+.. code-block:: c++
+
+ switch (__builtin_expect(x, 5)) {
+ default: break;
+ case 0: // ...
+ case 3: // ...
+ case 5: // This case is likely to be taken.
+ }
+
+CFG Modifications
+=================
+
+Branch Weight Metadata is not proof against CFG changes. If a terminator's
+operands are changed, the metadata should be updated or dropped accordingly;
+otherwise, misoptimizations may occur due to incorrect branch prediction
+information.
diff --git a/docs/Bugpoint.rst b/docs/Bugpoint.rst
new file mode 100644
index 00000000000..9ccf0cc2d9d
--- /dev/null
+++ b/docs/Bugpoint.rst
@@ -0,0 +1,218 @@
+.. _bugpoint:
+
+====================================
+LLVM bugpoint tool: design and usage
+====================================
+
+.. contents::
+ :local:
+
+Description
+===========
+
+``bugpoint`` narrows down the source of problems in LLVM tools and passes. It
+can be used to debug three types of failures: optimizer crashes, miscompilations
+by optimizers, or bad native code generation (including problems in the static
+and JIT compilers). It aims to reduce large test cases to small, useful ones.
+For example, if ``opt`` crashes while optimizing a file, it will identify the
+optimization (or combination of optimizations) that causes the crash, and reduce
+the file down to a small example which triggers the crash.
+
+For detailed case scenarios, such as debugging ``opt``, or one of the LLVM code
+generators, see `How To Submit a Bug Report document <HowToSubmitABug.html>`_.
+
+Design Philosophy
+=================
+
+``bugpoint`` is designed to be a useful tool without requiring any hooks into
+the LLVM infrastructure at all. It works with any and all LLVM passes and code
+generators, and does not need to "know" how they work. Because of this, it may
+appear to do stupid things or miss obvious simplifications. ``bugpoint`` is
+also designed to trade off programmer time for computer time in the
+compiler-debugging process; consequently, it may take a long period of
+(unattended) time to reduce a test case, but we feel it is still worth it. Note
+that ``bugpoint`` is generally very quick unless debugging a miscompilation
+where each test of the program (which requires executing it) takes a long time.
+
+Automatic Debugger Selection
+----------------------------
+
+``bugpoint`` reads each ``.bc`` or ``.ll`` file specified on the command line
+and links them together into a single module, called the test program. If any
+LLVM passes are specified on the command line, it runs these passes on the test
+program. If any of the passes crash, or if they produce malformed output (which
+causes the verifier to abort), ``bugpoint`` starts the `crash debugger`_.
+
+Otherwise, if the ``-output`` option was not specified, ``bugpoint`` runs the
+test program with the "safe" backend (which is assumed to generate good code) to
+generate a reference output. Once ``bugpoint`` has a reference output for the
+test program, it tries executing it with the selected code generator. If the
+selected code generator crashes, ``bugpoint`` starts the `crash debugger`_ on
+the code generator. Otherwise, if the resulting output differs from the
+reference output, it assumes the difference resulted from a code generator
+failure, and starts the `code generator debugger`_.
+
+Finally, if the output of the selected code generator matches the reference
+output, ``bugpoint`` runs the test program after all of the LLVM passes have
+been applied to it. If its output differs from the reference output, it assumes
+the difference resulted from a failure in one of the LLVM passes, and enters the
+`miscompilation debugger`_. Otherwise, there is no problem ``bugpoint`` can
+debug.
+
+.. _crash debugger:
+
+Crash debugger
+--------------
+
+If an optimizer or code generator crashes, ``bugpoint`` will try as hard as it
+can to reduce the list of passes (for optimizer crashes) and the size of the
+test program. First, ``bugpoint`` figures out which combination of optimizer
+passes triggers the bug. This is useful when debugging a problem exposed by
+``opt``, for example, because it runs over 38 passes.
+
+Next, ``bugpoint`` tries removing functions from the test program, to reduce its
+size. Usually it is able to reduce a test program to a single function, when
+debugging intraprocedural optimizations. Once the number of functions has been
+reduced, it attempts to delete various edges in the control flow graph, to
+reduce the size of the function as much as possible. Finally, ``bugpoint``
+deletes any individual LLVM instructions whose absence does not eliminate the
+failure. At the end, ``bugpoint`` should tell you what passes crash, give you a
+bitcode file, and give you instructions on how to reproduce the failure with
+``opt`` or ``llc``.
+
+.. _code generator debugger:
+
+Code generator debugger
+-----------------------
+
+The code generator debugger attempts to narrow down the amount of code that is
+being miscompiled by the selected code generator. To do this, it takes the test
+program and partitions it into two pieces: one piece which it compiles with the
+"safe" backend (into a shared object), and one piece which it runs with either
+the JIT or the static LLC compiler. It uses several techniques to reduce the
+amount of code pushed through the LLVM code generator, to reduce the potential
+scope of the problem. After it is finished, it emits two bitcode files (called
+"test" [to be compiled with the code generator] and "safe" [to be compiled with
+the "safe" backend], respectively), and instructions for reproducing the
+problem. The code generator debugger assumes that the "safe" backend produces
+good code.
+
+.. _miscompilation debugger:
+
+Miscompilation debugger
+-----------------------
+
+The miscompilation debugger works similarly to the code generator debugger. It
+works by splitting the test program into two pieces, running the optimizations
+specified on one piece, linking the two pieces back together, and then executing
+the result. It attempts to narrow down the list of passes to the one (or few)
+which are causing the miscompilation, then reduce the portion of the test
+program which is being miscompiled. The miscompilation debugger assumes that
+the selected code generator is working properly.
+
+Advice for using bugpoint
+=========================
+
+``bugpoint`` can be a remarkably useful tool, but it sometimes works in
+non-obvious ways. Here are some hints and tips:
+
+* In the code generator and miscompilation debuggers, ``bugpoint`` only works
+ with programs that have deterministic output. Thus, if the program outputs
+ ``argv[0]``, the date, time, or any other "random" data, ``bugpoint`` may
+ misinterpret differences in these data, when output, as the result of a
+ miscompilation. Programs should be temporarily modified to disable outputs
+ that are likely to vary from run to run.
+
+* In the code generator and miscompilation debuggers, debugging will go faster
+ if you manually modify the program or its inputs to reduce the runtime, but
+ still exhibit the problem.
+
+* ``bugpoint`` is extremely useful when working on a new optimization: it helps
+ track down regressions quickly. To avoid having to relink ``bugpoint`` every
+ time you change your optimization however, have ``bugpoint`` dynamically load
+ your optimization with the ``-load`` option.
+
+* ``bugpoint`` can generate a lot of output and run for a long period of time.
+ It is often useful to capture the output of the program to file. For example,
+ in the C shell, you can run:
+
+ .. code-block:: bash
+
+ bugpoint ... |& tee bugpoint.log
+
+ to get a copy of ``bugpoint``'s output in the file ``bugpoint.log``, as well
+ as on your terminal.
+
+* ``bugpoint`` cannot debug problems with the LLVM linker. If ``bugpoint``
+ crashes before you see its "All input ok" message, you might try ``llvm-link
+ -v`` on the same set of input files. If that also crashes, you may be
+ experiencing a linker bug.
+
+* ``bugpoint`` is useful for proactively finding bugs in LLVM. Invoking
+ ``bugpoint`` with the ``-find-bugs`` option will cause the list of specified
+ optimizations to be randomized and applied to the program. This process will
+ repeat until a bug is found or the user kills ``bugpoint``.
+
+What to do when bugpoint isn't enough
+=====================================
+
+Sometimes, ``bugpoint`` is not enough. In particular, InstCombine and
+TargetLowering both have visitor structured code with lots of potential
+transformations. If the process of using bugpoint has left you with still too
+much code to figure out and the problem seems to be in instcombine, the
+following steps may help. These same techniques are useful with TargetLowering
+as well.
+
+Turn on ``-debug-only=instcombine`` and see which transformations within
+instcombine are firing by selecting out lines with "``IC``" in them.
+
+At this point, you have a decision to make. Is the number of transformations
+small enough to step through them using a debugger? If so, then try that.
+
+If there are too many transformations, then a source modification approach may
+be helpful. In this approach, you can modify the source code of instcombine to
+disable just those transformations that are being performed on your test input
+and perform a binary search over the set of transformations. One set of places
+to modify are the "``visit*``" methods of ``InstCombiner`` (*e.g.*
+``visitICmpInst``) by adding a "``return false``" as the first line of the
+method.
+
+If that still doesn't remove enough, then change the caller of
+``InstCombiner::DoOneIteration``, ``InstCombiner::runOnFunction`` to limit the
+number of iterations.
+
+You may also find it useful to use "``-stats``" now to see what parts of
+instcombine are firing. This can guide where to put additional reporting code.
+
+At this point, if the amount of transformations is still too large, then
+inserting code to limit whether or not to execute the body of the code in the
+visit function can be helpful. Add a static counter which is incremented on
+every invocation of the function. Then add code which simply returns false on
+desired ranges. For example:
+
+.. code-block:: c++
+
+ static int calledCount = 0;
+ calledCount++;
+ DEBUG(if (calledCount < 212) return false);
+ DEBUG(if (calledCount > 217) return false);
+ DEBUG(if (calledCount == 213) return false);
+ DEBUG(if (calledCount == 214) return false);
+ DEBUG(if (calledCount == 215) return false);
+ DEBUG(if (calledCount == 216) return false);
+ DEBUG(dbgs() << "visitXOR calledCount: " << calledCount << "\n");
+ DEBUG(dbgs() << "I: "; I->dump());
+
+could be added to ``visitXOR`` to limit ``visitXOR`` to being applied only to
+calls 212 and 217. This is from an actual test case and raises an important
+point---a simple binary search may not be sufficient, as transformations that
+interact may require isolating more than one call. In TargetLowering, use
+``return SDNode();`` instead of ``return false;``.
+
+Now that the number of transformations is down to a manageable number, try
+examining the output to see if you can figure out which transformations are
+being done. If that can be figured out, then do the usual debugging. If it
+isn't obvious which code corresponds to the transformation being performed,
+set a breakpoint after the call-count-based disabling and step through the
+code. Alternatively, you can use "``printf``"-style debugging to report
+waypoints.
diff --git a/docs/CMake.rst b/docs/CMake.rst
new file mode 100644
index 00000000000..e1761c5b1d4
--- /dev/null
+++ b/docs/CMake.rst
@@ -0,0 +1,423 @@
+.. _building-with-cmake:
+
+========================
+Building LLVM with CMake
+========================
+
+.. contents::
+ :local:
+
+Introduction
+============
+
+`CMake <http://www.cmake.org/>`_ is a cross-platform build-generator tool. CMake
+does not build the project, it generates the files needed by your build tool
+(GNU make, Visual Studio, etc) for building LLVM.
+
+If you are really anxious about getting a functional LLVM build, go to the
+`Quick start`_ section. If you are a CMake novice, start on `Basic CMake usage`_
+and then go back to the `Quick start`_ once you know what you are doing. The
+`Options and variables`_ section is a reference for customizing your build. If
+you already have experience with CMake, this is the recommended starting point.
+
+.. _Quick start:
+
+Quick start
+===========
+
+We use here the command-line, non-interactive CMake interface.
+
+#. `Download <http://www.cmake.org/cmake/resources/software.html>`_ and install
+ CMake. Version 2.8 is the minimum required.
+
+#. Open a shell. Your development tools must be reachable from this shell
+ through the PATH environment variable.
+
+#. Create a directory to contain the build. Building LLVM in the source
+ directory is not supported. cd to this directory:
+
+ .. code-block:: bash
+
+ $ mkdir mybuilddir
+ $ cd mybuilddir
+
+#. Execute this command on the shell replacing `path/to/llvm/source/root` with
+ the path to the root of your LLVM source tree:
+
+ .. code-block:: bash
+
+ $ cmake path/to/llvm/source/root
+
+ CMake will detect your development environment, perform a series of tests,
+ and generate the files required for building LLVM. CMake will use default
+ values for all build parameters. See the `Options and variables`_ section
+ for fine-tuning your build.
+
+ This can fail if CMake can't detect your toolset, or if it thinks that the
+ environment is not sane enough. In this case, make sure that the toolset that
+ you intend to use is the only one reachable from the shell and that the shell
+ itself is the correct one for your development environment. CMake will refuse
+ to build MinGW makefiles if you have a POSIX shell reachable through the PATH
+ environment variable, for instance. To force CMake to use a given build tool,
+ see the `Usage`_ section.
+
+.. _Basic CMake usage:
+.. _Usage:
+
+Basic CMake usage
+=================
+
+This section explains basic aspects of CMake, mostly for explaining those
+options which you may need on your day-to-day usage.
+
+CMake comes with extensive documentation in the form of html files and on the
+cmake executable itself. Execute ``cmake --help`` for further help options.
+
+CMake needs to know which build tool it shall generate files for (GNU make,
+Visual Studio, Xcode, etc). If not specified on the command line, it tries to
+guess based on your environment. Once the build tool is identified, CMake uses
+the corresponding *Generator* to create files for it. You can explicitly
+specify the generator with the command line option ``-G "Name of the
+generator"``. To list the available generators on your platform, execute
+
+.. code-block:: bash
+
+ $ cmake --help
+
+This will list the generator names at the end of the help text. Generator
+names are case-sensitive. Example:
+
+.. code-block:: bash
+
+ $ cmake -G "Visual Studio 9 2008" path/to/llvm/source/root
+
+For a given development platform there can be more than one adequate
+generator. If you use Visual Studio, "NMake Makefiles" is a generator you can
+use for building with NMake. By default, CMake chooses the most specific
+generator supported by your development environment. If you want an alternative
+generator, you must tell CMake with the ``-G`` option.
+
+.. todo::
+
+ Explain variables and cache. Move explanation here from #options section.
+
+.. _Options and variables:
+
+Options and variables
+=====================
+
+Variables customize how the build will be generated. Options are boolean
+variables, with possible values ON/OFF. Options and variables are defined on the
+CMake command line like this:
+
+.. code-block:: bash
+
+ $ cmake -DVARIABLE=value path/to/llvm/source
+
+You can set a variable after the initial CMake invocation to change its
+value. You can also undefine a variable:
+
+.. code-block:: bash
+
+ $ cmake -UVARIABLE path/to/llvm/source
+
+Variables are stored in the CMake cache, a file named ``CMakeCache.txt`` in
+the root of the build directory. Do not hand-edit it.
+
+Variables are listed below with their type appended after a colon. It is also
+valid to write the variable and the type on the CMake command line:
+
+.. code-block:: bash
+
+ $ cmake -DVARIABLE:TYPE=value path/to/llvm/source
+
+Frequently-used CMake variables
+-------------------------------
+
+Here are some of the CMake variables that are used often, along with a brief
+explanation and LLVM-specific notes. For full documentation, check the CMake
+docs or execute ``cmake --help-variable VARIABLE_NAME``.
+
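+For example, a common invocation that selects a release build and a custom
+installation prefix (the prefix below is only an example) looks like this:
+
+.. code-block:: bash
+
+  $ cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/opt/llvm \
+          path/to/llvm/source/root
+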
+**CMAKE_BUILD_TYPE**:STRING
+  Sets the build type for ``make``-based generators. Possible values are
+  Release, Debug, RelWithDebInfo and MinSizeRel. For IDE-based generators like
+  Visual Studio, the user sets the build type with the IDE settings.
+
+**CMAKE_INSTALL_PREFIX**:PATH
+ Path where LLVM will be installed if "make install" is invoked or the
+ "INSTALL" target is built.
+
+**LLVM_LIBDIR_SUFFIX**:STRING
+ Extra suffix to append to the directory where libraries are to be
+ installed. On a 64-bit architecture, one could use ``-DLLVM_LIBDIR_SUFFIX=64``
+ to install libraries to ``/usr/lib64``.
+
+**CMAKE_C_FLAGS**:STRING
+ Extra flags to use when compiling C source files.
+
+**CMAKE_CXX_FLAGS**:STRING
+ Extra flags to use when compiling C++ source files.
+
+**BUILD_SHARED_LIBS**:BOOL
+  Flag indicating whether shared libraries will be built. Its default value is
+  OFF. Shared libraries are not supported on Windows and are not recommended on
+  other operating systems.
+
+.. _LLVM-specific variables:
+
+LLVM-specific variables
+-----------------------
+
+**LLVM_TARGETS_TO_BUILD**:STRING
+  Semicolon-separated list of targets to build, or *all* for building all
+  targets. Case-sensitive. Defaults to *X86* for Visual C++ and to *all*
+  otherwise. Example: ``-DLLVM_TARGETS_TO_BUILD="X86;PowerPC"``.
+
+**LLVM_BUILD_TOOLS**:BOOL
+  Build LLVM tools. Defaults to ON. Targets for building each tool are generated
+  in any case. You can build a tool separately by invoking its target. For
+  example, you can build *llvm-as* with a makefile-based system by executing
+  *make llvm-as* in the root of your build directory.
+
+**LLVM_INCLUDE_TOOLS**:BOOL
+  Generate build targets for the LLVM tools. Defaults to ON. You can use this
+  option to disable the generation of build targets for the LLVM tools.
+
+**LLVM_BUILD_EXAMPLES**:BOOL
+ Build LLVM examples. Defaults to OFF. Targets for building each example are
+ generated in any case. See documentation for *LLVM_BUILD_TOOLS* above for more
+ details.
+
+**LLVM_INCLUDE_EXAMPLES**:BOOL
+  Generate build targets for the LLVM examples. Defaults to ON. You can use this
+  option to disable the generation of build targets for the LLVM examples.
+
+**LLVM_BUILD_TESTS**:BOOL
+  Build LLVM unit tests. Defaults to OFF. Targets for building each unit test
+  are generated in any case. You can build a specific unit test with the target
+  *UnitTestNameTests* (where at this time *UnitTestName* can be ADT, Analysis,
+  ExecutionEngine, JIT, Support, Transform, VMCore; see the subdirectories of
+  *unittests* for an updated list). It is possible to build all unit tests with
+  the target *UnitTests*.
+
+**LLVM_INCLUDE_TESTS**:BOOL
+  Generate build targets for the LLVM unit tests. Defaults to ON. You can use
+  this option to disable the generation of build targets for the LLVM unit
+  tests.
+
+**LLVM_APPEND_VC_REV**:BOOL
+ Append version control revision info (svn revision number or git revision id)
+ to LLVM version string (stored in the PACKAGE_VERSION macro). For this to work
+ cmake must be invoked before the build. Defaults to OFF.
+
+**LLVM_ENABLE_THREADS**:BOOL
+  Build with thread support, if available. Defaults to ON.
+
+**LLVM_ENABLE_ASSERTIONS**:BOOL
+ Enables code assertions. Defaults to OFF if and only if ``CMAKE_BUILD_TYPE``
+ is *Release*.
+
+**LLVM_ENABLE_PIC**:BOOL
+  Add the ``-fPIC`` flag to the compiler command line, if the compiler supports
+  this flag. Some systems, like Windows, do not need this flag. Defaults to ON.
+
+**LLVM_ENABLE_WARNINGS**:BOOL
+ Enable all compiler warnings. Defaults to ON.
+
+**LLVM_ENABLE_PEDANTIC**:BOOL
+  Enable pedantic mode. This disables compiler-specific extensions, if
+  possible. Defaults to ON.
+
+**LLVM_ENABLE_WERROR**:BOOL
+  Stop and fail the build if a compiler warning is triggered. Defaults to OFF.
+
+**LLVM_BUILD_32_BITS**:BOOL
+  Build 32-bit executables and libraries on 64-bit systems. This option is
+  available only on some 64-bit Unix systems. Defaults to OFF.
+
+**LLVM_TARGET_ARCH**:STRING
+ LLVM target to use for native code generation. This is required for JIT
+ generation. It defaults to "host", meaning that it shall pick the architecture
+ of the machine where LLVM is being built. If you are cross-compiling, set it
+ to the target architecture name.
+
+**LLVM_TABLEGEN**:STRING
+ Full path to a native TableGen executable (usually named ``tblgen``). This is
+ intended for cross-compiling: if the user sets this variable, no native
+ TableGen will be created.
+
+**LLVM_LIT_ARGS**:STRING
+ Arguments given to lit. ``make check`` and ``make clang-test`` are affected.
+ By default, ``'-sv --no-progress-bar'`` on Visual C++ and Xcode, ``'-sv'`` on
+ others.
+
+**LLVM_LIT_TOOLS_DIR**:PATH
+  The path to GnuWin32 tools for tests. Valid on a Windows host. Defaults to "",
+  in which case lit looks for the tools via %PATH%. When set, lit looks for
+  tools (e.g. grep, sort, etc.) in LLVM_LIT_TOOLS_DIR first, so GnuWin32 does
+  not need to be added to %PATH%.
+
+**LLVM_ENABLE_FFI**:BOOL
+  Indicates whether the LLVM Interpreter will be linked with the Foreign
+  Function Interface library. If the library or its headers are installed in a
+  custom location, you can also set the variables FFI_INCLUDE_DIR and
+  FFI_LIBRARY_DIR. Defaults to OFF.
+
+**LLVM_EXTERNAL_{CLANG,LLD,POLLY}_SOURCE_DIR**:PATH
+  Path to ``{Clang,lld,Polly}``\'s source directory. Defaults to
+  ``tools/{clang,lld,polly}``. ``{Clang,lld,Polly}`` will not be built when the
+  variable is empty or does not point to a valid path.
+
+**LLVM_USE_OPROFILE**:BOOL
+  Enable building OProfile JIT support. Defaults to OFF.
+
+**LLVM_USE_INTEL_JITEVENTS**:BOOL
+  Enable building support for the Intel JIT Events API. Defaults to OFF.
+
+**LLVM_INTEL_JITEVENTS_DIR**:PATH
+ Path to installation of Intel(R) VTune(TM) Amplifier XE 2011, used to locate
+ the ``jitprofiling`` library. Default = ``%VTUNE_AMPLIFIER_XE_2011_DIR%``
+ (Windows) | ``/opt/intel/vtune_amplifier_xe_2011`` (Linux)
+
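+As an illustration, a configuration that builds only the X86 and ARM backends
+with assertions enabled (the target list here is just an example) could be
+requested like this:
+
+.. code-block:: bash
+
+  $ cmake -DLLVM_TARGETS_TO_BUILD="X86;ARM" -DLLVM_ENABLE_ASSERTIONS=ON \
+          path/to/llvm/source/root
+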
+Executing the test suite
+========================
+
+Testing is performed when the *check* target is built. For instance, if you are
+using makefiles, execute this command while on the top level of your build
+directory:
+
+.. code-block:: bash
+
+ $ make check
+
+On Visual Studio, you may run the tests by building the "check" project.
+
+Cross compiling
+===============
+
+See `this wiki page <http://www.vtk.org/Wiki/CMake_Cross_Compiling>`_ for
+generic instructions on how to cross-compile with CMake. It goes into detailed
+explanations and may seem daunting, but it is not. On the wiki page there are
+several examples including toolchain files. Go directly to `this section
+<http://www.vtk.org/Wiki/CMake_Cross_Compiling#Information_how_to_set_up_various_cross_compiling_toolchains>`_
+for a quick solution.
+
+Also see the `LLVM-specific variables`_ section for variables used when
+cross-compiling.
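+
+For example, a cross-compile configuration typically combines a standard CMake
+toolchain file with the LLVM-specific variables described above. A sketch (the
+paths and target below are only illustrative) might look like this:
+
+.. code-block:: bash
+
+  $ cmake -DCMAKE_TOOLCHAIN_FILE=/path/to/Toolchain-ARM.cmake \
+          -DLLVM_TABLEGEN=/path/to/native/bin/tblgen \
+          -DLLVM_TARGET_ARCH=ARM \
+          -DLLVM_TARGETS_TO_BUILD=ARM \
+          path/to/llvm/source/root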
+
+Embedding LLVM in your project
+==============================
+
+The most difficult part of adding LLVM to the build of a project is to determine
+the set of LLVM libraries corresponding to the set of required LLVM
+features. What follows is an example of how to obtain this information:
+
+.. code-block:: cmake
+
+ # A convenience variable:
+ set(LLVM_ROOT "" CACHE PATH "Root of LLVM install.")
+
+ # A bit of a sanity check:
+ if( NOT EXISTS ${LLVM_ROOT}/include/llvm )
+ message(FATAL_ERROR "LLVM_ROOT (${LLVM_ROOT}) is not a valid LLVM install")
+ endif()
+
+ # We incorporate the CMake features provided by LLVM:
+ set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${LLVM_ROOT}/share/llvm/cmake")
+ include(LLVMConfig)
+
+ # Now set the header and library paths:
+ include_directories( ${LLVM_INCLUDE_DIRS} )
+ link_directories( ${LLVM_LIBRARY_DIRS} )
+ add_definitions( ${LLVM_DEFINITIONS} )
+
+ # Let's suppose we want to build a JIT compiler with support for
+ # binary code (no interpreter):
+ llvm_map_components_to_libraries(REQ_LLVM_LIBRARIES jit native)
+
+ # Finally, we link the LLVM libraries to our executable:
+ target_link_libraries(mycompiler ${REQ_LLVM_LIBRARIES})
+
+This assumes that LLVM_ROOT points to an install of LLVM. The procedure also
+works for uninstalled builds, although we need to take care to add an
+``include_directories`` entry for the location of the headers in the LLVM
+source directory (if we are building out-of-source).
+
+Alternatively, you can use CMake's ``find_package`` functionality. Here is
+an equivalent variant of the snippet shown above:
+
+.. code-block:: cmake
+
+ find_package(LLVM)
+
+ if( NOT LLVM_FOUND )
+ message(FATAL_ERROR "LLVM package can't be found. Set CMAKE_PREFIX_PATH variable to LLVM's installation prefix.")
+ endif()
+
+ include_directories( ${LLVM_INCLUDE_DIRS} )
+ link_directories( ${LLVM_LIBRARY_DIRS} )
+
+ llvm_map_components_to_libraries(REQ_LLVM_LIBRARIES jit native)
+
+ target_link_libraries(mycompiler ${REQ_LLVM_LIBRARIES})
+
+Developing LLVM pass out of source
+----------------------------------
+
+It is possible to develop LLVM passes against an installed copy of LLVM. An
+example project layout is provided below:
+
+.. code-block:: bash
+
+ <project dir>/
+ |
+ CMakeLists.txt
+ <pass name>/
+ |
+ CMakeLists.txt
+ Pass.cpp
+ ...
+
+Contents of ``<project dir>/CMakeLists.txt``:
+
+.. code-block:: cmake
+
+ find_package(LLVM)
+
+  # Define the add_llvm_* macros.
+ include(AddLLVM)
+
+ add_definitions(${LLVM_DEFINITIONS})
+ include_directories(${LLVM_INCLUDE_DIRS})
+ link_directories(${LLVM_LIBRARY_DIRS})
+
+ add_subdirectory(<pass name>)
+
+Contents of ``<project dir>/<pass name>/CMakeLists.txt``:
+
+.. code-block:: cmake
+
+ add_llvm_loadable_module(LLVMPassname
+ Pass.cpp
+ )
+
+When you are done developing your pass, you may wish to integrate it
+into the LLVM source tree. You can achieve this in two easy steps:
+
+#. Copy the ``<pass name>`` folder into the ``<LLVM root>/lib/Transform``
+   directory.
+
+#. Add the line ``add_subdirectory(<pass name>)`` to
+   ``<LLVM root>/lib/Transform/CMakeLists.txt``.
+
+Compiler/Platform specific topics
+=================================
+
+Notes for specific compilers and/or platforms.
+
+Microsoft Visual C++
+--------------------
+
+**LLVM_COMPILER_JOBS**:STRING
+  Specifies the maximum number of parallel compiler jobs to use per project
+  when building with msbuild or Visual Studio. Only supported for Visual Studio
+  2008 and Visual Studio 2010 CMake generators. 0 means use all
+  processors. Default is 0.
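+
+For example, to limit a Visual Studio 2010 build to four parallel compiler jobs
+(the exact generator name depends on your CMake version), you could configure
+with something like:
+
+.. code-block:: bash
+
+  $ cmake -G "Visual Studio 10" -DLLVM_COMPILER_JOBS=4 path/to/llvm/source/root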
diff --git a/docs/CodeGenerator.rst b/docs/CodeGenerator.rst
new file mode 100644
index 00000000000..d1d0231105b
--- /dev/null
+++ b/docs/CodeGenerator.rst
@@ -0,0 +1,2428 @@
+.. _code_generator:
+
+==========================================
+The LLVM Target-Independent Code Generator
+==========================================
+
+.. role:: raw-html(raw)
+ :format: html
+
+.. raw:: html
+
+ <style>
+ .unknown { background-color: #C0C0C0; text-align: center; }
+ .unknown:before { content: "?" }
+ .no { background-color: #C11B17 }
+ .no:before { content: "N" }
+ .partial { background-color: #F88017 }
+ .yes { background-color: #0F0; }
+ .yes:before { content: "Y" }
+ </style>
+
+.. contents::
+ :local:
+
+.. warning::
+ This is a work in progress.
+
+Introduction
+============
+
+The LLVM target-independent code generator is a framework that provides a suite
+of reusable components for translating the LLVM internal representation to the
+machine code for a specified target---either in assembly form (suitable for a
+static compiler) or in binary machine code format (usable for a JIT
+compiler). The LLVM target-independent code generator consists of six main
+components:
+
+1. `Abstract target description`_ interfaces which capture important properties
+ about various aspects of the machine, independently of how they will be used.
+ These interfaces are defined in ``include/llvm/Target/``.
+
+2. Classes used to represent the `code being generated`_ for a target. These
+ classes are intended to be abstract enough to represent the machine code for
+ *any* target machine. These classes are defined in
+ ``include/llvm/CodeGen/``. At this level, concepts like "constant pool
+ entries" and "jump tables" are explicitly exposed.
+
+3. Classes and algorithms used to represent code at the object file level, the
+ `MC Layer`_. These classes represent assembly level constructs like labels,
+ sections, and instructions. At this level, concepts like "constant pool
+ entries" and "jump tables" don't exist.
+
+4. `Target-independent algorithms`_ used to implement various phases of native
+ code generation (register allocation, scheduling, stack frame representation,
+ etc). This code lives in ``lib/CodeGen/``.
+
+5. `Implementations of the abstract target description interfaces`_ for
+ particular targets. These machine descriptions make use of the components
+ provided by LLVM, and can optionally provide custom target-specific passes,
+ to build complete code generators for a specific target. Target descriptions
+ live in ``lib/Target/``.
+
+6. The target-independent JIT components. The LLVM JIT is completely target
+   independent (it uses the ``TargetJITInfo`` structure to interface with
+   target-specific code). The code for the target-independent JIT lives in
+ ``lib/ExecutionEngine/JIT``.
+
+Depending on which part of the code generator you are interested in working on,
+different pieces of this will be useful to you. In any case, you should be
+familiar with the `target description`_ and `machine code representation`_
+classes. If you want to add a backend for a new target, you will need to
+`implement the target description`_ classes for your new target and understand
+the `LLVM code representation <LangRef.html>`_. If you are interested in
+implementing a new `code generation algorithm`_, it should only depend on the
+target-description and machine code representation classes, ensuring that it is
+portable.
+
+Required components in the code generator
+-----------------------------------------
+
+The two pieces of the LLVM code generator are the high-level interface to the
+code generator and the set of reusable components that can be used to build
+target-specific backends. The two most important interfaces (:raw-html:`<tt>`
+`TargetMachine`_ :raw-html:`</tt>` and :raw-html:`<tt>` `TargetData`_
+:raw-html:`</tt>`) are the only ones that are required to be defined for a
+backend to fit into the LLVM system, but the others must be defined if the
+reusable code generator components are going to be used.
+
+This design has two important implications. The first is that LLVM can support
+completely non-traditional code generation targets. For example, the C backend
+does not require register allocation, instruction selection, or any of the other
+standard components provided by the system. As such, it only implements these
+two interfaces, and does its own thing. Note that the C backend was removed
+from the trunk as of the LLVM 3.1 release. Another example of a code generator
+like this is a
+(purely hypothetical) backend that converts LLVM to the GCC RTL form and uses
+GCC to emit machine code for a target.
+
+This design also implies that it is possible to design and implement radically
+different code generators in the LLVM system that do not make use of any of the
+built-in components. Doing so is not recommended at all, but could be required
+for radically different targets that do not fit into the LLVM machine
+description model: FPGAs for example.
+
+.. _high-level design of the code generator:
+
+The high-level design of the code generator
+-------------------------------------------
+
+The LLVM target-independent code generator is designed to support efficient and
+quality code generation for standard register-based microprocessors. Code
+generation in this model is divided into the following stages:
+
+1. `Instruction Selection`_ --- This phase determines an efficient way to
+ express the input LLVM code in the target instruction set. This stage
+ produces the initial code for the program in the target instruction set, then
+ makes use of virtual registers in SSA form and physical registers that
+ represent any required register assignments due to target constraints or
+ calling conventions. This step turns the LLVM code into a DAG of target
+ instructions.
+
+2. `Scheduling and Formation`_ --- This phase takes the DAG of target
+ instructions produced by the instruction selection phase, determines an
+ ordering of the instructions, then emits the instructions as :raw-html:`<tt>`
+ `MachineInstr`_\s :raw-html:`</tt>` with that ordering. Note that we
+ describe this in the `instruction selection section`_ because it operates on
+ a `SelectionDAG`_.
+
+3. `SSA-based Machine Code Optimizations`_ --- This optional stage consists of a
+ series of machine-code optimizations that operate on the SSA-form produced by
+ the instruction selector. Optimizations like modulo-scheduling or peephole
+ optimization work here.
+
+4. `Register Allocation`_ --- The target code is transformed from an infinite
+ virtual register file in SSA form to the concrete register file used by the
+ target. This phase introduces spill code and eliminates all virtual register
+ references from the program.
+
+5. `Prolog/Epilog Code Insertion`_ --- Once the machine code has been generated
+ for the function and the amount of stack space required is known (used for
+ LLVM alloca's and spill slots), the prolog and epilog code for the function
+ can be inserted and "abstract stack location references" can be eliminated.
+ This stage is responsible for implementing optimizations like frame-pointer
+ elimination and stack packing.
+
+6. `Late Machine Code Optimizations`_ --- Optimizations that operate on "final"
+ machine code can go here, such as spill code scheduling and peephole
+ optimizations.
+
+7. `Code Emission`_ --- The final stage actually puts out the code for the
+ current function, either in the target assembler format or in machine
+ code.
+
+The code generator is based on the assumption that the instruction selector will
+use an optimal pattern matching selector to create high-quality sequences of
+native instructions. Alternative code generator designs based on pattern
+expansion and aggressive iterative peephole optimization are much slower. This
+design permits efficient compilation (important for JIT environments) and
+aggressive optimization (used when generating code offline) by allowing
+components of varying levels of sophistication to be used for any step of
+compilation.
+
+In addition to these stages, target implementations can insert arbitrary
+target-specific passes into the flow. For example, the X86 target uses a
+special pass to handle the 80x87 floating point stack architecture. Other
+targets with unusual requirements can be supported with custom passes as needed.
+
+Using TableGen for target description
+-------------------------------------
+
+The target description classes require a detailed description of the target
+architecture. These target descriptions often have a large amount of common
+information (e.g., an ``add`` instruction is almost identical to a ``sub``
+instruction). In order to allow the maximum amount of commonality to be
+factored out, the LLVM code generator uses the
+`TableGen <TableGenFundamentals.html>`_ tool to describe big chunks of the
+target machine, which allows the use of domain-specific and target-specific
+abstractions to reduce the amount of repetition.
+
+As LLVM continues to be developed and refined, we plan to move more and more of
+the target description to the ``.td`` form. Doing so gives us a number of
+advantages. The most important is that it makes it easier to port LLVM because
+it reduces the amount of C++ code that has to be written, and the surface area
+of the code generator that needs to be understood before someone can get
+something working. Second, it makes it easier to change things. In particular,
+if tables and other things are all emitted by ``tblgen``, we only need a change
+in one place (``tblgen``) to update all of the targets to a new interface.
+
+.. _Abstract target description:
+.. _target description:
+
+Target description classes
+==========================
+
+The LLVM target description classes (located in the ``include/llvm/Target``
+directory) provide an abstract description of the target machine independent of
+any particular client. These classes are designed to capture the *abstract*
+properties of the target (such as the instructions and registers it has), and do
+not incorporate any particular pieces of code generation algorithms.
+
+All of the target description classes (except the :raw-html:`<tt>` `TargetData`_
+:raw-html:`</tt>` class) are designed to be subclassed by the concrete target
+implementation, and have virtual methods implemented. To get to these
+implementations, the :raw-html:`<tt>` `TargetMachine`_ :raw-html:`</tt>` class
+provides accessors that should be implemented by the target.
+
+.. _TargetMachine:
+
+The ``TargetMachine`` class
+---------------------------
+
+The ``TargetMachine`` class provides virtual methods that are used to access the
+target-specific implementations of the various target description classes via
+the ``get*Info`` methods (``getInstrInfo``, ``getRegisterInfo``,
+``getFrameInfo``, etc.). This class is designed to be specialized by a concrete
+target implementation (e.g., ``X86TargetMachine``) which implements the various
+virtual methods. The only required target description class is the
+:raw-html:`<tt>` `TargetData`_ :raw-html:`</tt>` class, but if the code
+generator components are to be used, the other interfaces should be implemented
+as well.
+
+.. _TargetData:
+
+The ``TargetData`` class
+------------------------
+
+The ``TargetData`` class is the only required target description class, and it
+is the only class that is not extensible (you cannot derive a new class from
+it). ``TargetData`` specifies information about how the target lays out memory
+for structures, the alignment requirements for various data types, the size of
+pointers in the target, and whether the target is little-endian or
+big-endian.
+
+.. _targetlowering:
+
+The ``TargetLowering`` class
+----------------------------
+
+The ``TargetLowering`` class is used by SelectionDAG based instruction selectors
+primarily to describe how LLVM code should be lowered to SelectionDAG
+operations. Among other things, this class indicates:
+
+* an initial register class to use for various ``ValueType``\s,
+
+* which operations are natively supported by the target machine,
+
+* the return type of ``setcc`` operations,
+
+* the type to use for shift amounts, and
+
+* various high-level characteristics, like whether it is profitable to turn
+ division by a constant into a multiplication sequence
+
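+As a rough sketch (using a hypothetical ``MyTarget`` backend rather than code
+from any in-tree target, and glossing over details that vary between LLVM
+versions), a ``TargetLowering`` subclass typically records this information in
+its constructor:
+
+.. code-block:: c++
+
+  // Hypothetical MyTarget backend: describe register classes and unsupported
+  // operations to the rest of the code generator.
+  MyTargetLowering::MyTargetLowering(MyTargetMachine &TM)
+    : TargetLowering(TM, new TargetLoweringObjectFileELF()) {
+    // i32 values live in the 32-bit general purpose register class.
+    addRegisterClass(MVT::i32, &MyTarget::GPR32RegClass);
+
+    // No native remainder instruction: ask the legalizer to expand it into
+    // other operations (see the Legalize phases below).
+    setOperationAction(ISD::SREM, MVT::i32, Expand);
+    setOperationAction(ISD::UREM, MVT::i32, Expand);
+
+    // Derive legal value types and related properties from the register
+    // classes registered above.
+    computeRegisterProperties();
+  }
+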
+The ``TargetRegisterInfo`` class
+--------------------------------
+
+The ``TargetRegisterInfo`` class is used to describe the register file of the
+target and any interactions between the registers.
+
+Registers are represented in the code generator by unsigned
+integers. Physical registers (those that actually exist in the target
+description) are unique small numbers, and virtual registers are generally
+large. Note that register ``#0`` is reserved as a flag value.
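+
+A small sketch (not tied to any particular backend) of how client code can tell
+these register-number ranges apart, using the static helpers on
+``TargetRegisterInfo``:
+
+.. code-block:: c++
+
+  // Sketch: classifying a register number as described above.
+  const char *describeReg(unsigned Reg) {
+    if (Reg == 0)
+      return "no register";              // reserved flag value
+    if (TargetRegisterInfo::isPhysicalRegister(Reg))
+      return "physical register";        // small number from the target description
+    return "virtual register";           // large number created by the selector
+  }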
+
+Each register in the processor description has an associated
+``TargetRegisterDesc`` entry, which provides a textual name for the register
+(used for assembly output and debugging dumps) and a set of aliases (used to
+indicate whether one register overlaps with another).
+
+In addition to the per-register description, the ``TargetRegisterInfo`` class
+exposes a set of processor specific register classes (instances of the
+``TargetRegisterClass`` class). Each register class contains sets of registers
+that have the same properties (for example, they are all 32-bit integer
+registers). Each SSA virtual register created by the instruction selector has
+an associated register class. When the register allocator runs, it replaces
+virtual registers with a physical register in the set.
+
+The target-specific implementations of these classes are auto-generated from a
+`TableGen <TableGenFundamentals.html>`_ description of the register file.
+
+.. _TargetInstrInfo:
+
+The ``TargetInstrInfo`` class
+-----------------------------
+
+The ``TargetInstrInfo`` class is used to describe the machine instructions
+supported by the target. It is essentially an array of ``TargetInstrDescriptor``
+objects, each of which describes one instruction the target
+supports. Descriptors define things like the mnemonic for the opcode, the number
+of operands, the list of implicit register uses and defs, whether the
+instruction has certain target-independent properties (accesses memory, is
+commutable, etc), and hold any target-specific flags.
+
+The ``TargetFrameInfo`` class
+-----------------------------
+
+The ``TargetFrameInfo`` class is used to provide information about the stack
+frame layout of the target. It holds the direction of stack growth, the known
+stack alignment on entry to each function, and the offset to the local area.
+The offset to the local area is the offset from the stack pointer on function
+entry to the first location where function data (local variables, spill
+locations) can be stored.
+
+The ``TargetSubtarget`` class
+-----------------------------
+
+The ``TargetSubtarget`` class is used to provide information about the specific
+chip set being targeted. A sub-target informs code generation of which
+instructions are supported, instruction latencies and instruction execution
+itinerary; i.e., which processing units are used, in what order, and for how
+long.
+
+The ``TargetJITInfo`` class
+---------------------------
+
+The ``TargetJITInfo`` class exposes an abstract interface used by the
+Just-In-Time code generator to perform target-specific activities, such as
+emitting stubs. If a ``TargetMachine`` supports JIT code generation, it should
+provide one of these objects through the ``getJITInfo`` method.
+
+.. _code being generated:
+.. _machine code representation:
+
+Machine code description classes
+================================
+
+At the high-level, LLVM code is translated to a machine specific representation
+formed out of :raw-html:`<tt>` `MachineFunction`_ :raw-html:`</tt>`,
+:raw-html:`<tt>` `MachineBasicBlock`_ :raw-html:`</tt>`, and :raw-html:`<tt>`
+`MachineInstr`_ :raw-html:`</tt>` instances (defined in
+``include/llvm/CodeGen``). This representation is completely target agnostic,
+representing instructions in their most abstract form: an opcode and a series of
+operands. This representation is designed to support both an SSA representation
+for machine code, as well as a register allocated, non-SSA form.
+
+.. _MachineInstr:
+
+The ``MachineInstr`` class
+--------------------------
+
+Target machine instructions are represented as instances of the ``MachineInstr``
+class. This class is an extremely abstract way of representing machine
+instructions. In particular, it only keeps track of an opcode number and a set
+of operands.
+
+The opcode number is a simple unsigned integer that only has meaning to a
+specific backend. All of the instructions for a target should be defined in the
+``*InstrInfo.td`` file for the target. The opcode enum values are auto-generated
+from this description. The ``MachineInstr`` class does not have any information
+about how to interpret the instruction (i.e., what the semantics of the
+instruction are); for that you must refer to the :raw-html:`<tt>`
+`TargetInstrInfo`_ :raw-html:`</tt>` class.
+
+The operands of a machine instruction can be of several different types: a
+register reference, a constant integer, a basic block reference, etc. In
+addition, a machine operand should be marked as a def or a use of the value
+(though only registers are allowed to be defs).
+
+By convention, the LLVM code generator orders instruction operands so that all
+register definitions come before the register uses, even on architectures that
+are normally printed in other orders. For example, the SPARC add instruction:
+"``add %i1, %i2, %i3``" adds the "%i1" and "%i2" registers and stores the
+result into the "%i3" register. In the LLVM code generator, the operands should
+be stored as "``%i3, %i1, %i2``": with the destination first.
+
+Keeping destination (definition) operands at the beginning of the operand list
+has several advantages. In particular, the debugging printer will print the
+instruction like this:
+
+.. code-block:: llvm
+
+ %r3 = add %i1, %i2
+
+Also if the first operand is a def, it is easier to `create instructions`_ whose
+only def is the first operand.
+
+.. _create instructions:
+
+Using the ``MachineInstrBuilder.h`` functions
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Machine instructions are created by using the ``BuildMI`` functions, located in
+the ``include/llvm/CodeGen/MachineInstrBuilder.h`` file. The ``BuildMI``
+functions make it easy to build arbitrary machine instructions. Usage of the
+``BuildMI`` functions look like this:
+
+.. code-block:: c++
+
+ // Create a 'DestReg = mov 42' (rendered in X86 assembly as 'mov DestReg, 42')
+ // instruction. The '1' specifies how many operands will be added.
+ MachineInstr *MI = BuildMI(X86::MOV32ri, 1, DestReg).addImm(42);
+
+ // Create the same instr, but insert it at the end of a basic block.
+  MachineBasicBlock &MBB = ...
+ BuildMI(MBB, X86::MOV32ri, 1, DestReg).addImm(42);
+
+ // Create the same instr, but insert it before a specified iterator point.
+ MachineBasicBlock::iterator MBBI = ...
+ BuildMI(MBB, MBBI, X86::MOV32ri, 1, DestReg).addImm(42);
+
+ // Create a 'cmp Reg, 0' instruction, no destination reg.
+ MI = BuildMI(X86::CMP32ri, 2).addReg(Reg).addImm(0);
+
+ // Create an 'sahf' instruction which takes no operands and stores nothing.
+ MI = BuildMI(X86::SAHF, 0);
+
+ // Create a self looping branch instruction.
+  BuildMI(MBB, X86::JNE, 1).addMBB(&MBB);
+
+The key thing to remember with the ``BuildMI`` functions is that you have to
+specify the number of operands that the machine instruction will take. This
+allows for efficient memory allocation. Also note that operands are by default
+uses of values, not definitions. If you need to add a definition
+operand (other than the optional destination register), you must explicitly mark
+it as such:
+
+.. code-block:: c++
+
+ MI.addReg(Reg, RegState::Define);
+
+Fixed (preassigned) registers
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+One important issue that the code generator needs to be aware of is the presence
+of fixed registers. In particular, there are often places in the instruction
+stream where the register allocator *must* arrange for a particular value to be
+in a particular register. This can occur due to limitations of the instruction
+set (e.g., the X86 can only do a 32-bit divide with the ``EAX``/``EDX``
+registers), or external factors like calling conventions. In any case, the
+instruction selector should emit code that copies a virtual register into or out
+of a physical register when needed.
+
+For example, consider this simple LLVM example:
+
+.. code-block:: llvm
+
+ define i32 @test(i32 %X, i32 %Y) {
+ %Z = udiv i32 %X, %Y
+ ret i32 %Z
+ }
+
+The X86 instruction selector produces this machine code for the ``div`` and
+``ret`` (use "``llc X.bc -march=x86 -print-machineinstrs``" to get this):
+
+.. code-block:: llvm
+
+ ;; Start of div
+ %EAX = mov %reg1024 ;; Copy X (in reg1024) into EAX
+ %reg1027 = sar %reg1024, 31
+ %EDX = mov %reg1027 ;; Sign extend X into EDX
+ idiv %reg1025 ;; Divide by Y (in reg1025)
+ %reg1026 = mov %EAX ;; Read the result (Z) out of EAX
+
+ ;; Start of ret
+ %EAX = mov %reg1026 ;; 32-bit return value goes in EAX
+ ret
+
+By the end of code generation, the register allocator has coalesced the
+registers and deleted the resultant identity moves producing the following
+code:
+
+.. code-block:: llvm
+
+ ;; X is in EAX, Y is in ECX
+ mov %EAX, %EDX
+ sar %EDX, 31
+ idiv %ECX
+ ret
+
+This approach is extremely general (if it can handle the X86 architecture, it
+can handle anything!) and allows all of the target specific knowledge about the
+instruction stream to be isolated in the instruction selector. Note that
+physical registers should have a short lifetime for good code generation, and
+all physical registers are assumed dead on entry to and exit from basic blocks
+(before register allocation). Thus, if you need a value to be live across basic
+block boundaries, it *must* live in a virtual register.
+
+Call-clobbered registers
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+Some machine instructions, like calls, clobber a large number of physical
+registers. Rather than adding ``<def,dead>`` operands for all of them, it is
+possible to use an ``MO_RegisterMask`` operand instead. The register mask
+operand holds a bit mask of preserved registers, and everything else is
+considered to be clobbered by the instruction.
+
+Machine code in SSA form
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+``MachineInstr``'s are initially selected in SSA-form, and are maintained in
+SSA-form until register allocation happens. For the most part, this is
+trivially simple since LLVM is already in SSA form; LLVM PHI nodes become
+machine code PHI nodes, and virtual registers are only allowed to have a single
+definition.
+
+After register allocation, machine code is no longer in SSA-form because there
+are no virtual registers left in the code.
+
+.. _MachineBasicBlock:
+
+The ``MachineBasicBlock`` class
+-------------------------------
+
+The ``MachineBasicBlock`` class contains a list of machine instructions
+(:raw-html:`<tt>` `MachineInstr`_ :raw-html:`</tt>` instances). It roughly
+corresponds to the LLVM code input to the instruction selector, but there can be
+a one-to-many mapping (i.e. one LLVM basic block can map to multiple machine
+basic blocks). The ``MachineBasicBlock`` class has a "``getBasicBlock``" method,
+which returns the LLVM basic block that it comes from.
+
+.. _MachineFunction:
+
+The ``MachineFunction`` class
+-----------------------------
+
+The ``MachineFunction`` class contains a list of machine basic blocks
+(:raw-html:`<tt>` `MachineBasicBlock`_ :raw-html:`</tt>` instances). It
+corresponds one-to-one with the LLVM function input to the instruction selector.
+In addition to a list of basic blocks, the ``MachineFunction`` contains a
+``MachineConstantPool``, a ``MachineFrameInfo``, a ``MachineFunctionInfo``, and
+a ``MachineRegisterInfo``. See ``include/llvm/CodeGen/MachineFunction.h`` for
+more information.
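+
+As a small illustration of how these classes nest (a sketch, not code from the
+tree), counting the instructions in a function looks like this:
+
+.. code-block:: c++
+
+  // Sketch: every MachineFunction is a list of MachineBasicBlocks, and every
+  // MachineBasicBlock is a list of MachineInstrs.
+  unsigned countMachineInstrs(const MachineFunction &MF) {
+    unsigned Count = 0;
+    for (MachineFunction::const_iterator MBB = MF.begin(), MBBE = MF.end();
+         MBB != MBBE; ++MBB)
+      for (MachineBasicBlock::const_iterator MI = MBB->begin(), MIE = MBB->end();
+           MI != MIE; ++MI)
+        ++Count;  // each MI is one target instruction
+    return Count;
+  }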
+
+``MachineInstr Bundles``
+------------------------
+
+The LLVM code generator can model sequences of instructions as MachineInstr
+bundles. A MI bundle can model a VLIW group / pack which contains an arbitrary
+number of parallel instructions. It can also be used to model a sequential list
+of instructions (potentially with data dependencies) that cannot be legally
+separated (e.g. ARM Thumb2 IT blocks).
+
+Conceptually a MI bundle is a MI with a number of other MIs nested within:
+
+::
+
+ --------------
+ | Bundle | ---------
+ -------------- \
+ | ----------------
+ | | MI |
+ | ----------------
+ | |
+ | ----------------
+ | | MI |
+ | ----------------
+ | |
+ | ----------------
+ | | MI |
+ | ----------------
+ |
+ --------------
+ | Bundle | --------
+ -------------- \
+ | ----------------
+ | | MI |
+ | ----------------
+ | |
+ | ----------------
+ | | MI |
+ | ----------------
+ | |
+ | ...
+ |
+ --------------
+ | Bundle | --------
+ -------------- \
+ |
+ ...
+
+MI bundle support does not change the physical representations of
+MachineBasicBlock and MachineInstr. All the MIs (including top level and nested
+ones) are stored as a sequential list of MIs. The "bundled" MIs are marked with
+the 'InsideBundle' flag. A top level MI with the special BUNDLE opcode is used
+to represent the start of a bundle. It's legal to mix BUNDLE MIs with individual
+MIs that are neither inside bundles nor represent bundles.
+
+MachineInstr passes should operate on a MI bundle as a single unit. Member
+methods have been taught to correctly handle bundles and MIs inside bundles.
+The MachineBasicBlock iterator has been modified to skip over bundled MIs to
+enforce the bundle-as-a-single-unit concept. An alternative iterator
+instr_iterator has been added to MachineBasicBlock to allow passes to iterate
+over all of the MIs in a MachineBasicBlock, including those which are nested
+inside bundles. The top level BUNDLE instruction must have the correct set of
+register MachineOperand's that represent the cumulative inputs and outputs of
+the bundled MIs.
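+
+A short sketch of the two traversal styles described above:
+
+.. code-block:: c++
+
+  // The regular iterator treats each bundle as a single unit, while
+  // instr_iterator also visits the MIs nested inside bundles.
+  for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E; ++I) {
+    // *I is either a stand-alone MI or the top-level BUNDLE MI of a bundle.
+  }
+
+  for (MachineBasicBlock::instr_iterator I = MBB.instr_begin(),
+         E = MBB.instr_end(); I != E; ++I) {
+    // *I may be a bundled MI; such MIs carry the 'InsideBundle' flag.
+  }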
+
+Packing / bundling of MachineInstr's should be done as part of the register
+allocation super-pass. More specifically, the pass which determines what MIs
+should be bundled together must be run after the code generator exits SSA form
+(i.e. after two-address pass, PHI elimination, and copy coalescing). Bundles
+should only be finalized (i.e. adding BUNDLE MIs and input and output register
+MachineOperands) after virtual registers have been rewritten into physical
+registers. This requirement eliminates the need to add virtual register operands
+to BUNDLE instructions which would effectively double the virtual register def
+and use lists.
+
+.. _MC Layer:
+
+The "MC" Layer
+==============
+
+The MC Layer is used to represent and process code at the raw machine code
+level, devoid of "high level" information like "constant pools", "jump tables",
+"global variables" or anything like that. At this level, LLVM handles things
+like label names, machine instructions, and sections in the object file. The
+code in this layer is used for a number of important purposes: the tail end of
+the code generator uses it to write a .s or .o file, and it is also used by the
+llvm-mc tool to implement standalone machine code assemblers and disassemblers.
+
+This section describes some of the important classes. There are also a number
+of important subsystems that interact at this layer; they are described later in
+this manual.
+
+.. _MCStreamer:
+
+The ``MCStreamer`` API
+----------------------
+
+MCStreamer is best thought of as an assembler API. It is an abstract API which
+is *implemented* in different ways (e.g. to output a .s file, output an ELF .o
+file, etc) but whose API corresponds directly to what you see in a .s file.
+MCStreamer has one method per directive, such as EmitLabel, EmitSymbolAttribute,
+SwitchSection, EmitValue (for .byte, .word), etc, which directly correspond to
+assembly level directives. It also has an EmitInstruction method, which is used
+to output an MCInst to the streamer.
+
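+For illustration, a client that already has a streamer and its context (how
+they are created is target- and output-format-specific) could emit a label
+followed by a byte roughly like this:
+
+.. code-block:: c++
+
+  // Sketch: emit "foo:" followed by ".byte 4" through the streamer API.
+  void emitFooByte(MCStreamer &Streamer, MCContext &Ctx) {
+    MCSymbol *Foo = Ctx.GetOrCreateSymbol(StringRef("foo"));
+    Streamer.EmitLabel(Foo);      // "foo:"
+    Streamer.EmitIntValue(4, 1);  // ".byte 4" (integer form of EmitValue)
+  }
+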
+This API is most important for two clients: the llvm-mc stand-alone assembler is
+effectively a parser that parses a line, then invokes a method on MCStreamer. In
+the code generator, the `Code Emission`_ phase of the code generator lowers
+higher level LLVM IR and Machine* constructs down to the MC layer, emitting
+directives through MCStreamer.
+
+On the implementation side of MCStreamer, there are two major implementations:
+one for writing out a .s file (MCAsmStreamer), and one for writing out a .o
+file (MCObjectStreamer). MCAsmStreamer is a straight-forward implementation
+that prints out a directive for each method (e.g. ``EmitValue -> .byte``), but
+MCObjectStreamer implements a full assembler.
+
+The ``MCContext`` class
+-----------------------
+
+The MCContext class is the owner of a variety of uniqued data structures at the
+MC layer, including symbols, sections, etc. As such, this is the class that you
+interact with to create symbols and sections. This class can not be subclassed.
+
+The ``MCSymbol`` class
+----------------------
+
+The MCSymbol class represents a symbol (aka label) in the assembly file. There
+are two interesting kinds of symbols: assembler temporary symbols, and normal
+symbols. Assembler temporary symbols are used and processed by the assembler
+but are discarded when the object file is produced. The distinction is usually
+represented by adding a prefix to the label, for example "L" labels are
+assembler temporary labels in MachO.
+
+MCSymbols are created by MCContext and uniqued there. This means that MCSymbols
+can be compared for pointer equivalence to find out if they are the same symbol.
+Note that pointer inequality does not guarantee the labels will end up at
+different addresses though. It's perfectly legal to output something like this
+to the .s file:
+
+::
+
+ foo:
+ bar:
+ .byte 4
+
+In this case, both the foo and bar symbols will have the same address.
+
+The ``MCSection`` class
+-----------------------
+
+The ``MCSection`` class represents an object-file specific section. It is
+subclassed by object file specific implementations (e.g. ``MCSectionMachO``,
+``MCSectionCOFF``, ``MCSectionELF``) and these are created and uniqued by
+MCContext. The MCStreamer has a notion of the current section, which can be
+changed with the SwitchSection method (which corresponds to a ".section"
+directive in a .s file).
+
+.. _MCInst:
+
+The ``MCInst`` class
+--------------------
+
+The ``MCInst`` class is a target-independent representation of an instruction.
+It is a simple class (much more so than `MachineInstr`_) that holds a
+target-specific opcode and a vector of MCOperands. MCOperand, in turn, is a
+simple discriminated union of three cases: 1) a simple immediate, 2) a target
+register ID, 3) a symbolic expression (e.g. "``Lfoo-Lbar+42``") as an MCExpr.
+
+MCInst is the common currency used to represent machine instructions at the MC
+layer. It is the type used by the instruction encoder, the instruction printer,
+and the type generated by the assembly parser and disassembler.
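+
+For illustration, building an ``MCInst`` by hand looks roughly like this (the
+opcode and register enums below belong to a hypothetical target):
+
+.. code-block:: c++
+
+  // Sketch: an MCInst is just an opcode plus a vector of MCOperands.
+  MCInst Inst;
+  Inst.setOpcode(MyTarget::MOV32ri);                    // target opcode enum
+  Inst.addOperand(MCOperand::CreateReg(MyTarget::R0));  // register operand
+  Inst.addOperand(MCOperand::CreateImm(42));            // immediate operand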
+
+.. _Target-independent algorithms:
+.. _code generation algorithm:
+
+Target-independent code generation algorithms
+=============================================
+
+This section documents the phases described in the `high-level design of the
+code generator`_. It explains how they work and some of the rationale behind
+their design.
+
+.. _Instruction Selection:
+.. _instruction selection section:
+
+Instruction Selection
+---------------------
+
+Instruction Selection is the process of translating LLVM code presented to the
+code generator into target-specific machine instructions. There are several
+well-known ways to do this in the literature. LLVM uses a SelectionDAG based
+instruction selector.
+
+Portions of the DAG instruction selector are generated from the target
+description (``*.td``) files. Our goal is for the entire instruction selector
+to be generated from these ``.td`` files, though currently there are still
+things that require custom C++ code.
+
+.. _SelectionDAG:
+
+Introduction to SelectionDAGs
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The SelectionDAG provides an abstraction for code representation in a way that
+is amenable to instruction selection using automatic techniques
+(e.g. dynamic-programming based optimal pattern matching selectors). It is also
+well-suited to other phases of code generation; in particular, instruction
+scheduling (SelectionDAG's are very close to scheduling DAGs post-selection).
+Additionally, the SelectionDAG provides a host representation where a large
+variety of very-low-level (but target-independent) `optimizations`_ may be
+performed; ones which require extensive information about the instructions
+efficiently supported by the target.
+
+The SelectionDAG is a Directed-Acyclic-Graph whose nodes are instances of the
+``SDNode`` class. The primary payload of the ``SDNode`` is its operation code
+(Opcode) that indicates what operation the node performs and the operands to the
+operation. The various operation node types are described at the top of the
+``include/llvm/CodeGen/SelectionDAGNodes.h`` file.
+
+Although most operations define a single value, each node in the graph may
+define multiple values. For example, a combined div/rem operation will define
+both the quotient and the remainder. Many other situations require multiple
+values as well. Each node also has some number of operands, which are edges to
+the node defining the used value. Because nodes may define multiple values,
+edges are represented by instances of the ``SDValue`` class, which is a
+``<SDNode, unsigned>`` pair, indicating the node and result value being used,
+respectively. Each value produced by an ``SDNode`` has an associated ``MVT``
+(Machine Value Type) indicating what the type of the value is.
+
+SelectionDAGs contain two different kinds of values: those that represent data
+flow and those that represent control flow dependencies. Data values are simple
+edges with an integer or floating point value type. Control edges are
+represented as "chain" edges which are of type ``MVT::Other``. These edges
+provide an ordering between nodes that have side effects (such as loads, stores,
+calls, returns, etc). All nodes that have side effects should take a token
+chain as input and produce a new one as output. By convention, token chain
+inputs are always operand #0, and chain results are always the last value
+produced by an operation.
+
+A SelectionDAG has designated "Entry" and "Root" nodes. The Entry node is
+always a marker node with an Opcode of ``ISD::EntryToken``. The Root node is
+the final side-effecting node in the token chain. For example, in a single basic
+block function it would be the return node.
+
+One important concept for SelectionDAGs is the notion of a "legal" vs.
+"illegal" DAG. A legal DAG for a target is one that only uses supported
+operations and supported types. On a 32-bit PowerPC, for example, a DAG with a
+value of type i1, i8, i16, or i64 would be illegal, as would a DAG that uses a
+SREM or UREM operation. The `legalize types`_ and `legalize operations`_ phases
+are responsible for turning an illegal DAG into a legal DAG.
+
+SelectionDAG Instruction Selection Process
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+SelectionDAG-based instruction selection consists of the following steps:
+
+#. `Build initial DAG`_ --- This stage performs a simple translation from the
+ input LLVM code to an illegal SelectionDAG.
+
+#. `Optimize SelectionDAG`_ --- This stage performs simple optimizations on the
+ SelectionDAG to simplify it, and recognize meta instructions (like rotates
+ and ``div``/``rem`` pairs) for targets that support these meta operations.
+ This makes the resultant code more efficient and the `select instructions
+ from DAG`_ phase (below) simpler.
+
+#. `Legalize SelectionDAG Types`_ --- This stage transforms SelectionDAG nodes
+ to eliminate any types that are unsupported on the target.
+
+#. `Optimize SelectionDAG`_ --- The SelectionDAG optimizer is run to clean up
+ redundancies exposed by type legalization.
+
+#. `Legalize SelectionDAG Ops`_ --- This stage transforms SelectionDAG nodes to
+ eliminate any operations that are unsupported on the target.
+
+#. `Optimize SelectionDAG`_ --- The SelectionDAG optimizer is run to eliminate
+ inefficiencies introduced by operation legalization.
+
+#. `Select instructions from DAG`_ --- Finally, the target instruction selector
+ matches the DAG operations to target instructions. This process translates
+ the target-independent input DAG into another DAG of target instructions.
+
+#. `SelectionDAG Scheduling and Formation`_ --- The last phase assigns a linear
+ order to the instructions in the target-instruction DAG and emits them into
+ the MachineFunction being compiled. This step uses traditional prepass
+ scheduling techniques.
+
+After all of these steps are complete, the SelectionDAG is destroyed and the
+rest of the code generation passes are run.
+
+One great way to visualize what is going on here is to take advantage of a few
+LLC command line options. The following options pop up a window displaying the
+SelectionDAG at specific times (if you only get errors printed to the console
+while using this, you probably `need to configure your
+system <ProgrammersManual.html#ViewGraph>`_ to add support for it).
+
+* ``-view-dag-combine1-dags`` displays the DAG after being built, before the
+ first optimization pass.
+
+* ``-view-legalize-dags`` displays the DAG before Legalization.
+
+* ``-view-dag-combine2-dags`` displays the DAG before the second optimization
+ pass.
+
+* ``-view-isel-dags`` displays the DAG before the Select phase.
+
+* ``-view-sched-dags`` displays the DAG before Scheduling.
+
+The ``-view-sunit-dags`` displays the Scheduler's dependency graph. This graph
+is based on the final SelectionDAG, with nodes that must be scheduled together
+bundled into a single scheduling-unit node, and with immediate operands and
+other nodes that aren't relevant for scheduling omitted.
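+
+For example, to display the DAG just before the Select phase when targeting X86
+(assuming your system is set up for graph viewing), you could run something
+like:
+
+.. code-block:: bash
+
+  $ llc -march=x86 -view-isel-dags X.bc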
+
+.. _Build initial DAG:
+
+Initial SelectionDAG Construction
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The initial SelectionDAG is naïvely peephole expanded from
+the LLVM input by the ``SelectionDAGLowering`` class in the
+``lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp`` file. The intent of this pass
+is to expose as much low-level, target-specific details to the SelectionDAG as
+possible. This pass is mostly hard-coded (e.g. an LLVM ``add`` turns into an
+``SDNode add`` while a ``getelementptr`` is expanded into the obvious
+arithmetic). This pass requires target-specific hooks to lower calls, returns,
+varargs, etc. For these features, the :raw-html:`<tt>` `TargetLowering`_
+:raw-html:`</tt>` interface is used.
+
+.. _legalize types:
+.. _Legalize SelectionDAG Types:
+.. _Legalize SelectionDAG Ops:
+
+SelectionDAG LegalizeTypes Phase
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The Legalize phase is in charge of converting a DAG to only use the types that
+are natively supported by the target.
+
+There are two main ways of converting values of unsupported scalar types to
+values of supported types: converting small types to larger types ("promoting"),
+and breaking up large integer types into smaller ones ("expanding"). For
+example, a target might require that all f32 values are promoted to f64 and that
+all i1/i8/i16 values are promoted to i32. The same target might require that
+all i64 values be expanded into pairs of i32 values. These changes can insert
+sign and zero extensions as needed to make sure that the final code has the same
+behavior as the input.
+
+There are two main ways of converting values of unsupported vector types to
+values of supported types: splitting vector types, multiple times if necessary,
+until a legal type is found, and extending vector types by adding elements to
+the end to round them out to legal types ("widening"). If a vector gets split
+all the way down to single-element parts with no supported vector type being
+found, the elements are converted to scalars ("scalarizing").
+
+A target implementation tells the legalizer which types are supported (and which
+register class to use for them) by calling the ``addRegisterClass`` method in
+its TargetLowering constructor.
+
+.. _legalize operations:
+.. _Legalizer:
+
+SelectionDAG Legalize Phase
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The Legalize phase is in charge of converting a DAG to only use the operations
+that are natively supported by the target.
+
+Targets often have weird constraints, such as not supporting every operation on
+every supported datatype (e.g. X86 does not support byte conditional moves and
+PowerPC does not support sign-extending loads from a 16-bit memory location).
+Legalize takes care of this by open-coding another sequence of operations to
+emulate the operation ("expansion"), by promoting one type to a larger type that
+supports the operation ("promotion"), or by using a target-specific hook to
+implement the legalization ("custom").
+
+A target implementation tells the legalizer which operations are not supported
+(and which of the above three actions to take) by calling the
+``setOperationAction`` method in its ``TargetLowering`` constructor.
+
+Prior to the existence of the Legalize passes, we required that every target
+`selector`_ supported and handled every operator and type even if they were not
+natively supported. The introduction of the Legalize phases allows all of the
+canonicalization patterns to be shared across targets, and makes it very easy to
+optimize the canonicalized code because it is still in the form of a DAG.
+
+.. _optimizations:
+.. _Optimize SelectionDAG:
+.. _selector:
+
+SelectionDAG Optimization Phase: the DAG Combiner
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The SelectionDAG optimization phase is run multiple times for code generation,
+immediately after the DAG is built and once after each legalization. The first
+run of the pass allows the initial code to be cleaned up (e.g. performing
+optimizations that depend on knowing that the operators have restricted type
+inputs). Subsequent runs of the pass clean up the messy code generated by the
+Legalize passes, which allows Legalize to be very simple (it can focus on making
+code legal instead of focusing on generating *good* and legal code).
+
+One important class of optimizations performed is optimizing inserted sign and
+zero extension instructions. We currently use ad-hoc techniques, but could move
+to more rigorous techniques in the future. Here are some good papers on the
+subject:
+
+"`Widening integer arithmetic <http://www.eecs.harvard.edu/~nr/pubs/widen-abstract.html>`_" :raw-html:`<br>`
+Kevin Redwine and Norman Ramsey :raw-html:`<br>`
+International Conference on Compiler Construction (CC) 2004
+
+"`Effective sign extension elimination <http://portal.acm.org/citation.cfm?doid=512529.512552>`_" :raw-html:`<br>`
+Motohiro Kawahito, Hideaki Komatsu, and Toshio Nakatani :raw-html:`<br>`
+Proceedings of the ACM SIGPLAN 2002 Conference on Programming Language Design
+and Implementation.
+
+.. _Select instructions from DAG:
+
+SelectionDAG Select Phase
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The Select phase is the bulk of the target-specific code for instruction
+selection. This phase takes a legal SelectionDAG as input, pattern matches the
+instructions supported by the target to this DAG, and produces a new DAG of
+target code. For example, consider the following LLVM fragment:
+
+.. code-block:: llvm
+
+ %t1 = fadd float %W, %X
+ %t2 = fmul float %t1, %Y
+ %t3 = fadd float %t2, %Z
+
+This LLVM code corresponds to a SelectionDAG that looks basically like this:
+
+.. code-block:: llvm
+
+ (fadd:f32 (fmul:f32 (fadd:f32 W, X), Y), Z)
+
+If a target supports floating point multiply-and-add (FMA) operations, one of
+the adds can be merged with the multiply. On the PowerPC, for example, the
+output of the instruction selector might look like this DAG:
+
+::
+
+ (FMADDS (FADDS W, X), Y, Z)
+
+The ``FMADDS`` instruction is a ternary instruction that multiplies its first
+two operands and adds the third (as single-precision floating-point numbers).
+The ``FADDS`` instruction is a simple binary single-precision add instruction.
+To perform this pattern match, the PowerPC backend includes the following
+instruction definitions:
+
+::
+
+ def FMADDS : AForm_1<59, 29,
+ (ops F4RC:$FRT, F4RC:$FRA, F4RC:$FRC, F4RC:$FRB),
+ "fmadds $FRT, $FRA, $FRC, $FRB",
+ [(set F4RC:$FRT, (fadd (fmul F4RC:$FRA, F4RC:$FRC),
+ F4RC:$FRB))]>;
+ def FADDS : AForm_2<59, 21,
+ (ops F4RC:$FRT, F4RC:$FRA, F4RC:$FRB),
+ "fadds $FRT, $FRA, $FRB",
+ [(set F4RC:$FRT, (fadd F4RC:$FRA, F4RC:$FRB))]>;
+
+The ``[(set ...)]`` list at the end of each instruction definition indicates the
+pattern used to match the instruction. The DAG operators (like ``fmul``/``fadd``)
+are defined in the ``include/llvm/Target/TargetSelectionDAG.td`` file.
+``F4RC`` is the register class of the input and result values.
+
+The TableGen DAG instruction selector generator reads the instruction patterns
+in the ``.td`` file and automatically builds parts of the pattern matching code
+for your target. It has the following strengths:
+
+* At compiler-compiler time, it analyzes your instruction patterns and tells you
+ if your patterns make sense or not.
+
+* It can handle arbitrary constraints on operands for the pattern match. In
+ particular, it is straight-forward to say things like "match any immediate
+ that is a 13-bit sign-extended value". For examples, see the ``immSExt16``
+ and related ``tblgen`` classes in the PowerPC backend.
+
+* It knows several important identities for the patterns defined. For example,
+ it knows that addition is commutative, so it allows the ``FMADDS`` pattern
+ above to match "``(fadd X, (fmul Y, Z))``" as well as "``(fadd (fmul X, Y),
+ Z)``", without the target author having to specially handle this case.
+
+* It has a full-featured type-inferencing system. In particular, you should
+ rarely have to explicitly tell the system what type parts of your patterns
+ are. In the ``FMADDS`` case above, we didn't have to tell ``tblgen`` that all
+ of the nodes in the pattern are of type 'f32'. It was able to infer and
+ propagate this knowledge from the fact that ``F4RC`` has type 'f32'.
+
+* Targets can define their own (and rely on built-in) "pattern fragments".
+ Pattern fragments are chunks of reusable patterns that get inlined into your
+ patterns during compiler-compiler time. For example, the integer "``(not
+ x)``" operation is actually defined as a pattern fragment that expands as
+ "``(xor x, -1)``", since the SelectionDAG does not have a native '``not``'
+ operation. Targets can define their own short-hand fragments as they see fit.
+ See the definition of '``not``' and '``ineg``' for examples.
+
+* In addition to instructions, targets can specify arbitrary patterns that map
+ to one or more instructions using the 'Pat' class. For example, the PowerPC
+ has no way to load an arbitrary integer immediate into a register in one
+ instruction. To tell tblgen how to do this, it defines:
+
+ ::
+
+    // Arbitrary immediate support.  Implement in terms of LIS/ORI.
+    def : Pat<(i32 imm:$imm),
+              (ORI (LIS (HI16 imm:$imm)), (LO16 imm:$imm))>;
+
+ If none of the single-instruction patterns for loading an immediate into a
+ register match, this will be used. This rule says "match an arbitrary i32
+ immediate, turning it into an ``ORI`` ('or a 16-bit immediate') and an ``LIS``
+ ('load 16-bit immediate, where the immediate is shifted to the left 16 bits')
+ instruction". To make this work, the ``LO16``/``HI16`` node transformations
+ are used to manipulate the input immediate (in this case, take the high or low
+ 16-bits of the immediate).
+
+* While the system does automate a lot, it still allows you to write custom C++
+ code to match special cases if there is something that is hard to
+ express.
+
+While it has many strengths, the system currently has some limitations,
+primarily because it is a work in progress and is not yet finished:
+
+* Overall, there is no way to define or match SelectionDAG nodes that define
+ multiple values (e.g. ``SMUL_LOHI``, ``LOAD``, ``CALL``, etc). This is the
+ biggest reason that you currently still *have to* write custom C++ code
+ for your instruction selector.
+
+* There is no great way to support matching complex addressing modes yet. In
+ the future, we will extend pattern fragments to allow them to define multiple
+ values (e.g. the four operands of the `X86 addressing mode`_, which are
+ currently matched with custom C++ code). In addition, we'll extend fragments
+ so that a fragment can match multiple different patterns.
+
+* We don't automatically infer flags like ``isStore``/``isLoad`` yet.
+
+* We don't automatically generate the set of supported registers and operations
+ for the `Legalizer`_ yet.
+
+* We don't have a way of tying in custom legalized nodes yet.
+
+Despite these limitations, the instruction selector generator is still quite
+useful for most of the binary and logical operations in typical instruction
+sets. If you run into any problems or can't figure out how to do something,
+please let Chris know!
+
+.. _Scheduling and Formation:
+.. _SelectionDAG Scheduling and Formation:
+
+SelectionDAG Scheduling and Formation Phase
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The scheduling phase takes the DAG of target instructions from the selection
+phase and assigns an order. The scheduler can pick an order depending on
+various constraints of the machine (e.g. an order that minimizes register
+pressure or covers instruction latencies). Once an order is established, the
+DAG is converted to a list of `MachineInstr`_\s and the SelectionDAG is
+destroyed.
+
+Note that this phase is logically separate from the instruction selection phase,
+but is tied to it closely in the code because it operates on SelectionDAGs.
+
+Future directions for the SelectionDAG
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+#. Optional function-at-a-time selection.
+
+#. Auto-generate entire selector from ``.td`` file.
+
+.. _SSA-based Machine Code Optimizations:
+
+SSA-based Machine Code Optimizations
+------------------------------------
+
+To Be Written
+
+Live Intervals
+--------------
+
+Live Intervals are the ranges (intervals) where a variable is *live*. They are
+used by some `register allocator`_ passes to determine if two or more virtual
+registers which require the same physical register are live at the same point in
+the program (i.e., they conflict). When this situation occurs, one virtual
+register must be *spilled*.
+
+Live Variable Analysis
+^^^^^^^^^^^^^^^^^^^^^^
+
+The first step in determining the live intervals of variables is to calculate
+the set of registers that are immediately dead after the instruction (i.e., the
+instruction calculates the value, but it is never used) and the set of registers
+that are used by the instruction, but are never used after the instruction
+(i.e., they are killed). Live variable information is computed for
+each *virtual* register and *register allocatable* physical register
+in the function. This is done in a very efficient manner because it uses SSA to
+sparsely compute lifetime information for virtual registers (which are in SSA
+form) and only has to track physical registers within a block. Before register
+allocation, LLVM can assume that physical registers are only live within a
+single basic block. This allows it to do a single, local analysis to resolve
+physical register lifetimes within each basic block. If a physical register is
+not register allocatable (e.g., a stack pointer or condition codes), it is not
+tracked.
+
+Physical registers may be live in to or out of a function. Live in values are
+typically arguments in registers. Live out values are typically return values in
+registers. Live in values are marked as such, and are given a dummy "defining"
+instruction during live intervals analysis. If the last basic block of a
+function is a ``return``, then it's marked as using all live out values in the
+function.
+
+``PHI`` nodes need to be handled specially, because the calculation of the live
+variable information from a depth first traversal of the CFG of the function
+won't guarantee that a virtual register used by the ``PHI`` node is defined
+before it's used. When a ``PHI`` node is encountered, only the definition is
+handled, because the uses will be handled in other basic blocks.
+
+For each ``PHI`` node of the current basic block, we simulate an assignment at
+the end of the current basic block and traverse the successor basic blocks. If a
+successor basic block has a ``PHI`` node and one of the ``PHI`` node's operands
+is coming from the current basic block, then the variable is marked as *alive*
+within the current basic block and all of its predecessor basic blocks, until
+the basic block with the defining instruction is encountered.
+
+Live Intervals Analysis
+^^^^^^^^^^^^^^^^^^^^^^^
+
+We now have the information available to perform the live intervals analysis and
+build the live intervals themselves. We start off by numbering the basic blocks
+and machine instructions. We then handle the "live-in" values. These are in
+physical registers, so the physical register is assumed to be killed by the end
+of the basic block. Live intervals for virtual registers are computed for some
+ordering of the machine instructions ``[1, N]``. A live interval is an interval
+``[i, j)``, where ``1 <= i < j <= N``, for which a variable is live.
+
+.. note::
+ More to come...
+
+.. _Register Allocation:
+.. _register allocator:
+
+Register Allocation
+-------------------
+
+The *Register Allocation problem* consists in mapping a program
+P\ :sub:`v`, that can use an unbounded number of virtual registers, to a
+program P\ :sub:`p` that contains a finite (possibly small) number of physical
+registers. Each target architecture has a different number of physical
+registers. If the number of physical registers is not enough to accommodate all
+the virtual registers, some of them will have to be mapped into memory. These
+virtuals are called *spilled virtuals*.
+
+How registers are represented in LLVM
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In LLVM, physical registers are denoted by integer numbers that normally range
+from 1 to 1023. To see how this numbering is defined for a particular
+architecture, you can read the ``GenRegisterNames.inc`` file for that
+architecture. For instance, by inspecting
+``lib/Target/X86/X86GenRegisterInfo.inc`` we see that the 32-bit register
+``EAX`` is denoted by 43, and the MMX register ``MM0`` is mapped to 65.
+
+Some architectures contain registers that share the same physical location. A
+notable example is the X86 platform. For instance, in the X86 architecture, the
+registers ``EAX``, ``AX`` and ``AL`` share the first eight bits. These physical
+registers are marked as *aliased* in LLVM. Given a particular architecture, you
+can check which registers are aliased by inspecting its ``RegisterInfo.td``
+file. Moreover, the class ``MCRegAliasIterator`` enumerates all the physical
+registers aliased to a register.
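+
+For illustration, here is a minimal sketch (the helper function is made up)
+that uses ``MCRegAliasIterator`` to print every physical register that
+overlaps a given one:
+
+.. code-block:: c++
+
+  #include "llvm/MC/MCRegisterInfo.h"
+  #include "llvm/Support/raw_ostream.h"
+
+  using namespace llvm;
+
+  // Hypothetical helper: list every physical register that aliases Reg,
+  // including Reg itself.
+  static void printAliases(const MCRegisterInfo &MRI, unsigned Reg) {
+    for (MCRegAliasIterator AI(Reg, &MRI, /*IncludeSelf=*/true); AI.isValid(); ++AI)
+      errs() << MRI.getName(*AI) << " overlaps " << MRI.getName(Reg) << "\n";
+  }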
+
+Physical registers, in LLVM, are grouped in *Register Classes*. Elements in the
+same register class are functionally equivalent, and can be interchangeably
+used. Each virtual register can only be mapped to physical registers of a
+particular class. For instance, in the X86 architecture, some virtuals can only
+be allocated to 8 bit registers. A register class is described by
+``TargetRegisterClass`` objects. To discover if a virtual register is
+compatible with a given physical register, code like the following can be used:
+
+.. code-block:: c++
+
+  bool RegMapping_Fer::compatible_class(MachineFunction &mf,
+                                        unsigned v_reg,
+                                        unsigned p_reg) {
+    assert(TargetRegisterInfo::isPhysicalRegister(p_reg) &&
+           "Target register must be physical");
+    const TargetRegisterClass *trc = mf.getRegInfo().getRegClass(v_reg);
+    return trc->contains(p_reg);
+  }
+
+Sometimes, mostly for debugging purposes, it is useful to change the number of
+physical registers available in the target architecture. This must be done
+statically, inside the ``TargetRegisterInfo.td`` file. Just ``grep`` for
+``RegisterClass``, the last parameter of which is a list of registers.
+Commenting some of them out is one simple way to avoid them being used. A more polite
+way is to explicitly exclude some registers from the *allocation order*. See the
+definition of the ``GR8`` register class in
+``lib/Target/X86/X86RegisterInfo.td`` for an example of this.
+
+Virtual registers are also denoted by integer numbers. Contrary to physical
+registers, different virtual registers never share the same number. Whereas
+physical registers are statically defined in a ``TargetRegisterInfo.td`` file
+and cannot be created by the application developer, that is not the case with
+virtual registers. In order to create new virtual registers, use the method
+``MachineRegisterInfo::createVirtualRegister()``. This method will return a new
+virtual register. Use an ``IndexedMap<Foo, VirtReg2IndexFunctor>`` to hold
+information per virtual register. If you need to enumerate all virtual
+registers, use the function ``TargetRegisterInfo::index2VirtReg()`` to find the
+virtual register numbers:
+
+.. code-block:: c++
+
+  for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
+    unsigned VirtReg = TargetRegisterInfo::index2VirtReg(i);
+    stuff(VirtReg);
+  }
+
+Before register allocation, the operands of an instruction are mostly virtual
+registers, although physical registers may also be used. In order to check if a
+given machine operand is a register, use the boolean function
+``MachineOperand::isRegister()``. To obtain the integer code of a register, use
+``MachineOperand::getReg()``. An instruction may define or use a register. For
+instance, ``ADD reg:1026 := reg:1025 reg:1024`` defines register 1026, and
+uses registers 1025 and 1024. Given a register operand, the method
+``MachineOperand::isUse()`` informs if that register is being used by the
+instruction. The method ``MachineOperand::isDef()`` informs if that register is
+being defined.
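+
+As a minimal sketch (the helper is hypothetical, and the register predicate is
+spelled ``isReg()`` in current trees), the queries above can be combined to
+report every register operand of an instruction and how it is accessed:
+
+.. code-block:: c++
+
+  #include "llvm/CodeGen/MachineInstr.h"
+  #include "llvm/CodeGen/MachineOperand.h"
+  #include "llvm/Support/raw_ostream.h"
+
+  using namespace llvm;
+
+  // Hypothetical helper: report, for each register operand of MI, whether it
+  // is defined and/or used by the instruction.
+  static void reportRegisterOperands(const MachineInstr &MI) {
+    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
+      const MachineOperand &MO = MI.getOperand(i);
+      if (!MO.isReg() || MO.getReg() == 0)
+        continue;  // skip immediates, frame indices, etc.
+      errs() << "reg " << MO.getReg()
+             << (MO.isDef() ? " def" : "")
+             << (MO.isUse() ? " use" : "") << "\n";
+    }
+  }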
+
+We will call physical registers present in the LLVM bitcode before register
+allocation *pre-colored registers*. Pre-colored registers are used in many
+different situations, for instance, to pass parameters of function calls, and
+to store results of particular instructions. There are two types of pre-colored
+registers: the ones *implicitly* defined, and those *explicitly*
+defined. Explicitly defined registers are normal operands, and can be accessed
+with ``MachineInstr::getOperand(int)::getReg()``. In order to check which
+registers are implicitly defined by an instruction, use the
+``TargetInstrInfo::get(opcode)::ImplicitDefs``, where ``opcode`` is the opcode
+of the target instruction. One important difference between explicit and
+implicit physical registers is that the latter are defined statically for each
+instruction, whereas the former may vary depending on the program being
+compiled. For example, an instruction that represents a function call will
+always implicitly define or use the same set of physical registers. To read the
+registers implicitly used by an instruction, use
+``TargetInstrInfo::get(opcode)::ImplicitUses``. Pre-colored registers impose
+constraints on any register allocation algorithm. The register allocator must
+make sure that none of them are overwritten by the values of virtual registers
+while still alive.
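+
+As a hedged sketch (the helper is hypothetical, and the exact element type of
+the implicit-def list varies between LLVM versions), reading the implicitly
+defined registers of an opcode through the descriptor returned by
+``TargetInstrInfo::get()`` might look like this:
+
+.. code-block:: c++
+
+  #include <stdint.h>
+  #include "llvm/MC/MCInstrDesc.h"
+  #include "llvm/Support/raw_ostream.h"
+  #include "llvm/Target/TargetInstrInfo.h"
+
+  using namespace llvm;
+
+  // Hypothetical helper: print the physical registers an opcode always
+  // implicitly defines.  ImplicitDefs is a 0-terminated array of register
+  // numbers.
+  static void printImplicitDefs(const TargetInstrInfo &TII, unsigned Opcode) {
+    const MCInstrDesc &Desc = TII.get(Opcode);
+    if (const uint16_t *Regs = Desc.getImplicitDefs())
+      for (; *Regs; ++Regs)
+        errs() << "implicit def of physreg " << *Regs << "\n";
+  }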
+
+Mapping virtual registers to physical registers
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+There are two ways to map virtual registers to physical registers (or to memory
+slots). The first way, that we will call *direct mapping*, is based on the use
+of methods of the classes ``TargetRegisterInfo``, and ``MachineOperand``. The
+second way, that we will call *indirect mapping*, relies on the ``VirtRegMap``
+class in order to insert loads and stores sending and getting values to and from
+memory.
+
+The direct mapping provides more flexibility to the developer of the register
+allocator; however, it is more error prone, and demands more implementation
+work. Basically, the programmer will have to specify where load and store
+instructions should be inserted in the target function being compiled in order
+to get and store values in memory. To assign a physical register to a virtual
+register present in a given operand, use ``MachineOperand::setReg(p_reg)``. To
+insert a store instruction, use ``TargetInstrInfo::storeRegToStackSlot(...)``,
+and to insert a load instruction, use ``TargetInstrInfo::loadRegFromStackSlot``.
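+
+As a sketch of the direct-mapping style (the helper and the ``VRegToPReg`` map
+are hypothetical, and spill-code insertion through
+``storeRegToStackSlot``/``loadRegFromStackSlot`` is omitted for brevity),
+rewriting the operands of one instruction might look like this:
+
+.. code-block:: c++
+
+  #include "llvm/ADT/DenseMap.h"
+  #include "llvm/CodeGen/MachineInstr.h"
+  #include "llvm/CodeGen/MachineOperand.h"
+
+  using namespace llvm;
+
+  // Hypothetical helper: replace every virtual register operand of MI with
+  // the physical register the allocator chose for it.
+  static void rewriteOperands(MachineInstr &MI,
+                              const DenseMap<unsigned, unsigned> &VRegToPReg) {
+    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
+      MachineOperand &MO = MI.getOperand(i);
+      if (!MO.isReg())
+        continue;
+      DenseMap<unsigned, unsigned>::const_iterator It =
+          VRegToPReg.find(MO.getReg());
+      if (It != VRegToPReg.end())
+        MO.setReg(It->second);  // now refers to a physical register
+    }
+  }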
+
+The indirect mapping shields the application developer from the complexities of
+inserting load and store instructions. In order to map a virtual register to a
+physical one, use ``VirtRegMap::assignVirt2Phys(vreg, preg)``. In order to map
+a certain virtual register to memory, use
+``VirtRegMap::assignVirt2StackSlot(vreg)``. This method will return the stack
+slot where ``vreg``'s value will be located. If it is necessary to map another
+virtual register to the same stack slot, use
+``VirtRegMap::assignVirt2StackSlot(vreg, stack_location)``. One important point
+to consider when using the indirect mapping, is that even if a virtual register
+is mapped to memory, it still needs to be mapped to a physical register. This
+physical register is the location where the virtual register is supposed to be
+found before being stored or after being reloaded.
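+
+A minimal sketch of the indirect-mapping calls described above (the spill
+policy is hypothetical, and ``VirtRegMap.h`` is a private header under
+``lib/CodeGen`` in this tree):
+
+.. code-block:: c++
+
+  #include "VirtRegMap.h"  // private header in lib/CodeGen
+
+  using namespace llvm;
+
+  // Hypothetical helper: record the allocator's decision for one virtual
+  // register, either a physical register or a spill slot.
+  static void recordAssignment(VirtRegMap &VRM, unsigned VReg,
+                               unsigned PhysReg, bool Spill) {
+    if (!Spill) {
+      VRM.assignVirt2Phys(VReg, PhysReg);        // value lives in PhysReg
+    } else {
+      int Slot = VRM.assignVirt2StackSlot(VReg); // reserve a stack slot
+      (void)Slot;  // a spiller later inserts the loads/stores for this slot
+    }
+  }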
+
+If the indirect strategy is used, after all the virtual registers have been
+mapped to physical registers or stack slots, it is necessary to use a spiller
+object to place load and store instructions in the code. Every virtual that has
+been mapped to a stack slot will be stored to memory after being defined and will
+be loaded before being used. The implementation of the spiller tries to recycle
+load/store instructions, avoiding unnecessary instructions. For an example of
+how to invoke the spiller, see ``RegAllocLinearScan::runOnMachineFunction`` in
+``lib/CodeGen/RegAllocLinearScan.cpp``.
+
+Handling two address instructions
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+With very rare exceptions (e.g., function calls), the LLVM machine code
+instructions are three address instructions. That is, each instruction is
+expected to define at most one register, and to use at most two registers.
+However, some architectures use two address instructions. In this case, the
+defined register is also one of the used registers. For instance, an instruction
+such as ``ADD %EAX, %EBX`` in X86 is actually equivalent to ``%EAX = %EAX +
+%EBX``.
+
+In order to produce correct code, LLVM must convert three address instructions
+that represent two address instructions into true two address instructions. LLVM
+provides the pass ``TwoAddressInstructionPass`` for this specific purpose. It
+must be run before register allocation takes place. After its execution, the
+resulting code may no longer be in SSA form. This happens, for instance, in
+situations where an instruction such as ``%a = ADD %b %c`` is converted to two
+instructions such as:
+
+::
+
+ %a = MOVE %b
+ %a = ADD %a %c
+
+Notice that, internally, the second instruction is represented as ``ADD
+%a[def/use] %c``. I.e., the register operand ``%a`` is both used and defined by
+the instruction.
+
+The SSA deconstruction phase
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+An important transformation that happens during register allocation is called
+the *SSA Deconstruction Phase*. The SSA form simplifies many analyses that are
+performed on the control flow graph of programs. However, traditional
+instruction sets do not implement PHI instructions. Thus, in order to generate
+executable code, compilers must replace PHI instructions with other instructions
+that preserve their semantics.
+
+There are many ways in which PHI instructions can safely be removed from the
+target code. The most traditional PHI deconstruction algorithm replaces PHI
+instructions with copy instructions. That is the strategy adopted by LLVM. The
+SSA deconstruction algorithm is implemented in
+``lib/CodeGen/PHIElimination.cpp``. In order to invoke this pass, the identifier
+``PHIEliminationID`` must be marked as required in the code of the register
+allocator.
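+
+For illustration, a hedged sketch of how a (hypothetical) register allocator
+pass marks PHI elimination as required, so that SSA deconstruction runs before
+it:
+
+.. code-block:: c++
+
+  #include "llvm/CodeGen/MachineFunctionPass.h"
+  #include "llvm/CodeGen/Passes.h"
+
+  using namespace llvm;
+
+  namespace {
+  // Hypothetical skeleton allocator pass.
+  struct MyRegAlloc : public MachineFunctionPass {
+    static char ID;
+    MyRegAlloc() : MachineFunctionPass(ID) {}
+
+    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+      AU.addRequiredID(PHIEliminationID);  // deconstruct SSA before we run
+      MachineFunctionPass::getAnalysisUsage(AU);
+    }
+
+    virtual bool runOnMachineFunction(MachineFunction &MF) {
+      return false;  // real allocation work would go here
+    }
+  };
+  }
+
+  char MyRegAlloc::ID = 0;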
+
+Instruction folding
+^^^^^^^^^^^^^^^^^^^
+
+*Instruction folding* is an optimization performed during register allocation
+that removes unnecessary copy instructions. For instance, a sequence of
+instructions such as:
+
+::
+
+ %EBX = LOAD %mem_address
+ %EAX = COPY %EBX
+
+can be safely substituted by the single instruction:
+
+::
+
+ %EAX = LOAD %mem_address
+
+Instructions can be folded with the
+``TargetRegisterInfo::foldMemoryOperand(...)`` method. Care must be taken when
+folding instructions; a folded instruction can be quite different from the
+original instruction. See ``LiveIntervals::addIntervalsForSpills`` in
+``lib/CodeGen/LiveIntervalAnalysis.cpp`` for an example of its use.
+
+Built in register allocators
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The LLVM infrastructure provides the application developer with four different
+register allocators:
+
+* *Fast* --- This register allocator is the default for debug builds. It
+ allocates registers on a basic block level, attempting to keep values in
+ registers and reusing registers as appropriate.
+
+* *Basic* --- This is an incremental approach to register allocation. Live
+ ranges are assigned to registers one at a time in an order that is driven by
+ heuristics. Since code can be rewritten on-the-fly during allocation, this
+ framework allows interesting allocators to be developed as extensions. It is
+ not itself a production register allocator but is a potentially useful
+ stand-alone mode for triaging bugs and as a performance baseline.
+
+* *Greedy* --- *The default allocator*. This is a highly tuned implementation of
+ the *Basic* allocator that incorporates global live range splitting. This
+ allocator works hard to minimize the cost of spill code.
+
+* *PBQP* --- A Partitioned Boolean Quadratic Programming (PBQP) based register
+ allocator. This allocator works by constructing a PBQP problem representing
+ the register allocation problem under consideration, solving this using a PBQP
+ solver, and mapping the solution back to a register assignment.
+
+The type of register allocator used in ``llc`` can be chosen with the command
+line option ``-regalloc=...``:
+
+.. code-block:: bash
+
+ $ llc -regalloc=greedy file.bc -o gr.s
+ $ llc -regalloc=fast file.bc -o fa.s
+ $ llc -regalloc=pbqp file.bc -o pbqp.s
+
+.. _Prolog/Epilog Code Insertion:
+
+Prolog/Epilog Code Insertion
+----------------------------
+
+Compact Unwind
+^^^^^^^^^^^^^^
+
+Throwing an exception requires *unwinding* out of a function. The information on
+how to unwind a given function is traditionally expressed in DWARF unwind
+(a.k.a. frame) info. But that format was originally developed for debuggers to
+backtrace, and each Frame Description Entry (FDE) requires ~20-30 bytes per
+function. There is also the cost of mapping from an address in a function to the
+corresponding FDE at runtime. An alternative unwind encoding is called *compact
+unwind* and requires just 4-bytes per function.
+
+The compact unwind encoding is a 32-bit value, which is encoded in an
+architecture-specific way. It specifies which registers to restore and from
+where, and how to unwind out of the function. When the linker creates a final
+linked image, it will create a ``__TEXT,__unwind_info`` section. This section is
+a small and fast way for the runtime to access unwind info for any given
+function. If we emit compact unwind info for the function, that compact unwind
+info will be encoded in the ``__TEXT,__unwind_info`` section. If we emit DWARF
+unwind info, the ``__TEXT,__unwind_info`` section will contain the offset of the
+FDE in the ``__TEXT,__eh_frame`` section in the final linked image.
+
+For X86, there are three modes for the compact unwind encoding:
+
+*Function with a Frame Pointer (``EBP`` or ``RBP``)*
+ ``EBP/RBP``-based frame, where ``EBP/RBP`` is pushed onto the stack
+ immediately after the return address, then ``ESP/RSP`` is moved to
+ ``EBP/RBP``. Thus to unwind, ``ESP/RSP`` is restored with the current
+ ``EBP/RBP`` value, then ``EBP/RBP`` is restored by popping the stack, and the
+ return is done by popping the stack once more into the PC. All non-volatile
+ registers that need to be restored must have been saved in a small range on
+ the stack that starts ``EBP-4`` to ``EBP-1020`` (``RBP-8`` to
+ ``RBP-1020``). The offset (divided by 4 in 32-bit mode and 8 in 64-bit mode)
+ is encoded in bits 16-23 (mask: ``0x00FF0000``). The registers saved are
+ encoded in bits 0-14 (mask: ``0x00007FFF``) as five 3-bit entries from the
+ following table:
+
+ ============== ============= ===============
+ Compact Number i386 Register x86-64 Register
+ ============== ============= ===============
+ 1 ``EBX`` ``RBX``
+ 2 ``ECX`` ``R12``
+ 3 ``EDX`` ``R13``
+ 4 ``EDI`` ``R14``
+ 5 ``ESI`` ``R15``
+ 6 ``EBP`` ``RBP``
+ ============== ============= ===============
+
+*Frameless with a Small Constant Stack Size (``EBP`` or ``RBP`` is not used as a frame pointer)*
+ To return, a constant (encoded in the compact unwind encoding) is added to the
+ ``ESP/RSP``. Then the return is done by popping the stack into the PC. All
+ non-volatile registers that need to be restored must have been saved on the
+ stack immediately after the return address. The stack size (divided by 4 in
+ 32-bit mode and 8 in 64-bit mode) is encoded in bits 16-23 (mask:
+ ``0x00FF0000``). There is a maximum stack size of 1024 bytes in 32-bit mode
+ and 2048 in 64-bit mode. The number of registers saved is encoded in bits
+ 10-12 (mask: ``0x00001C00``). Bits 0-9 (mask: ``0x000003FF``) contain which
+ registers were saved and their order, as sketched after this list. (See the
+ ``encodeCompactUnwindRegistersWithoutFrame()`` function in
+ ``lib/Target/X86FrameLowering.cpp`` for the encoding algorithm.)
+
+*Frameless with a Large Constant Stack Size (``EBP`` or ``RBP`` is not used as a frame pointer)*
+ This case is like the "Frameless with a Small Constant Stack Size" case, but
+ the stack size is too large to encode in the compact unwind encoding. Instead
+ it requires that the function contains "``subl $nnnnnn, %esp``" in its
+ prolog. The compact encoding contains the offset to the ``$nnnnnn`` value in
+ the function in bits 10-12 (mask: ``0x00001C00``).
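+
+Purely as an illustration of the "Frameless with a Small Constant Stack Size"
+layout (this is not the encoder LLVM actually uses, and the struct and
+function names are made up), the fields can be extracted from an encoding with
+the masks given above:
+
+.. code-block:: c++
+
+  #include <stdint.h>
+
+  // Illustrative decode of the frameless, small-constant-stack-size mode.
+  struct FramelessSmallInfo {
+    uint32_t StackSize;         // in 4-byte (32-bit) or 8-byte (64-bit) units
+    uint32_t NumSavedRegs;      // how many non-volatile registers were pushed
+    uint32_t SavedRegsEncoding; // which registers, and in what order
+  };
+
+  static FramelessSmallInfo decodeFramelessSmall(uint32_t Encoding) {
+    FramelessSmallInfo Info;
+    Info.StackSize         = (Encoding & 0x00FF0000) >> 16;  // bits 16-23
+    Info.NumSavedRegs      = (Encoding & 0x00001C00) >> 10;  // bits 10-12
+    Info.SavedRegsEncoding =  Encoding & 0x000003FF;         // bits 0-9
+    return Info;
+  }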
+
+.. _Late Machine Code Optimizations:
+
+Late Machine Code Optimizations
+-------------------------------
+
+.. note::
+
+ To Be Written
+
+.. _Code Emission:
+
+Code Emission
+-------------
+
+The code emission step of code generation is responsible for lowering from the
+code generator abstractions (like `MachineFunction`_, `MachineInstr`_, etc) down
+to the abstractions used by the MC layer (`MCInst`_, `MCStreamer`_, etc). This
+is done with a combination of several different classes: the (misnamed)
+target-independent AsmPrinter class, target-specific subclasses of AsmPrinter
+(such as SparcAsmPrinter), and the TargetLoweringObjectFile class.
+
+Since the MC layer works at the level of abstraction of object files, it doesn't
+have a notion of functions, global variables etc. Instead, it thinks about
+labels, directives, and instructions. A key class used at this time is the
+MCStreamer class. This is an abstract API that is implemented in different ways
+(e.g. to output a .s file, output an ELF .o file, etc) that is effectively an
+"assembler API". MCStreamer has one method per directive, such as EmitLabel,
+EmitSymbolAttribute, SwitchSection, etc, which directly correspond to assembly
+level directives.
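+
+For a feel of this "assembler API", here is a hedged sketch (the symbol name
+and the helper function are made up) that switches to a given section, marks a
+symbol as global, and emits its label:
+
+.. code-block:: c++
+
+  #include "llvm/ADT/StringRef.h"
+  #include "llvm/MC/MCContext.h"
+  #include "llvm/MC/MCStreamer.h"
+  #include "llvm/MC/MCSymbol.h"
+
+  using namespace llvm;
+
+  // Hypothetical helper: roughly equivalent to emitting
+  //   .section <TextSection>
+  //   .globl   my_function
+  //   my_function:
+  static void emitGlobalLabel(MCStreamer &Out, MCContext &Ctx,
+                              const MCSection *TextSection) {
+    MCSymbol *Sym = Ctx.GetOrCreateSymbol(StringRef("my_function"));
+    Out.SwitchSection(TextSection);
+    Out.EmitSymbolAttribute(Sym, MCSA_Global);
+    Out.EmitLabel(Sym);
+  }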
+
+If you are interested in implementing a code generator for a target, there are
+three important things that you have to implement for your target:
+
+#. First, you need a subclass of AsmPrinter for your target. This class
+ implements the general lowering process converting MachineFunction's into MC
+ label constructs. The AsmPrinter base class provides a number of useful
+ methods and routines, and also allows you to override the lowering process in
+ some important ways. You should get much of the lowering for free if you are
+ implementing an ELF, COFF, or MachO target, because the
+ TargetLoweringObjectFile class implements much of the common logic.
+
+#. Second, you need to implement an instruction printer for your target. The
+ instruction printer takes an `MCInst`_ and renders it to a raw_ostream as
+ text. Most of this is automatically generated from the .td file (when you
+ specify something like "``add $dst, $src1, $src2``" in the instructions), but
+ you need to implement routines to print operands.
+
+#. Third, you need to implement code that lowers a `MachineInstr`_ to an MCInst,
+ usually implemented in "<target>MCInstLower.cpp". This lowering process is
+ often target specific, and is responsible for turning jump table entries,
+ constant pool indices, global variable addresses, etc into MCLabels as
+ appropriate. This translation layer is also responsible for expanding pseudo
+ ops used by the code generator into the actual machine instructions they
+ correspond to. The MCInsts that are generated by this are fed into the
+ instruction printer or the encoder.
+
+Finally, at your choosing, you can also implement a subclass of MCCodeEmitter
+which lowers MCInst's into machine code bytes and relocations. This is
+important if you want to support direct .o file emission, or would like to
+implement an assembler for your target.
+
+VLIW Packetizer
+---------------
+
+In a Very Long Instruction Word (VLIW) architecture, the compiler is responsible
+for mapping instructions to functional-units available on the architecture. To
+that end, the compiler creates groups of instructions called *packets* or
+*bundles*. The VLIW packetizer in LLVM is a target-independent mechanism to
+enable the packetization of machine instructions.
+
+Mapping from instructions to functional units
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Instructions in a VLIW target can typically be mapped to multiple functional
+units. During the process of packetizing, the compiler must be able to reason
+about whether an instruction can be added to a packet. This decision can be
+complex since the compiler has to examine all possible mappings of instructions
+to functional units. Therefore to alleviate compilation-time complexity, the
+VLIW packetizer parses the instruction classes of a target and generates tables
+at compiler build time. These tables can then be queried by the provided
+machine-independent API to determine if an instruction can be accommodated in a
+packet.
+
+How the packetization tables are generated and used
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The packetizer reads instruction classes from a target's itineraries and creates
+a deterministic finite automaton (DFA) to represent the state of a packet. A DFA
+consists of three major elements: inputs, states, and transitions. The set of
+inputs for the generated DFA represents the instruction being added to a
+packet. The states represent the possible consumption of functional units by
+instructions in a packet. In the DFA, transitions from one state to another
+occur on the addition of an instruction to an existing packet. If there is a
+legal mapping of functional units to instructions, then the DFA contains a
+corresponding transition. The absence of a transition indicates that a legal
+mapping does not exist and that the instruction cannot be added to the packet.
+
+To generate tables for a VLIW target, add *Target*\ GenDFAPacketizer.inc as a
+target to the Makefile in the target directory. The exported API provides three
+functions: ``DFAPacketizer::clearResources()``,
+``DFAPacketizer::reserveResources(MachineInstr *MI)``, and
+``DFAPacketizer::canReserveResources(MachineInstr *MI)``. These functions allow
+a target packetizer to add an instruction to an existing packet and to check
+whether an instruction can be added to a packet. See
+``llvm/CodeGen/DFAPacketizer.h`` for more information.
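+
+For illustration, a minimal hedged sketch of how a target packetizer might use
+these three calls (the helper function is hypothetical):
+
+.. code-block:: c++
+
+  #include "llvm/CodeGen/DFAPacketizer.h"
+  #include "llvm/CodeGen/MachineInstr.h"
+
+  using namespace llvm;
+
+  // Hypothetical helper: try to add MI to the packet currently being built.
+  // Returns false when MI does not fit; the caller should then end the packet
+  // and call DFA.clearResources() before starting a new one.
+  static bool tryAddToPacket(DFAPacketizer &DFA, MachineInstr *MI) {
+    if (!DFA.canReserveResources(MI))
+      return false;
+    DFA.reserveResources(MI);  // consume the functional units MI needs
+    return true;
+  }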
+
+Implementing a Native Assembler
+===============================
+
+Though you're probably reading this because you want to write or maintain a
+compiler backend, LLVM also fully supports building native assemblers.
+We've tried hard to automate the generation of the assembler from the .td files
+(in particular the instruction syntax and encodings), which means that a large
+part of the manual and repetitive data entry can be factored and shared with the
+compiler.
+
+Instruction Parsing
+-------------------
+
+.. note::
+
+ To Be Written
+
+
+Instruction Alias Processing
+----------------------------
+
+Once the instruction is parsed, it enters the MatchInstructionImpl function.
+The MatchInstructionImpl function performs alias processing and then does actual
+matching.
+
+Alias processing is the phase that canonicalizes different lexical forms of the
+same instructions down to one representation. There are several different kinds
+of alias that are possible to implement and they are listed below in the order
+that they are processed (which is in order from simplest/weakest to most
+complex/powerful). Generally you want to use the first alias mechanism that
+meets the needs of your instruction, because it will allow a more concise
+description.
+
+Mnemonic Aliases
+^^^^^^^^^^^^^^^^
+
+The first phase of alias processing is simple instruction mnemonic remapping for
+classes of instructions which are allowed with two different mnemonics. This
+phase is a simple, unconditional remapping from one input mnemonic to one
+output mnemonic. It isn't possible for this form of alias to look at the
+operands at all, so the remapping must apply for all forms of a given mnemonic.
+Mnemonic aliases are defined simply, for example X86 has:
+
+::
+
+ def : MnemonicAlias<"cbw", "cbtw">;
+ def : MnemonicAlias<"smovq", "movsq">;
+ def : MnemonicAlias<"fldcww", "fldcw">;
+ def : MnemonicAlias<"fucompi", "fucomip">;
+ def : MnemonicAlias<"ud2a", "ud2">;
+
+... and many others. With a MnemonicAlias definition, the mnemonic is remapped
+simply and directly. Though MnemonicAlias's can't look at any aspect of the
+instruction (such as the operands) they can depend on global modes (the same
+ones supported by the matcher), through a Requires clause:
+
+::
+
+ def : MnemonicAlias<"pushf", "pushfq">, Requires<[In64BitMode]>;
+ def : MnemonicAlias<"pushf", "pushfl">, Requires<[In32BitMode]>;
+
+In this example, the mnemonic is mapped to a different one depending on
+the current instruction set.
+
+Instruction Aliases
+^^^^^^^^^^^^^^^^^^^
+
+The most general phase of alias processing occurs while matching is happening:
+it provides new forms for the matcher to match along with a specific instruction
+to generate. An instruction alias has two parts: the string to match and the
+instruction to generate. For example:
+
+::
+
+ def : InstAlias<"movsx $src, $dst", (MOVSX16rr8W GR16:$dst, GR8 :$src)>;
+ def : InstAlias<"movsx $src, $dst", (MOVSX16rm8W GR16:$dst, i8mem:$src)>;
+ def : InstAlias<"movsx $src, $dst", (MOVSX32rr8 GR32:$dst, GR8 :$src)>;
+ def : InstAlias<"movsx $src, $dst", (MOVSX32rr16 GR32:$dst, GR16 :$src)>;
+ def : InstAlias<"movsx $src, $dst", (MOVSX64rr8 GR64:$dst, GR8 :$src)>;
+ def : InstAlias<"movsx $src, $dst", (MOVSX64rr16 GR64:$dst, GR16 :$src)>;
+ def : InstAlias<"movsx $src, $dst", (MOVSX64rr32 GR64:$dst, GR32 :$src)>;
+
+This example shows the power of instruction aliases, matching the same
+mnemonic in multiple different ways depending on which operands are present in
+the assembly. The result of instruction aliases can include operands in a
+different order than the destination instruction, and can use an input multiple
+times, for example:
+
+::
+
+ def : InstAlias<"clrb $reg", (XOR8rr GR8 :$reg, GR8 :$reg)>;
+ def : InstAlias<"clrw $reg", (XOR16rr GR16:$reg, GR16:$reg)>;
+ def : InstAlias<"clrl $reg", (XOR32rr GR32:$reg, GR32:$reg)>;
+ def : InstAlias<"clrq $reg", (XOR64rr GR64:$reg, GR64:$reg)>;
+
+This example also shows that tied operands are only listed once. In the X86
+backend, XOR8rr has two input GR8's and one output GR8 (where an input is tied
+to the output). InstAliases take a flattened operand list without duplicates
+for tied operands. The result of an instruction alias can also use immediates
+and fixed physical registers which are added as simple immediate operands in the
+result, for example:
+
+::
+
+ // Fixed Immediate operand.
+ def : InstAlias<"aad", (AAD8i8 10)>;
+
+ // Fixed register operand.
+ def : InstAlias<"fcomi", (COM_FIr ST1)>;
+
+ // Simple alias.
+ def : InstAlias<"fcomi $reg", (COM_FIr RST:$reg)>;
+
+Instruction aliases can also have a Requires clause to make them subtarget
+specific.
+
+If the back-end supports it, the instruction printer can automatically emit the
+alias rather than what's being aliased. It typically leads to better, more
+readable code. If it's better to print out what's being aliased, then pass a '0'
+as the third parameter to the InstAlias definition.
+
+Instruction Matching
+--------------------
+
+.. note::
+
+ To Be Written
+
+.. _Implementations of the abstract target description interfaces:
+.. _implement the target description:
+
+Target-specific Implementation Notes
+====================================
+
+This section of the document explains features or design decisions that are
+specific to the code generator for a particular target. First we start with a
+table that summarizes what features are supported by each target.
+
+Target Feature Matrix
+---------------------
+
+Note that this table does not include the C or C++ backends, since they
+do not use the target independent code generator infrastructure. It also
+doesn't list features that are not supported fully by any target yet. It
+considers a feature to be supported if at least one subtarget supports it. A
+feature being supported means that it is useful and works for most cases; it
+does not indicate that there are zero known bugs in the implementation. Here is
+the key:
+
+:raw-html:`<table border="1" cellspacing="0">`
+:raw-html:`<tr>`
+:raw-html:`<th>Unknown</th>`
+:raw-html:`<th>No support</th>`
+:raw-html:`<th>Partial Support</th>`
+:raw-html:`<th>Complete Support</th>`
+:raw-html:`</tr>`
+:raw-html:`<tr>`
+:raw-html:`<td class="unknown"></td>`
+:raw-html:`<td class="no"></td>`
+:raw-html:`<td class="partial"></td>`
+:raw-html:`<td class="yes"></td>`
+:raw-html:`</tr>`
+:raw-html:`</table>`
+
+Here is the table:
+
+:raw-html:`<table width="689" border="1" cellspacing="0">`
+:raw-html:`<tr><td></td>`
+:raw-html:`<td colspan="13" align="center" style="background-color:#ffc">Target</td>`
+:raw-html:`</tr>`
+:raw-html:`<tr>`
+:raw-html:`<th>Feature</th>`
+:raw-html:`<th>ARM</th>`
+:raw-html:`<th>CellSPU</th>`
+:raw-html:`<th>Hexagon</th>`
+:raw-html:`<th>MBlaze</th>`
+:raw-html:`<th>MSP430</th>`
+:raw-html:`<th>Mips</th>`
+:raw-html:`<th>PTX</th>`
+:raw-html:`<th>PowerPC</th>`
+:raw-html:`<th>Sparc</th>`
+:raw-html:`<th>X86</th>`
+:raw-html:`<th>XCore</th>`
+:raw-html:`</tr>`
+
+:raw-html:`<tr>`
+:raw-html:`<td><a href="#feat_reliable">is generally reliable</a></td>`
+:raw-html:`<td class="yes"></td> <!-- ARM -->`
+:raw-html:`<td class="no"></td> <!-- CellSPU -->`
+:raw-html:`<td class="yes"></td> <!-- Hexagon -->`
+:raw-html:`<td class="no"></td> <!-- MBlaze -->`
+:raw-html:`<td class="unknown"></td> <!-- MSP430 -->`
+:raw-html:`<td class="yes"></td> <!-- Mips -->`
+:raw-html:`<td class="no"></td> <!-- PTX -->`
+:raw-html:`<td class="yes"></td> <!-- PowerPC -->`
+:raw-html:`<td class="yes"></td> <!-- Sparc -->`
+:raw-html:`<td class="yes"></td> <!-- X86 -->`
+:raw-html:`<td class="unknown"></td> <!-- XCore -->`
+:raw-html:`</tr>`
+
+:raw-html:`<tr>`
+:raw-html:`<td><a href="#feat_asmparser">assembly parser</a></td>`
+:raw-html:`<td class="no"></td> <!-- ARM -->`
+:raw-html:`<td class="no"></td> <!-- CellSPU -->`
+:raw-html:`<td class="no"></td> <!-- Hexagon -->`
+:raw-html:`<td class="yes"></td> <!-- MBlaze -->`
+:raw-html:`<td class="no"></td> <!-- MSP430 -->`
+:raw-html:`<td class="no"></td> <!-- Mips -->`
+:raw-html:`<td class="no"></td> <!-- PTX -->`
+:raw-html:`<td class="no"></td> <!-- PowerPC -->`
+:raw-html:`<td class="no"></td> <!-- Sparc -->`
+:raw-html:`<td class="yes"></td> <!-- X86 -->`
+:raw-html:`<td class="no"></td> <!-- XCore -->`
+:raw-html:`</tr>`
+
+:raw-html:`<tr>`
+:raw-html:`<td><a href="#feat_disassembler">disassembler</a></td>`
+:raw-html:`<td class="yes"></td> <!-- ARM -->`
+:raw-html:`<td class="no"></td> <!-- CellSPU -->`
+:raw-html:`<td class="no"></td> <!-- Hexagon -->`
+:raw-html:`<td class="yes"></td> <!-- MBlaze -->`
+:raw-html:`<td class="no"></td> <!-- MSP430 -->`
+:raw-html:`<td class="no"></td> <!-- Mips -->`
+:raw-html:`<td class="no"></td> <!-- PTX -->`
+:raw-html:`<td class="no"></td> <!-- PowerPC -->`
+:raw-html:`<td class="no"></td> <!-- Sparc -->`
+:raw-html:`<td class="yes"></td> <!-- X86 -->`
+:raw-html:`<td class="no"></td> <!-- XCore -->`
+:raw-html:`</tr>`
+
+:raw-html:`<tr>`
+:raw-html:`<td><a href="#feat_inlineasm">inline asm</a></td>`
+:raw-html:`<td class="yes"></td> <!-- ARM -->`
+:raw-html:`<td class="no"></td> <!-- CellSPU -->`
+:raw-html:`<td class="yes"></td> <!-- Hexagon -->`
+:raw-html:`<td class="yes"></td> <!-- MBlaze -->`
+:raw-html:`<td class="unknown"></td> <!-- MSP430 -->`
+:raw-html:`<td class="no"></td> <!-- Mips -->`
+:raw-html:`<td class="unknown"></td> <!-- PTX -->`
+:raw-html:`<td class="yes"></td> <!-- PowerPC -->`
+:raw-html:`<td class="unknown"></td> <!-- Sparc -->`
+:raw-html:`<td class="yes"></td> <!-- X86 -->`
+:raw-html:`<td class="unknown"></td> <!-- XCore -->`
+:raw-html:`</tr>`
+
+:raw-html:`<tr>`
+:raw-html:`<td><a href="#feat_jit">jit</a></td>`
+:raw-html:`<td class="partial"><a href="#feat_jit_arm">*</a></td> <!-- ARM -->`
+:raw-html:`<td class="no"></td> <!-- CellSPU -->`
+:raw-html:`<td class="no"></td> <!-- Hexagon -->`
+:raw-html:`<td class="no"></td> <!-- MBlaze -->`
+:raw-html:`<td class="unknown"></td> <!-- MSP430 -->`
+:raw-html:`<td class="yes"></td> <!-- Mips -->`
+:raw-html:`<td class="unknown"></td> <!-- PTX -->`
+:raw-html:`<td class="yes"></td> <!-- PowerPC -->`
+:raw-html:`<td class="unknown"></td> <!-- Sparc -->`
+:raw-html:`<td class="yes"></td> <!-- X86 -->`
+:raw-html:`<td class="unknown"></td> <!-- XCore -->`
+:raw-html:`</tr>`
+
+:raw-html:`<tr>`
+:raw-html:`<td><a href="#feat_objectwrite">.o&nbsp;file writing</a></td>`
+:raw-html:`<td class="no"></td> <!-- ARM -->`
+:raw-html:`<td class="no"></td> <!-- CellSPU -->`
+:raw-html:`<td class="no"></td> <!-- Hexagon -->`
+:raw-html:`<td class="yes"></td> <!-- MBlaze -->`
+:raw-html:`<td class="no"></td> <!-- MSP430 -->`
+:raw-html:`<td class="no"></td> <!-- Mips -->`
+:raw-html:`<td class="no"></td> <!-- PTX -->`
+:raw-html:`<td class="no"></td> <!-- PowerPC -->`
+:raw-html:`<td class="no"></td> <!-- Sparc -->`
+:raw-html:`<td class="yes"></td> <!-- X86 -->`
+:raw-html:`<td class="no"></td> <!-- XCore -->`
+:raw-html:`</tr>`
+
+:raw-html:`<tr>`
+:raw-html:`<td><a href="#feat_tailcall">tail calls</a></td>`
+:raw-html:`<td class="yes"></td> <!-- ARM -->`
+:raw-html:`<td class="no"></td> <!-- CellSPU -->`
+:raw-html:`<td class="yes"></td> <!-- Hexagon -->`
+:raw-html:`<td class="no"></td> <!-- MBlaze -->`
+:raw-html:`<td class="unknown"></td> <!-- MSP430 -->`
+:raw-html:`<td class="no"></td> <!-- Mips -->`
+:raw-html:`<td class="unknown"></td> <!-- PTX -->`
+:raw-html:`<td class="yes"></td> <!-- PowerPC -->`
+:raw-html:`<td class="unknown"></td> <!-- Sparc -->`
+:raw-html:`<td class="yes"></td> <!-- X86 -->`
+:raw-html:`<td class="unknown"></td> <!-- XCore -->`
+:raw-html:`</tr>`
+
+:raw-html:`<tr>`
+:raw-html:`<td><a href="#feat_segstacks">segmented stacks</a></td>`
+:raw-html:`<td class="no"></td> <!-- ARM -->`
+:raw-html:`<td class="no"></td> <!-- CellSPU -->`
+:raw-html:`<td class="no"></td> <!-- Hexagon -->`
+:raw-html:`<td class="no"></td> <!-- MBlaze -->`
+:raw-html:`<td class="no"></td> <!-- MSP430 -->`
+:raw-html:`<td class="no"></td> <!-- Mips -->`
+:raw-html:`<td class="no"></td> <!-- PTX -->`
+:raw-html:`<td class="no"></td> <!-- PowerPC -->`
+:raw-html:`<td class="no"></td> <!-- Sparc -->`
+:raw-html:`<td class="partial"><a href="#feat_segstacks_x86">*</a></td> <!-- X86 -->`
+:raw-html:`<td class="no"></td> <!-- XCore -->`
+:raw-html:`</tr>`
+
+:raw-html:`</table>`
+
+.. _feat_reliable:
+
+Is Generally Reliable
+^^^^^^^^^^^^^^^^^^^^^
+
+This box indicates whether the target is considered to be production quality.
+This indicates that the target has been used as a static compiler to compile
+large amounts of code by a variety of different people and is in continuous use.
+
+.. _feat_asmparser:
+
+Assembly Parser
+^^^^^^^^^^^^^^^
+
+This box indicates whether the target supports parsing target specific .s files
+by implementing the MCAsmParser interface. This is required for llvm-mc to be
+able to act as a native assembler and is required for inline assembly support in
+the native .o file writer.
+
+.. _feat_disassembler:
+
+Disassembler
+^^^^^^^^^^^^
+
+This box indicates whether the target supports the MCDisassembler API for
+disassembling machine opcode bytes into MCInst's.
+
+.. _feat_inlineasm:
+
+Inline Asm
+^^^^^^^^^^
+
+This box indicates whether the target supports most popular inline assembly
+constraints and modifiers.
+
+.. _feat_jit:
+
+JIT Support
+^^^^^^^^^^^
+
+This box indicates whether the target supports the JIT compiler through the
+ExecutionEngine interface.
+
+.. _feat_jit_arm:
+
+The ARM backend has basic support for integer code in ARM codegen mode, but
+lacks NEON and full Thumb support.
+
+.. _feat_objectwrite:
+
+.o File Writing
+^^^^^^^^^^^^^^^
+
+This box indicates whether the target supports writing .o files (e.g. MachO,
+ELF, and/or COFF) directly from the target. Note that the target also
+must include an assembly parser and general inline assembly support for full
+inline assembly support in the .o writer.
+
+Targets that don't support this feature can obviously still write out .o files;
+they just rely on having an external assembler to translate from a .s file to a
+.o file (as is the case for many C compilers).
+
+.. _feat_tailcall:
+
+Tail Calls
+^^^^^^^^^^
+
+This box indicates whether the target supports guaranteed tail calls. These are
+calls marked "`tail <LangRef.html#i_call>`_" that use the fastcc calling
+convention. Please see the `tail call section for more details`_.
+
+.. _feat_segstacks:
+
+Segmented Stacks
+^^^^^^^^^^^^^^^^
+
+This box indicates whether the target supports segmented stacks. This replaces
+the traditional large C stack with many linked segments. It is compatible with
+the `gcc implementation <http://gcc.gnu.org/wiki/SplitStacks>`_ used by the Go
+front end.
+
+.. _feat_segstacks_x86:
+
+Basic support exists on the X86 backend. Currently vararg doesn't work and the
+object files are not marked the way the gold linker expects, but simple Go
+programs can be built by dragonegg.
+
+.. _tail call section for more details:
+
+Tail call optimization
+----------------------
+
+Tail call optimization, where the callee reuses the stack of the caller, is
+currently supported on x86/x86-64 and PowerPC. It is performed if:
+
+* Caller and callee have the calling convention ``fastcc`` or ``cc 10`` (GHC
+ call convention).
+
+* The call is a tail call - in tail position (ret immediately follows call and
+ ret uses value of call or is void).
+
+* Option ``-tailcallopt`` is enabled.
+
+* Platform specific constraints are met.
+
+x86/x86-64 constraints:
+
+* No variable argument lists are used.
+
+* On x86-64 when generating GOT/PIC code only module-local calls (visibility =
+ hidden or protected) are supported.
+
+PowerPC constraints:
+
+* No variable argument lists are used.
+
+* No byval parameters are used.
+
+* On ppc32/64 GOT/PIC only module-local calls (visibility = hidden or protected)
+ are supported.
+
+Example:
+
+Call as ``llc -tailcallopt test.ll``.
+
+.. code-block:: llvm
+
+  declare fastcc i32 @tailcallee(i32 inreg %a1, i32 inreg %a2, i32 %a3, i32 %a4)
+
+  define fastcc i32 @tailcaller(i32 %in1, i32 %in2) {
+    %l1 = add i32 %in1, %in2
+    %tmp = tail call fastcc i32 @tailcallee(i32 %in1 inreg, i32 %in2 inreg, i32 %in1, i32 %l1)
+    ret i32 %tmp
+  }
+
+Implications of ``-tailcallopt``:
+
+To support tail call optimization in situations where the callee has more
+arguments than the caller a 'callee pops arguments' convention is used. This
+currently causes each ``fastcc`` call that is not tail call optimized (because
+one or more of above constraints are not met) to be followed by a readjustment
+of the stack. So performance might be worse in such cases.
+
+Sibling call optimization
+-------------------------
+
+Sibling call optimization is a restricted form of tail call optimization.
+Unlike tail call optimization described in the previous section, it can be
+performed automatically on any tail calls when the ``-tailcallopt`` option is
+not specified.
+
+Sibling call optimization is currently performed on x86/x86-64 when the
+following constraints are met:
+
+* Caller and callee have the same calling convention. It can be either ``c`` or
+ ``fastcc``.
+
+* The call is a tail call - in tail position (ret immediately follows call and
+ ret uses value of call or is void).
+
+* Caller and callee have matching return type or the callee result is not used.
+
+* If any of the callee arguments are being passed on the stack, they must be
+ available in the caller's own incoming argument stack and the frame offsets must
+ be the same.
+
+Example:
+
+.. code-block:: llvm
+
+  declare i32 @bar(i32, i32)
+
+  define i32 @foo(i32 %a, i32 %b, i32 %c) {
+  entry:
+    %0 = tail call i32 @bar(i32 %a, i32 %b)
+    ret i32 %0
+  }
+
+The X86 backend
+---------------
+
+The X86 code generator lives in the ``lib/Target/X86`` directory. This code
+generator is capable of targeting a variety of x86-32 and x86-64 processors, and
+includes support for ISA extensions such as MMX and SSE.
+
+X86 Target Triples supported
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The following are the known target triples that are supported by the X86
+backend. This is not an exhaustive list, and it would be useful to add those
+that people test.
+
+* **i686-pc-linux-gnu** --- Linux
+
+* **i386-unknown-freebsd5.3** --- FreeBSD 5.3
+
+* **i686-pc-cygwin** --- Cygwin on Win32
+
+* **i686-pc-mingw32** --- MingW on Win32
+
+* **i386-pc-mingw32msvc** --- MingW crosscompiler on Linux
+
+* **i686-apple-darwin*** --- Apple Darwin on X86
+
+* **x86_64-unknown-linux-gnu** --- Linux
+
+X86 Calling Conventions supported
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The following target-specific calling conventions are known to the backend:
+
+* **x86_StdCall** --- stdcall calling convention seen on Microsoft Windows
+ platform (CC ID = 64).
+
+* **x86_FastCall** --- fastcall calling convention seen on Microsoft Windows
+ platform (CC ID = 65).
+
+* **x86_ThisCall** --- Similar to X86_StdCall. Passes first argument in ECX,
+ others via stack. Callee is responsible for stack cleaning. This convention is
+ used by MSVC by default for methods in its ABI (CC ID = 70).
+
+.. _X86 addressing mode:
+
+Representing X86 addressing modes in MachineInstrs
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The x86 has a very flexible way of accessing memory. It is capable of forming
+memory addresses of the following form directly in integer instructions
+(which use ModR/M addressing):
+
+::
+
+ SegmentReg: Base + [1,2,4,8] * IndexReg + Disp32
+
+In order to represent this, LLVM tracks no less than 5 operands for each memory
+operand of this form. This means that the "load" form of '``mov``' has the
+following ``MachineOperand``\s in this order:
+
+::
+
+  Index:        0     |      1        2       3           4          5
+  Meaning:   DestReg, |  BaseReg,  Scale, IndexReg, Displacement Segment
+  OperandTy: VirtReg, |  VirtReg,  UnsImm, VirtReg,  SignExtImm  PhysReg
+
+Stores, and all other instructions, treat these five memory operands in the same
+way and in the same order. If the segment register is unspecified (regno = 0),
+then no segment override is generated. "Lea" operations do not have a segment
+register specified, so they only have 4 operands for their memory reference.
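+
+As a sketch only (the helper is hypothetical and the register choices are
+placeholders), the five memory operands are appended in exactly this order
+when an instruction is built with ``MachineInstrBuilder``:
+
+.. code-block:: c++
+
+  #include "llvm/CodeGen/MachineInstrBuilder.h"
+
+  using namespace llvm;
+
+  // Hypothetical helper: append an X86 memory reference of the form
+  //   Segment: Base + Scale*Index + Disp
+  // to an instruction that is being built.
+  static void addMemReference(MachineInstrBuilder &MIB, unsigned BaseReg,
+                              unsigned IndexReg, int Disp) {
+    MIB.addReg(BaseReg)   // BaseReg
+       .addImm(1)         // Scale: 1, 2, 4 or 8
+       .addReg(IndexReg)  // IndexReg (0 if unused)
+       .addImm(Disp)      // Displacement
+       .addReg(0);        // Segment (0 means no segment override)
+  }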
+
+X86 address spaces supported
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+x86 has a feature which provides the ability to perform loads and stores to
+different address spaces via the x86 segment registers. A segment override
+prefix byte on an instruction causes the instruction's memory access to go to
+the specified segment. LLVM address space 0 is the default address space, which
+includes the stack, and any unqualified memory accesses in a program. Address
+spaces 1-255 are currently reserved for user-defined code. The GS-segment is
+represented by address space 256, while the FS-segment is represented by address
+space 257. Other x86 segments have yet to be allocated address space
+numbers.
+
+While these address spaces may seem similar to TLS via the ``thread_local``
+keyword, and often use the same underlying hardware, there are some fundamental
+differences.
+
+The ``thread_local`` keyword applies to global variables and specifies that they
+are to be allocated in thread-local memory. There are no type qualifiers
+involved, and these variables can be pointed to with normal pointers and
+accessed with normal loads and stores. The ``thread_local`` keyword is
+target-independent at the LLVM IR level (though LLVM doesn't yet have
+implementations of it for some configurations).
+
+Special address spaces, in contrast, apply to static types. Every load and store
+has a particular address space in its address operand type, and this is what
+determines which address space is accessed. LLVM ignores these special address
+space qualifiers on global variables, and does not provide a way to directly
+allocate storage in them. At the LLVM IR level, the behavior of these special
+address spaces depends in part on the underlying OS or runtime environment, and
+they are specific to x86 (and LLVM doesn't yet handle them correctly in some
+cases).
+
+Some operating systems and runtime environments use (or may in the future use)
+the FS/GS-segment registers for various low-level purposes, so care should be
+taken when considering them.
+
+Instruction naming
+^^^^^^^^^^^^^^^^^^
+
+An instruction name consists of the base name, a default operand size, and a
+character per operand with an optional special size. For example:
+
+::
+
+ ADD8rr -> add, 8-bit register, 8-bit register
+ IMUL16rmi -> imul, 16-bit register, 16-bit memory, 16-bit immediate
+ IMUL16rmi8 -> imul, 16-bit register, 16-bit memory, 8-bit immediate
+ MOVSX32rm16 -> movsx, 32-bit register, 16-bit memory
+
+The PowerPC backend
+-------------------
+
+The PowerPC code generator lives in the ``lib/Target/PowerPC`` directory. The
+code generation is retargetable to several variations or *subtargets* of the
+PowerPC ISA, including ppc32, ppc64 and altivec.
+
+LLVM PowerPC ABI
+^^^^^^^^^^^^^^^^
+
+LLVM follows the AIX PowerPC ABI, with two deviations. First, LLVM uses PC-relative
+(PIC) or static addressing for accessing global values, so no TOC (r2) is
+used. Second, r31 is used as a frame pointer to allow dynamic growth of a stack
+frame. LLVM takes advantage of having no TOC to provide space to save the frame
+pointer in the PowerPC linkage area of the caller frame. Other details of the
+PowerPC ABI can be found at `PowerPC ABI
+<http://developer.apple.com/documentation/DeveloperTools/Conceptual/LowLevelABI/Articles/32bitPowerPC.html>`_\
+. Note: This link describes the 32 bit ABI. The 64 bit ABI is similar except
+space for GPRs are 8 bytes wide (not 4) and r13 is reserved for system use.
+
+Frame Layout
+^^^^^^^^^^^^
+
+The size of a PowerPC frame is usually fixed for the duration of a function's
+invocation. Since the frame is fixed size, all references into the frame can be
+accessed via fixed offsets from the stack pointer. The exception to this is
+when dynamic alloca or variable sized arrays are present; then a base pointer
+(r31) is used as a proxy for the stack pointer and the stack pointer is free to
+grow or shrink. A base pointer is also used if llvm-gcc is not passed the
+-fomit-frame-pointer flag. The stack pointer is always aligned to 16 bytes, so
+that space allocated for altivec vectors will be properly aligned.
+
+An invocation frame is laid out as follows (low memory at top):
+
+:raw-html:`<table border="1" cellspacing="0">`
+:raw-html:`<tr>`
+:raw-html:`<td>Linkage<br><br></td>`
+:raw-html:`</tr>`
+:raw-html:`<tr>`
+:raw-html:`<td>Parameter area<br><br></td>`
+:raw-html:`</tr>`
+:raw-html:`<tr>`
+:raw-html:`<td>Dynamic area<br><br></td>`
+:raw-html:`</tr>`
+:raw-html:`<tr>`
+:raw-html:`<td>Locals area<br><br></td>`
+:raw-html:`</tr>`
+:raw-html:`<tr>`
+:raw-html:`<td>Saved registers area<br><br></td>`
+:raw-html:`</tr>`
+:raw-html:`<tr style="border-style: none hidden none hidden;">`
+:raw-html:`<td><br></td>`
+:raw-html:`</tr>`
+:raw-html:`<tr>`
+:raw-html:`<td>Previous Frame<br><br></td>`
+:raw-html:`</tr>`
+:raw-html:`</table>`
+
+The *linkage* area is used by a callee to save special registers prior to
+allocating its own frame. Only three entries are relevant to LLVM. The first
+entry is the previous stack pointer (sp), aka link. This allows probing tools
+like gdb or exception handlers to quickly scan the frames in the stack. A
+function epilog can also use the link to pop the frame from the stack. The
+third entry in the linkage area is used to save the return address from the lr
+register. Finally, as mentioned above, the last entry is used to save the
+previous frame pointer (r31.) The entries in the linkage area are the size of a
+GPR, thus the linkage area is 24 bytes long in 32 bit mode and 48 bytes in 64
+bit mode.
+
+32 bit linkage area:
+
+:raw-html:`<table border="1" cellspacing="0">`
+:raw-html:`<tr>`
+:raw-html:`<td>0</td>`
+:raw-html:`<td>Saved SP (r1)</td>`
+:raw-html:`</tr>`
+:raw-html:`<tr>`
+:raw-html:`<td>4</td>`
+:raw-html:`<td>Saved CR</td>`
+:raw-html:`</tr>`
+:raw-html:`<tr>`
+:raw-html:`<td>8</td>`
+:raw-html:`<td>Saved LR</td>`
+:raw-html:`</tr>`
+:raw-html:`<tr>`
+:raw-html:`<td>12</td>`
+:raw-html:`<td>Reserved</td>`
+:raw-html:`</tr>`
+:raw-html:`<tr>`
+:raw-html:`<td>16</td>`
+:raw-html:`<td>Reserved</td>`
+:raw-html:`</tr>`
+:raw-html:`<tr>`
+:raw-html:`<td>20</td>`
+:raw-html:`<td>Saved FP (r31)</td>`
+:raw-html:`</tr>`
+:raw-html:`</table>`
+
+64 bit linkage area:
+
+:raw-html:`<table border="1" cellspacing="0">`
+:raw-html:`<tr>`
+:raw-html:`<td>0</td>`
+:raw-html:`<td>Saved SP (r1)</td>`
+:raw-html:`</tr>`
+:raw-html:`<tr>`
+:raw-html:`<td>8</td>`
+:raw-html:`<td>Saved CR</td>`
+:raw-html:`</tr>`
+:raw-html:`<tr>`
+:raw-html:`<td>16</td>`
+:raw-html:`<td>Saved LR</td>`
+:raw-html:`</tr>`
+:raw-html:`<tr>`
+:raw-html:`<td>24</td>`
+:raw-html:`<td>Reserved</td>`
+:raw-html:`</tr>`
+:raw-html:`<tr>`
+:raw-html:`<td>32</td>`
+:raw-html:`<td>Reserved</td>`
+:raw-html:`</tr>`
+:raw-html:`<tr>`
+:raw-html:`<td>40</td>`
+:raw-html:`<td>Saved FP (r31)</td>`
+:raw-html:`</tr>`
+:raw-html:`</table>`
+
+The *parameter area* is used to store arguments being passed to a callee
+function. Following the PowerPC ABI, the first few arguments are actually
+passed in registers, with the space in the parameter area unused. However, if
+there are not enough registers or the callee is a thunk or vararg function,
+these register arguments can be spilled into the parameter area. Thus, the
+parameter area must be large enough to store all the parameters for the largest
+call sequence made by the caller. The size must also be minimally large enough
+to spill registers r3-r10. This gives callees that are blind to the call
+signature, such as thunks and vararg functions, enough space to cache the
+argument registers. Therefore, the parameter area is minimally 32 bytes (64
+bytes in 64-bit mode). Also note that since the parameter area is a fixed
+offset from the top of the frame, a callee can access its spilled arguments
+using fixed offsets from the stack pointer (or base pointer).
+
+Combining the linkage area, the minimum parameter area, and the 16-byte
+alignment requirement, a stack frame is minimally 64 bytes in 32-bit mode
+(24 + 32 = 56, rounded up to 64) and 128 bytes in 64-bit mode.
+
+The *dynamic area* starts out with size zero. If a function uses dynamic
+alloca, then space is added to the stack, the linkage and parameter areas are
+shifted to the top of the stack, and the new space is available immediately
+below the linkage and parameter areas. The cost of shifting the linkage and
+parameter areas is minor, since only the link value needs to be copied. The
+link value can be easily fetched by adding the original frame size to the base
+pointer. Note that allocations in the dynamic space need to observe 16-byte
+alignment.
+
+The *locals area* is where the LLVM compiler reserves space for local
+variables.
+
+The *saved registers area* is where the LLVM compiler spills callee-saved
+registers on entry to the callee.
+
+Prolog/Epilog
+^^^^^^^^^^^^^
+
+The LLVM prolog and epilog are the same as described in the PowerPC ABI, with
+the following exceptions. Callee-saved registers are spilled after the frame is
+created. This allows the LLVM epilog/prolog support to be common with other
+targets. The base pointer callee-saved register r31 is saved in the TOC slot of
+the linkage area. This simplifies allocation of space for the base pointer and
+makes it convenient to locate programmatically and during debugging.
+
+Dynamic Allocation
+^^^^^^^^^^^^^^^^^^
+
+.. note::
+
+ TODO - More to come.
+
+The PTX backend
+---------------
+
+The PTX code generator lives in the lib/Target/PTX directory. It is currently a
+work-in-progress, but already supports most of the code generation functionality
+needed to generate correct PTX kernels for CUDA devices.
+
+The code generator can target PTX 2.0+ and shader model 1.0+. The PTX ISA
+Reference Manual is used as the primary source of ISA information, though an
+effort is made to make the output of the code generator match the output of the
+NVIDIA nvcc compiler, whenever possible.
+
+Code Generator Options:
+
+:raw-html:`<table border="1" cellspacing="0">`
+:raw-html:`<tr>`
+:raw-html:`<th>Option</th>`
+:raw-html:`<th>Description</th>`
+:raw-html:`</tr>`
+:raw-html:`<tr>`
+:raw-html:`<td><tt>double</tt></td>`
+:raw-html:`<td align="left">If enabled, the map_f64_to_f32 directive is disabled in the PTX output, allowing native double-precision arithmetic</td>`
+:raw-html:`</tr>`
+:raw-html:`<tr>`
+:raw-html:`<td><tt>no-fma</tt></td>`
+:raw-html:`<td align="left">Disable generation of Fused-Multiply Add instructions, which may be beneficial for some devices</td>`
+:raw-html:`</tr>`
+:raw-html:`<tr>`
+:raw-html:`<td><tt>smxy / computexy</tt></td>`
+:raw-html:`<td align="left">Set shader model/compute capability to x.y, e.g. sm20 or compute13</td>`
+:raw-html:`</tr>`
+:raw-html:`</table>`
+
+Working:
+
+* Arithmetic instruction selection (including combo FMA)
+
+* Bitwise instruction selection
+
+* Control-flow instruction selection
+
+* Function calls (only on SM 2.0+, with no return arguments)
+
+* Address spaces (0 = global, 1 = constant, 2 = local, 4 = shared)
+
+* Thread synchronization (bar.sync)
+
+* Special register reads ([N]TID, [N]CTAID, PMx, CLOCK, etc.)
+
+In Progress:
+
+* Robust call instruction selection
+
+* Stack frame allocation
+
+* Device-specific instruction scheduling optimizations
diff --git a/docs/CodingStandards.rst b/docs/CodingStandards.rst
new file mode 100644
index 00000000000..4d16e2a9bd6
--- /dev/null
+++ b/docs/CodingStandards.rst
@@ -0,0 +1,1175 @@
+.. _coding_standards:
+
+=====================
+LLVM Coding Standards
+=====================
+
+.. contents::
+ :local:
+
+Introduction
+============
+
+This document attempts to describe a few coding standards that are being used in
+the LLVM source tree. Although no coding standards should be regarded as
+absolute requirements to be followed in all instances, coding standards are
+particularly important for large-scale code bases that follow a library-based
+design (like LLVM).
+
+This document intentionally does not prescribe fixed standards for religious
+issues such as brace placement and space usage. For issues like this, follow
+the golden rule:
+
+.. _Golden Rule:
+
+ **If you are extending, enhancing, or bug fixing already implemented code,
+ use the style that is already being used so that the source is uniform and
+ easy to follow.**
+
+Note that some code bases (e.g. ``libc++``) have really good reasons to deviate
+from the coding standards. In the case of ``libc++``, this is because the
+naming and other conventions are dictated by the C++ standard. If you think
+there is a specific good reason to deviate from the standards here, please bring
+it up on the LLVMdev mailing list.
+
+There are some conventions that are not uniformly followed in the code base
+(e.g. the naming convention). This is because they are relatively new, and a
+lot of code was written before they were put in place. Our long term goal is
+for the entire codebase to follow the convention, but we explicitly *do not*
+want patches that do large-scale reformatting of existing code. On the other
+hand, it is reasonable to rename the methods of a class if you're about to
+change it in some other way. Just do the reformatting as a separate commit
+from the functionality change.
+
+The ultimate goal of these guidelines is to increase the readability and
+maintainability of our common source base. If you have suggestions for topics
+to be included, please mail them to `Chris <mailto:sabre@nondot.org>`_.
+
+Mechanical Source Issues
+========================
+
+Source Code Formatting
+----------------------
+
+Commenting
+^^^^^^^^^^
+
+Comments are one critical part of readability and maintainability. Everyone
+knows they should comment their code, and so should you. When writing comments,
+write them as English prose, which means they should use proper capitalization,
+punctuation, etc. Aim to describe what the code is trying to do and why, not
+*how* it does it at a micro level. Here are a few critical things to document:
+
+.. _header file comment:
+
+File Headers
+""""""""""""
+
+Every source file should have a header on it that describes the basic purpose of
+the file. If a file does not have a header, it should not be checked into the
+tree. The standard header looks like this:
+
+.. code-block:: c++
+
+ //===-- llvm/Instruction.h - Instruction class definition -------*- C++ -*-===//
+ //
+ // The LLVM Compiler Infrastructure
+ //
+ // This file is distributed under the University of Illinois Open Source
+ // License. See LICENSE.TXT for details.
+ //
+ //===----------------------------------------------------------------------===//
+ //
+ // This file contains the declaration of the Instruction class, which is the
+ // base class for all of the VM instructions.
+ //
+ //===----------------------------------------------------------------------===//
+
+A few things to note about this particular format: The "``-*- C++ -*-``" string
+on the first line is there to tell Emacs that the source file is a C++ file, not
+a C file (Emacs assumes ``.h`` files are C files by default).
+
+.. note::
+
+  This tag is not necessary in ``.cpp`` files. The name of the file is also
+  on the first line, along with a very short description of the purpose of the
+  file. This is important when printing out code and flipping through lots of
+  pages.
+
+The next section in the file is a concise note that defines the license that
+the file is released under. This makes it perfectly clear what terms the source
+code can be distributed under; this note should not be modified in any way.
+
+The main body of the description does not have to be very long in most cases.
+Here it's only two lines. If an algorithm is being implemented or something
+tricky is going on, a reference to the paper where it is published should be
+included, as well as any notes or *gotchas* in the code to watch out for.
+
+Class overviews
+"""""""""""""""
+
+Classes are one fundamental part of a good object oriented design. As such, a
+class definition should have a comment block that explains what the class is
+used for and how it works. Every non-trivial class is expected to have a
+``doxygen`` comment block.
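+
+As a purely illustrative sketch (the class below is invented for this example,
+not taken from the LLVM tree), such a block might look like:
+
+.. code-block:: c++
+
+  /// LineCounter - This class keeps a running count of the number of lines
+  /// that have been fed to it.  The count is reset with clear() and queried
+  /// with getCount().
+  class LineCounter {
+    unsigned Count;
+  public:
+    LineCounter() : Count(0) {}
+    void addLine() { ++Count; }
+    void clear() { Count = 0; }
+    unsigned getCount() const { return Count; }
+  };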
+
+Method information
+""""""""""""""""""
+
+Methods defined in a class (as well as any global functions) should also be
+documented properly. A quick note about what it does and a description of the
+borderline behaviour is all that is necessary here (unless something
+particularly tricky or insidious is going on). The hope is that people can
+figure out how to use your interfaces without reading the code itself.
+
+Good things to talk about here are what happens when something unexpected
+happens: does the method return null? Abort? Format your hard disk?
+
+Comment Formatting
+^^^^^^^^^^^^^^^^^^
+
+In general, prefer C++ style (``//``) comments. They take less space, require
+less typing, don't have nesting problems, etc. There are a few cases when it is
+useful to use C style (``/* */``) comments however:
+
+#. When writing C code: Obviously if you are writing C code, use C style
+ comments.
+
+#. When writing a header file that may be ``#include``\d by a C source file.
+
+#. When writing a source file that is used by a tool that only accepts C style
+ comments.
+
+To comment out a large block of code, use ``#if 0`` and ``#endif``. These nest
+properly and are better behaved in general than C style comments.
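+
+For instance (``runLegacyPath()`` is a made-up call, shown only to have
+something to disable):
+
+.. code-block:: c++
+
+  #if 0
+    // This whole region is compiled out.  Unlike a /* */ comment, it can
+    // safely contain other /* */ comments and nested #if 0 ... #endif blocks.
+    runLegacyPath();
+  #endif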
+
+``#include`` Style
+^^^^^^^^^^^^^^^^^^
+
+Immediately after the `header file comment`_ (and include guards if working on a
+header file), the `minimal list of #includes`_ required by the file should be
+listed. We prefer these ``#include``\s to be listed in this order:
+
+.. _Main Module Header:
+.. _Local/Private Headers:
+
+#. Main Module Header
+#. Local/Private Headers
+#. ``llvm/*``
+#. ``llvm/Analysis/*``
+#. ``llvm/Assembly/*``
+#. ``llvm/Bitcode/*``
+#. ``llvm/CodeGen/*``
+#. ...
+#. ``llvm/Support/*``
+#. ``llvm/Config/*``
+#. System ``#include``\s
+
+and each category should be sorted by name.
+
+The `Main Module Header`_ file applies to ``.cpp`` files which implement an
+interface defined by a ``.h`` file. This ``#include`` should always be included
+**first** regardless of where it lives on the file system. By including a
+header file first in the ``.cpp`` files that implement the interfaces, we ensure
+that the header does not have any hidden dependencies which are not explicitly
+``#include``\d in the header, but should be. It is also a form of documentation
+in the ``.cpp`` file to indicate where the interfaces it implements are defined.
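+
+As a hypothetical illustration (the ``FooAnalysis`` file and header names here
+are invented, not real LLVM files), the includes at the top of
+``lib/Analysis/FooAnalysis.cpp`` might therefore look like:
+
+.. code-block:: c++
+
+  #include "llvm/Analysis/FooAnalysis.h"  // 1. Main module header.
+  #include "FooAnalysisInternals.h"       // 2. Local/private header.
+  #include "llvm/Function.h"              // 3. llvm/* header.
+  #include "llvm/Support/raw_ostream.h"   // llvm/Support/* header.
+  #include <map>                          // System header last.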
+
+.. _fit into 80 columns:
+
+Source Code Width
+^^^^^^^^^^^^^^^^^
+
+Write your code to fit within 80 columns of text. This helps those of us who
+like to print out code and look at your code in an ``xterm`` without resizing
+it.
+
+The longer answer is that there must be some limit to the width of the code in
+order to reasonably allow developers to have multiple files side-by-side in
+windows on a modest display. If you are going to pick a width limit, it is
+somewhat arbitrary but you might as well pick something standard. Going with 90
+columns (for example) instead of 80 columns wouldn't add any significant value
+and would be detrimental to printing out code. Also many other projects have
+standardized on 80 columns, so some people have already configured their editors
+for it (vs something else, like 90 columns).
+
+This is one of many contentious issues in coding standards, but it is not up for
+debate.
+
+Use Spaces Instead of Tabs
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In all cases, prefer spaces to tabs in source files. People have different
+preferred indentation levels, and different styles of indentation that they
+like; this is fine. What isn't fine is that different editors/viewers expand
+tabs out to different tab stops. This can cause your code to look completely
+unreadable, and it is not worth dealing with.
+
+As always, follow the `Golden Rule`_ above: follow the style of
+existing code if you are modifying and extending it. If you like four spaces of
+indentation, **DO NOT** do that in the middle of a chunk of code with two spaces
+of indentation. Also, do not reindent a whole source file: it makes for
+incredible diffs that are absolutely worthless.
+
+Indent Code Consistently
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+Okay, in your first year of programming you were told that indentation is
+important. If you didn't believe and internalize this then, now is the time.
+Just do it.
+
+Compiler Issues
+---------------
+
+Treat Compiler Warnings Like Errors
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+If your code has compiler warnings in it, something is wrong --- you aren't
+casting values correctly, you have "questionable" constructs in your code, or
+you are doing something legitimately wrong. Compiler warnings can cover up
+legitimate errors in output and make dealing with a translation unit difficult.
+
+It is not possible to prevent all warnings from all compilers, nor is it
+desirable. Instead, pick a standard compiler (like ``gcc``) that provides a
+good thorough set of warnings, and stick to it. At least in the case of
+``gcc``, it is possible to work around any spurious errors by changing the
+syntax of the code slightly. For example, a warning that annoys me occurs when
+I write code like this:
+
+.. code-block:: c++
+
+ if (V = getValue()) {
+ ...
+ }
+
+``gcc`` will warn me that I probably want to use the ``==`` operator, and that I
+probably mistyped it. In most cases, I haven't, and I really don't want the
+spurious errors. To fix this particular problem, I rewrite the code like
+this:
+
+.. code-block:: c++
+
+ if ((V = getValue())) {
+ ...
+ }
+
+which shuts ``gcc`` up. Any ``gcc`` warning that annoys you can be fixed by
+massaging the code appropriately.
+
+Write Portable Code
+^^^^^^^^^^^^^^^^^^^
+
+In almost all cases, it is possible and within reason to write completely
+portable code. If there are cases where it isn't possible to write portable
+code, isolate it behind a well defined (and well documented) interface.
+
+In practice, this means that you shouldn't assume much about the host compiler
+(and Visual Studio tends to be the lowest common denominator). If advanced
+features are used, they should only be an implementation detail of a library
+which has a simple exposed API, and preferably be buried in ``libSystem``.
+
+Do not use RTTI or Exceptions
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In an effort to reduce code and executable size, LLVM does not use RTTI
+(e.g. ``dynamic_cast<>``) or exceptions. These two language features violate
+the general C++ principle of *"you only pay for what you use"*, causing
+executable bloat even if exceptions are never used in the code base, or if RTTI
+is never used for a class. Because of this, we turn them off globally in the
+code.
+
+That said, LLVM does make extensive use of a hand-rolled form of RTTI that uses
+templates like `isa<>, cast<>, and dyn_cast<> <ProgrammersManual.html#isa>`_.
+This form of RTTI is opt-in and can be added to any class. It is also
+substantially more efficient than ``dynamic_cast<>``.
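+
+As a brief sketch of what this looks like in practice (see the Programmer's
+Manual for the authoritative description), ``dyn_cast<>`` returns a null
+pointer instead of throwing or relying on built-in RTTI:
+
+.. code-block:: c++
+
+  // Return true if V is a call instruction that takes no arguments.
+  static bool isNullaryCall(const Value *V) {
+    // dyn_cast<> yields null if V is not actually a CallInst.
+    if (const CallInst *CI = dyn_cast<CallInst>(V))
+      return CI->getNumArgOperands() == 0;
+    return false;
+  }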
+
+.. _static constructor:
+
+Do not use Static Constructors
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Static constructors and destructors (e.g. global variables whose types have a
+constructor or destructor) should not be added to the code base, and should be
+removed wherever possible. Besides `well known problems
+<http://yosefk.com/c++fqa/ctors.html#fqa-10.12>`_ where the order of
+initialization is undefined between globals in different source files, the
+entire concept of static constructors is at odds with the common use case of
+LLVM as a library linked into a larger application.
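+
+To make the terminology concrete, the following (invented) global is exactly
+the kind of thing this rule forbids, because its constructor and destructor
+must run at program startup and shutdown:
+
+.. code-block:: c++
+
+  #include <string>
+  #include <vector>
+
+  // Bad: requires a static constructor in every program that links this in.
+  static std::vector<std::string> RegisteredPassNames;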
+
+Consider the use of LLVM as a JIT linked into another application (perhaps for
+`OpenGL, custom languages <http://llvm.org/Users.html>`_, `shaders in movies
+<http://llvm.org/devmtg/2010-11/Gritz-OpenShadingLang.pdf>`_, etc). Due to the
+design of static constructors, they must be executed at startup time of the
+entire application, regardless of whether or how LLVM is used in that larger
+application. There are two problems with this:
+
+* The time to run the static constructors impacts startup time of applications
+ --- a critical time for GUI apps, among others.
+
+* The static constructors cause the app to pull many extra pages of memory off
+ the disk: both the code for the constructor in each ``.o`` file and the small
+ amount of data that gets touched. In addition, touched/dirty pages put more
+ pressure on the VM system on low-memory machines.
+
+We would really like for there to be zero cost for linking in an additional LLVM
+target or other library into an application, but static constructors violate
+this goal.
+
+That said, LLVM unfortunately does contain static constructors. It would be a
+`great project <http://llvm.org/PR11944>`_ for someone to purge all static
+constructors from LLVM, and then enable the ``-Wglobal-constructors`` warning
+flag (when building with Clang) to ensure we do not regress in the future.
+
+Use of ``class`` and ``struct`` Keywords
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In C++, the ``class`` and ``struct`` keywords can be used almost
+interchangeably. The only difference is when they are used to declare a class:
+``class`` makes all members private by default while ``struct`` makes all
+members public by default.
+
+Unfortunately, not all compilers follow the rules and some will generate
+different symbols based on whether ``class`` or ``struct`` was used to declare
+the symbol. This can lead to problems at link time.
+
+So, the rule for LLVM is to always use the ``class`` keyword, unless **all**
+members are public and the type is a C++ `POD
+<http://en.wikipedia.org/wiki/Plain_old_data_structure>`_ type, in which case
+``struct`` is allowed.
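+
+For example (both types below are invented purely to illustrate the rule):
+
+.. code-block:: c++
+
+  // OK as a struct: every member is public and the type is a POD.
+  struct Point {
+    int X;
+    int Y;
+  };
+
+  // Use class otherwise: this type has private state to protect.
+  class Interval {
+    int Low, High;  // Private by default with 'class'.
+  public:
+    Interval(int L, int H) : Low(L), High(H) {}
+    bool contains(int V) const { return Low <= V && V <= High; }
+  };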
+
+Style Issues
+============
+
+The High-Level Issues
+---------------------
+
+A Public Header File **is** a Module
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+C++ doesn't do too well in the modularity department. There is no real
+encapsulation or data hiding (unless you use expensive protocol classes), but it
+is what we have to work with. When you write a public header file (in the LLVM
+source tree, they live in the top level "``include``" directory), you are
+defining a module of functionality.
+
+Ideally, modules should be completely independent of each other, and their
+header files should only ``#include`` the absolute minimum number of headers
+possible. A module is not just a class, a function, or a namespace: it's a
+collection of these that defines an interface. This interface may be several
+functions, classes, or data structures, but the important issue is how they work
+together.
+
+In general, a module should be implemented by one or more ``.cpp`` files. Each
+of these ``.cpp`` files should include the header that defines their interface
+first. This ensures that all of the dependences of the module header have been
+properly added to the module header itself, and are not implicit. System
+headers should be included after user headers for a translation unit.
+
+.. _minimal list of #includes:
+
+``#include`` as Little as Possible
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+``#include`` hurts compile time performance. Don't do it unless you have to,
+especially in header files.
+
+But wait! Sometimes you need to have the definition of a class to use it, or to
+inherit from it. In these cases go ahead and ``#include`` that header file. Be
+aware however that there are many cases where you don't need to have the full
+definition of a class. If you are using a pointer or reference to a class, you
+don't need the header file. If you are simply returning a class instance from a
+prototyped function or method, you don't need it. In fact, for most cases, you
+simply don't need the definition of a class. And not ``#include``\ing speeds up
+compilation.
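+
+For instance (``Database.h`` and the classes here are invented for the
+example), a forward declaration is all that is needed when a class is only
+mentioned through pointers or references:
+
+.. code-block:: c++
+
+  // Widget.h does not need to #include "Database.h"; a forward declaration is
+  // enough because Database is only used through a pointer.
+  class Database;
+
+  class Widget {
+    Database *DB;
+  public:
+    explicit Widget(Database *D) : DB(D) {}
+    Database *getDatabase() const { return DB; }
+  };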
+
+It is easy to go overboard on this recommendation, however. You
+**must** include all of the header files that you are using --- you can include
+them either directly or indirectly through another header file. To make sure
+that you don't accidentally forget to include a header file in your module
+header, make sure to include your module header **first** in the implementation
+file (as mentioned above). This way there won't be any hidden dependencies that
+you'll find out about later.
+
+Keep "Internal" Headers Private
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Many modules have a complex implementation that causes them to use more than one
+implementation (``.cpp``) file. It is often tempting to put the internal
+communication interface (helper classes, extra functions, etc) in the public
+module header file. Don't do this!
+
+If you really need to do something like this, put a private header file in the
+same directory as the source files, and include it locally. This ensures that
+your private interface remains private and undisturbed by outsiders.
+
+.. note::
+
+ It's okay to put extra implementation methods in a public class itself. Just
+ make them private (or protected) and all is well.
+
+.. _early exits:
+
+Use Early Exits and ``continue`` to Simplify Code
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+When reading code, keep in mind how much state and how many previous decisions
+have to be remembered by the reader to understand a block of code. Aim to
+reduce indentation where possible when it doesn't make it more difficult to
+understand the code. One great way to do this is by making use of early exits
+and the ``continue`` keyword in long loops. As an example of using an early
+exit from a function, consider this "bad" code:
+
+.. code-block:: c++
+
+ Value *DoSomething(Instruction *I) {
+ if (!isa<TerminatorInst>(I) &&
+ I->hasOneUse() && SomeOtherThing(I)) {
+ ... some long code ....
+ }
+
+ return 0;
+ }
+
+This code has several problems if the body of the ``'if'`` is large. When
+you're looking at the top of the function, it isn't immediately clear that this
+*only* does interesting things with non-terminator instructions, and only
+applies to things with the other predicates. Second, it is relatively difficult
+to describe (in comments) why these predicates are important because the ``if``
+statement makes it difficult to lay out the comments. Third, when you're deep
+within the body of the code, it is indented an extra level. Finally, when
+reading the top of the function, it isn't clear what the result is if the
+predicate isn't true; you have to read to the end of the function to know that
+it returns null.
+
+It is much preferred to format the code like this:
+
+.. code-block:: c++
+
+ Value *DoSomething(Instruction *I) {
+ // Terminators never need 'something' done to them because ...
+ if (isa<TerminatorInst>(I))
+ return 0;
+
+ // We conservatively avoid transforming instructions with multiple uses
+ // because goats like cheese.
+ if (!I->hasOneUse())
+ return 0;
+
+ // This is really just here for example.
+ if (!SomeOtherThing(I))
+ return 0;
+
+ ... some long code ....
+ }
+
+This fixes these problems. A similar problem frequently happens in ``for``
+loops. A silly example is something like this:
+
+.. code-block:: c++
+
+ for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E; ++II) {
+ if (BinaryOperator *BO = dyn_cast<BinaryOperator>(II)) {
+ Value *LHS = BO->getOperand(0);
+ Value *RHS = BO->getOperand(1);
+ if (LHS != RHS) {
+ ...
+ }
+ }
+ }
+
+When you have very, very small loops, this sort of structure is fine. But if it
+exceeds 10-15 lines, it becomes difficult for people to read and understand at
+a glance. The problem with this sort of code is that it gets very nested very
+quickly, meaning that the reader of the code has to keep a lot of context in
+their brain to remember what is immediately going on in the loop, because they
+don't know if/when the ``if`` conditions will have ``else``\s etc.
+It is strongly preferred to structure the loop like this:
+
+.. code-block:: c++
+
+ for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E; ++II) {
+ BinaryOperator *BO = dyn_cast<BinaryOperator>(II);
+ if (!BO) continue;
+
+ Value *LHS = BO->getOperand(0);
+ Value *RHS = BO->getOperand(1);
+ if (LHS == RHS) continue;
+
+ ...
+ }
+
+This has all the benefits of using early exits for functions: it reduces nesting
+of the loop, it makes it easier to describe why the conditions are true, and it
+makes it obvious to the reader that there is no ``else`` coming up that they
+have to push context into their brain for. If a loop is large, this can be a
+big understandability win.
+
+Don't use ``else`` after a ``return``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+For similar reasons above (reduction of indentation and easier reading), please
+do not use ``'else'`` or ``'else if'`` after something that interrupts control
+flow --- like ``return``, ``break``, ``continue``, ``goto``, etc. For
+example, this is *bad*:
+
+.. code-block:: c++
+
+ case 'J': {
+ if (Signed) {
+ Type = Context.getsigjmp_bufType();
+ if (Type.isNull()) {
+ Error = ASTContext::GE_Missing_sigjmp_buf;
+ return QualType();
+ } else {
+ break;
+ }
+ } else {
+ Type = Context.getjmp_bufType();
+ if (Type.isNull()) {
+ Error = ASTContext::GE_Missing_jmp_buf;
+ return QualType();
+ } else {
+ break;
+ }
+ }
+ }
+
+It is better to write it like this:
+
+.. code-block:: c++
+
+ case 'J':
+ if (Signed) {
+ Type = Context.getsigjmp_bufType();
+ if (Type.isNull()) {
+ Error = ASTContext::GE_Missing_sigjmp_buf;
+ return QualType();
+ }
+ } else {
+ Type = Context.getjmp_bufType();
+ if (Type.isNull()) {
+ Error = ASTContext::GE_Missing_jmp_buf;
+ return QualType();
+ }
+ }
+ break;
+
+Or better yet (in this case) as:
+
+.. code-block:: c++
+
+ case 'J':
+ if (Signed)
+ Type = Context.getsigjmp_bufType();
+ else
+ Type = Context.getjmp_bufType();
+
+ if (Type.isNull()) {
+ Error = Signed ? ASTContext::GE_Missing_sigjmp_buf :
+ ASTContext::GE_Missing_jmp_buf;
+ return QualType();
+ }
+ break;
+
+The idea is to reduce indentation and the amount of code you have to keep track
+of when reading the code.
+
+Turn Predicate Loops into Predicate Functions
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+It is very common to write small loops that just compute a boolean value. There
+are a number of ways that people commonly write these, but an example of this
+sort of thing is:
+
+.. code-block:: c++
+
+ bool FoundFoo = false;
+ for (unsigned i = 0, e = BarList.size(); i != e; ++i)
+ if (BarList[i]->isFoo()) {
+ FoundFoo = true;
+ break;
+ }
+
+ if (FoundFoo) {
+ ...
+ }
+
+This sort of code is awkward to write, and is almost always a bad sign. Instead
+of this sort of loop, we strongly prefer to use a predicate function (which may
+be `static`_) that uses `early exits`_ to compute the predicate. We prefer the
+code to be structured like this:
+
+.. code-block:: c++
+
+ /// ListContainsFoo - Return true if the specified list has an element that is
+ /// a foo.
+ static bool ListContainsFoo(const std::vector<Bar*> &List) {
+ for (unsigned i = 0, e = List.size(); i != e; ++i)
+ if (List[i]->isFoo())
+ return true;
+ return false;
+ }
+ ...
+
+ if (ListContainsFoo(BarList)) {
+ ...
+ }
+
+There are many reasons for doing this: it reduces indentation and factors out
+code which can often be shared by other code that checks for the same predicate.
+More importantly, it *forces you to pick a name* for the function, and forces
+you to write a comment for it. In this silly example, this doesn't add much
+value. However, if the condition is complex, this can make it a lot easier for
+the reader to understand the code that queries for this predicate. Instead of
+being faced with the in-line details of how we check to see if the BarList
+contains a foo, we can trust the function name and continue reading with better
+locality.
+
+The Low-Level Issues
+--------------------
+
+Name Types, Functions, Variables, and Enumerators Properly
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Poorly-chosen names can mislead the reader and cause bugs. We cannot stress
+enough how important it is to use *descriptive* names. Pick names that match
+the semantics and role of the underlying entities, within reason. Avoid
+abbreviations unless they are well known. After picking a good name, make sure
+to use consistent capitalization for the name, as inconsistency requires clients
+to either memorize the APIs or to look it up to find the exact spelling.
+
+In general, names should be in camel case (e.g. ``TextFileReader`` and
+``isLValue()``). Different kinds of declarations have different rules:
+
+* **Type names** (including classes, structs, enums, typedefs, etc) should be
+ nouns and start with an upper-case letter (e.g. ``TextFileReader``).
+
+* **Variable names** should be nouns (as they represent state). The name should
+ be camel case, and start with an upper case letter (e.g. ``Leader`` or
+ ``Boats``).
+
+* **Function names** should be verb phrases (as they represent actions), and
+  command-like functions should be imperative. The name should be camel case,
+  and start with a lower case letter (e.g. ``openFile()`` or ``isFoo()``).
+
+* **Enum declarations** (e.g. ``enum Foo {...}``) are types, so they should
+ follow the naming conventions for types. A common use for enums is as a
+ discriminator for a union, or an indicator of a subclass. When an enum is
+ used for something like this, it should have a ``Kind`` suffix
+ (e.g. ``ValueKind``).
+
+* **Enumerators** (e.g. ``enum { Foo, Bar }``) and **public member variables**
+ should start with an upper-case letter, just like types. Unless the
+ enumerators are defined in their own small namespace or inside a class,
+ enumerators should have a prefix corresponding to the enum declaration name.
+ For example, ``enum ValueKind { ... };`` may contain enumerators like
+ ``VK_Argument``, ``VK_BasicBlock``, etc. Enumerators that are just
+ convenience constants are exempt from the requirement for a prefix. For
+ instance:
+
+ .. code-block:: c++
+
+ enum {
+ MaxSize = 42,
+ Density = 12
+ };
+
+As an exception, classes that mimic STL classes can have member names in STL's
+style of lower-case words separated by underscores (e.g. ``begin()``,
+``push_back()``, and ``empty()``).
+
+Here are some examples of good and bad names:
+
+.. code-block:: c++
+
+ class VehicleMaker {
+ ...
+ Factory<Tire> F; // Bad -- abbreviation and non-descriptive.
+ Factory<Tire> Factory; // Better.
+    Factory<Tire> TireFactory; // Even better -- if VehicleMaker has more than
+                               // one kind of factory.
+ };
+
+ Vehicle MakeVehicle(VehicleType Type) {
+ VehicleMaker M; // Might be OK if having a short life-span.
+ Tire tmp1 = M.makeTire(); // Bad -- 'tmp1' provides no information.
+ Light headlight = M.makeLight("head"); // Good -- descriptive.
+ ...
+ }
+
+Assert Liberally
+^^^^^^^^^^^^^^^^
+
+Use the "``assert``" macro to its fullest. Check all of your preconditions and
+assumptions, you never know when a bug (not necessarily even yours) might be
+caught early by an assertion, which reduces debugging time dramatically. The
+"``<cassert>``" header file is probably already included by the header files you
+are using, so it doesn't cost anything to use it.
+
+To further assist with debugging, make sure to put some kind of error message in
+the assertion statement, which is printed if the assertion is tripped. This
+helps the poor debugger make sense of why an assertion is being made and
+enforced, and hopefully what to do about it. Here is one complete example:
+
+.. code-block:: c++
+
+ inline Value *getOperand(unsigned i) {
+    assert(i < Operands.size() && "getOperand() out of range!");
+ return Operands[i];
+ }
+
+Here are more examples:
+
+.. code-block:: c++
+
+ assert(Ty->isPointerType() && "Can't allocate a non pointer type!");
+
+ assert((Opcode == Shl || Opcode == Shr) && "ShiftInst Opcode invalid!");
+
+ assert(idx < getNumSuccessors() && "Successor # out of range!");
+
+ assert(V1.getType() == V2.getType() && "Constant types must be identical!");
+
+ assert(isa<PHINode>(Succ->front()) && "Only works on PHId BBs!");
+
+You get the idea.
+
+Please be aware that, when adding assert statements, not all compilers are aware
+of the semantics of the assert. In some places, asserts are used to indicate a
+piece of code that should not be reached. These are typically of the form:
+
+.. code-block:: c++
+
+ assert(0 && "Some helpful error message");
+
+When used in a function that returns a value, they should be followed with a
+return statement and a comment indicating that this line is never reached. This
+will prevent a compiler which is unable to deduce that the assert statement
+never returns from generating a warning.
+
+.. code-block:: c++
+
+ assert(0 && "Some helpful error message");
+ return 0;
+
+Another issue is that values used only by assertions will produce an "unused
+value" warning when assertions are disabled. For example, this code will warn:
+
+.. code-block:: c++
+
+ unsigned Size = V.size();
+ assert(Size > 42 && "Vector smaller than it should be");
+
+ bool NewToSet = Myset.insert(Value);
+ assert(NewToSet && "The value shouldn't be in the set yet");
+
+These are two interesting different cases. In the first case, the call to
+``V.size()`` is only useful for the assert, and we don't want it executed when
+assertions are disabled. Code like this should move the call into the assert
+itself. In the second case, the side effects of the call must happen whether
+the assert is enabled or not. In this case, the value should be cast to void to
+disable the warning. To be specific, it is preferred to write the code like
+this:
+
+.. code-block:: c++
+
+ assert(V.size() > 42 && "Vector smaller than it should be");
+
+ bool NewToSet = Myset.insert(Value); (void)NewToSet;
+ assert(NewToSet && "The value shouldn't be in the set yet");
+
+Do Not Use ``using namespace std``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In LLVM, we prefer to explicitly prefix all identifiers from the standard
+namespace with an "``std::``" prefix, rather than rely on "``using namespace
+std;``".
+
+In header files, adding a ``'using namespace XXX'`` directive pollutes the
+namespace of any source file that ``#include``\s the header. This is clearly a
+bad thing.
+
+In implementation files (e.g. ``.cpp`` files), the rule is more of a stylistic
+rule, but is still important. Basically, using explicit namespace prefixes
+makes the code **clearer**, because it is immediately obvious what facilities
+are being used and where they are coming from. And **more portable**, because
+namespace clashes cannot occur between LLVM code and other namespaces. The
+portability rule is important because different standard library implementations
+expose different symbols (potentially ones they shouldn't), and future revisions
+to the C++ standard will add more symbols to the ``std`` namespace. As such, we
+never use ``'using namespace std;'`` in LLVM.
+
+The exception to the general rule (i.e. it's not an exception for the ``std``
+namespace) is for implementation files. For example, all of the code in the
+LLVM project implements code that lives in the 'llvm' namespace. As such, it is
+ok, and actually clearer, for the ``.cpp`` files to have a ``'using namespace
+llvm;'`` directive at the top, after the ``#include``\s. This reduces
+indentation in the body of the file for source editors that indent based on
+braces, and keeps the conceptual context cleaner. The general form of this rule
+is that any ``.cpp`` file that implements code in any namespace may use that
+namespace (and its parents'), but should not use any others.
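+
+As a small sketch (the function and file are invented for illustration), the
+top of a ``.cpp`` file in the LLVM tree typically looks like this:
+
+.. code-block:: c++
+
+  #include "llvm/Support/raw_ostream.h"
+  #include <string>
+  using namespace llvm;  // OK in LLVM's own .cpp files, after the #includes.
+
+  static void printGreeting(raw_ostream &OS, const std::string &Name) {
+    // raw_ostream needs no llvm:: prefix here, but std:: stays explicit.
+    OS << "hello " << Name << '\n';
+  }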
+
+Provide a Virtual Method Anchor for Classes in Headers
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+If a class is defined in a header file and has a vtable (either it has virtual
+methods or it derives from classes with virtual methods), it must always have at
+least one out-of-line virtual method in the class. Without this, the compiler
+will copy the vtable and RTTI into every ``.o`` file that ``#include``\s the
+header, bloating ``.o`` file sizes and increasing link times.
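+
+A common way to satisfy this rule (sketched here with an invented class name)
+is a deliberately trivial virtual method, conventionally called ``anchor()``,
+whose only purpose is to be defined out of line:
+
+.. code-block:: c++
+
+  // In Shape.h:
+  class Shape {
+    virtual void anchor();           // Defined out-of-line in Shape.cpp.
+  public:
+    virtual ~Shape() {}
+    virtual double area() const { return 0.0; }
+  };
+
+  // In Shape.cpp:
+  void Shape::anchor() {}            // The vtable and RTTI are emitted here.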
+
+Use ``LLVM_DELETED_FUNCTION`` to mark uncallable methods
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Prior to C++11, a common pattern to make a class uncopyable was to declare an
+unimplemented copy constructor and copy assignment operator and make them
+private. This would give a compiler error for accessing a private method or a
+linker error because it wasn't implemented.
+
+With C++11, we can mark methods that won't be implemented with ``= delete``.
+This will trigger a much better error message and tell the compiler that the
+method will never be implemented. This enables other checks like
+``-Wunused-private-field`` to run correctly on classes that contain these
+methods.
+
+To maintain compatibility with C++03, ``LLVM_DELETED_FUNCTION`` should be used
+which will expand to ``= delete`` if the compiler supports it. These methods
+should still be declared private. Example of the uncopyable pattern:
+
+.. code-block:: c++
+
+ class DontCopy {
+ private:
+ DontCopy(const DontCopy&) LLVM_DELETED_FUNCTION;
+ DontCopy &operator =(const DontCopy&) LLVM_DELETED_FUNCTION;
+ public:
+ ...
+ };
+
+Don't evaluate ``end()`` every time through a loop
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Because C++ doesn't have a standard "``foreach``" loop (though it can be
+emulated with macros and may be coming in C++'0x) we end up writing a lot of
+loops that manually iterate from begin to end on a variety of containers or
+through other data structures. One common mistake is to write a loop in this
+style:
+
+.. code-block:: c++
+
+ BasicBlock *BB = ...
+ for (BasicBlock::iterator I = BB->begin(); I != BB->end(); ++I)
+ ... use I ...
+
+The problem with this construct is that it evaluates "``BB->end()``" every time
+through the loop. Instead of writing the loop like this, we strongly prefer
+loops to be written so that they evaluate it once before the loop starts. A
+convenient way to do this is like so:
+
+.. code-block:: c++
+
+ BasicBlock *BB = ...
+ for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
+ ... use I ...
+
+The observant may quickly point out that these two loops may have different
+semantics: if the container (a basic block in this case) is being mutated, then
+"``BB->end()``" may change its value every time through the loop and the second
+loop may not in fact be correct. If you actually do depend on this behavior,
+please write the loop in the first form and add a comment indicating that you
+did it intentionally.
+
+Why do we prefer the second form (when correct)? Writing the loop in the first
+form has two problems. First it may be less efficient than evaluating it at the
+start of the loop. In this case, the cost is probably minor --- a few extra
+loads every time through the loop. However, if the base expression is more
+complex, then the cost can rise quickly. I've seen loops where the end
+expression was actually something like: "``SomeMap[x]->end()``" and map lookups
+really aren't cheap. By writing it in the second form consistently, you
+eliminate the issue entirely and don't even have to think about it.
+
+The second (even bigger) issue is that writing the loop in the first form hints
+to the reader that the loop is mutating the container (a fact that a comment
+would handily confirm!). If you write the loop in the second form, it is
+immediately obvious without even looking at the body of the loop that the
+container isn't being modified, which makes it easier to read the code and
+understand what it does.
+
+While the second form of the loop is a few extra keystrokes, we do strongly
+prefer it.
+
+``#include <iostream>`` is Forbidden
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The use of ``#include <iostream>`` in library files is hereby **forbidden**,
+because many common implementations transparently inject a `static constructor`_
+into every translation unit that includes it.
+
+Note that using the other stream headers (``<sstream>`` for example) is not
+problematic in this regard --- just ``<iostream>``. However, ``raw_ostream``
+provides various APIs that are better performing for almost every use than
+``std::ostream`` style APIs.
+
+.. note::
+
+ New code should always use `raw_ostream`_ for writing, or the
+ ``llvm::MemoryBuffer`` API for reading files.
+
+.. _raw_ostream:
+
+Use ``raw_ostream``
+^^^^^^^^^^^^^^^^^^^
+
+LLVM includes a lightweight, simple, and efficient stream implementation in
+``llvm/Support/raw_ostream.h``, which provides all of the common features of
+``std::ostream``. All new code should use ``raw_ostream`` instead of
+``ostream``.
+
+Unlike ``std::ostream``, ``raw_ostream`` is not a template and can be forward
+declared as ``class raw_ostream``. Public headers should generally not include
+the ``raw_ostream`` header, but use forward declarations and constant references
+to ``raw_ostream`` instances.
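+
+For instance (``Foo`` is an invented class used only for this sketch):
+
+.. code-block:: c++
+
+  // In Foo.h: forward declare raw_ostream rather than including its header.
+  namespace llvm {
+    class raw_ostream;
+  }
+
+  class Foo {
+  public:
+    void print(llvm::raw_ostream &OS) const;
+  };
+
+  // In Foo.cpp: only the implementation pulls in the real header.
+  #include "llvm/Support/raw_ostream.h"
+
+  void Foo::print(llvm::raw_ostream &OS) const {
+    OS << "foo\n";
+  }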
+
+Avoid ``std::endl``
+^^^^^^^^^^^^^^^^^^^
+
+The ``std::endl`` modifier, when used with ``iostreams``, outputs a newline to
+the specified output stream. In addition to doing this, however, it also
+flushes the output stream. In other words, these are equivalent:
+
+.. code-block:: c++
+
+ std::cout << std::endl;
+ std::cout << '\n' << std::flush;
+
+Most of the time, you probably have no reason to flush the output stream, so
+it's better to use a literal ``'\n'``.
+
+Microscopic Details
+-------------------
+
+This section describes preferred low-level formatting guidelines along with
+reasoning on why we prefer them.
+
+Spaces Before Parentheses
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+We prefer to put a space before an open parenthesis only in control flow
+statements, but not in normal function call expressions and function-like
+macros. For example, this is good:
+
+.. code-block:: c++
+
+ if (x) ...
+ for (i = 0; i != 100; ++i) ...
+ while (llvm_rocks) ...
+
+ somefunc(42);
+ assert(3 != 4 && "laws of math are failing me");
+
+ a = foo(42, 92) + bar(x);
+
+and this is bad:
+
+.. code-block:: c++
+
+ if(x) ...
+ for(i = 0; i != 100; ++i) ...
+ while(llvm_rocks) ...
+
+ somefunc (42);
+ assert (3 != 4 && "laws of math are failing me");
+
+ a = foo (42, 92) + bar (x);
+
+The reason for doing this is not completely arbitrary. This style makes control
+flow operators stand out more, and makes expressions flow better. The function
+call operator binds very tightly as a postfix operator. Putting a space after a
+function name (as in the last example) makes it appear that the code might bind
+the arguments of the left-hand-side of a binary operator with the argument list
+of a function and the name of the right side. More specifically, it is easy to
+misread the "``a``" example as:
+
+.. code-block:: c++
+
+ a = foo ((42, 92) + bar) (x);
+
+when skimming through the code. By avoiding a space in a function, we avoid
+this misinterpretation.
+
+Prefer Preincrement
+^^^^^^^^^^^^^^^^^^^
+
+Hard-and-fast rule: Preincrement (``++X``) may be no slower than postincrement
+(``X++``) and could very well be a lot faster than it. Use preincrement
+whenever possible.
+
+The semantics of postincrement include making a copy of the value being
+incremented, returning it, and then preincrementing the "work value". For
+primitive types, this isn't a big deal. But for iterators, it can be a huge
+issue (for example, some iterators contain stack and set objects in them...
+copying an iterator could invoke the copy ctors of these as well). In general,
+get in the habit of always using preincrement, and you won't have a problem.
+
+
+Namespace Indentation
+^^^^^^^^^^^^^^^^^^^^^
+
+In general, we strive to reduce indentation wherever possible. This is useful
+because we want code to `fit into 80 columns`_ without wrapping horribly, but
+also because it makes it easier to understand the code. Namespaces are a funny
+thing: they are often large, and we often desire to put lots of stuff into them
+(so they can be large). Other times they are tiny, because they just hold an
+enum or something similar. In order to balance this, we use different
+approaches for small versus large namespaces.
+
+If a namespace definition is small and *easily* fits on a screen (say, less than
+35 lines of code), then you should indent its body. Here's an example:
+
+.. code-block:: c++
+
+ namespace llvm {
+ namespace X86 {
+ /// RelocationType - An enum for the x86 relocation codes. Note that
+ /// the terminology here doesn't follow x86 convention - word means
+ /// 32-bit and dword means 64-bit.
+ enum RelocationType {
+ /// reloc_pcrel_word - PC relative relocation, add the relocated value to
+ /// the value already in memory, after we adjust it for where the PC is.
+ reloc_pcrel_word = 0,
+
+ /// reloc_picrel_word - PIC base relative relocation, add the relocated
+ /// value to the value already in memory, after we adjust it for where the
+ /// PIC base is.
+ reloc_picrel_word = 1,
+
+ /// reloc_absolute_word, reloc_absolute_dword - Absolute relocation, just
+ /// add the relocated value to the value already in memory.
+ reloc_absolute_word = 2,
+ reloc_absolute_dword = 3
+ };
+ }
+ }
+
+Since the body is small, indenting adds value because it makes it very clear
+where the namespace starts and ends, and it is easy to take the whole thing in
+in one "gulp" when reading the code. If the blob of code in the namespace is
+larger (as it typically is in a header in the ``llvm`` or ``clang`` namespaces),
+do not indent the code, and add a comment indicating what namespace is being
+closed. For example:
+
+.. code-block:: c++
+
+ namespace llvm {
+ namespace knowledge {
+
+ /// Grokable - This class represents things that Smith can have an intimate
+ /// understanding of and contains the data associated with it.
+ class Grokable {
+ ...
+ public:
+ explicit Grokable() { ... }
+ virtual ~Grokable() = 0;
+
+ ...
+
+ };
+
+ } // end namespace knowledge
+ } // end namespace llvm
+
+Because the class is large, we don't expect that the reader can easily
+understand the entire concept in a glance, and the end of the file (where the
+namespaces end) may be a long ways away from the place they open. As such,
+indenting the contents of the namespace doesn't add any value, and detracts from
+the readability of the class. In these cases it is best to *not* indent the
+contents of the namespace.
+
+.. _static:
+
+Anonymous Namespaces
+^^^^^^^^^^^^^^^^^^^^
+
+After talking about namespaces in general, you may be wondering about anonymous
+namespaces in particular. Anonymous namespaces are a great language feature
+that tells the C++ compiler that the contents of the namespace are only visible
+within the current translation unit, allowing more aggressive optimization and
+eliminating the possibility of symbol name collisions. Anonymous namespaces are
+to C++ as "static" is to C functions and global variables. While "``static``"
+is available in C++, anonymous namespaces are more general: they can make entire
+classes private to a file.
+
+The problem with anonymous namespaces is that they naturally want to encourage
+indentation of their body, and they reduce locality of reference: if you see a
+random function definition in a C++ file, it is easy to see if it is marked
+static, but seeing if it is in an anonymous namespace requires scanning a big
+chunk of the file.
+
+Because of this, we have a simple guideline: make anonymous namespaces as small
+as possible, and only use them for class declarations. For example, this is
+good:
+
+.. code-block:: c++
+
+ namespace {
+ class StringSort {
+ ...
+ public:
+ StringSort(...)
+ bool operator<(const char *RHS) const;
+ };
+ } // end anonymous namespace
+
+ static void Helper() {
+ ...
+ }
+
+ bool StringSort::operator<(const char *RHS) const {
+ ...
+ }
+
+This is bad:
+
+.. code-block:: c++
+
+ namespace {
+ class StringSort {
+ ...
+ public:
+ StringSort(...)
+ bool operator<(const char *RHS) const;
+ };
+
+ void Helper() {
+ ...
+ }
+
+ bool StringSort::operator<(const char *RHS) const {
+ ...
+ }
+
+ } // end anonymous namespace
+
+This is bad specifically because if you're looking at "``Helper``" in the
+middle of a large C++ file, you have no immediate way to tell if it is local to
+the file. When it is marked static explicitly, this is immediately obvious.
+Also, there is no reason to enclose the definition of "``operator<``" in the
+namespace just because it was declared there.
+
+See Also
+========
+
+A lot of these comments and recommendations have been culled from other
+sources. Two particularly important books for our work are:
+
+#. `Effective C++
+ <http://www.amazon.com/Effective-Specific-Addison-Wesley-Professional-Computing/dp/0321334876>`_
+ by Scott Meyers. Also interesting and useful are "More Effective C++" and
+ "Effective STL" by the same author.
+
+#. `Large-Scale C++ Software Design
+ <http://www.amazon.com/Large-Scale-Software-Design-John-Lakos/dp/0201633620/ref=sr_1_1>`_
+   by John Lakos.
+
+If you get some free time, and you haven't read them: do so, you might learn
+something.
diff --git a/docs/CommandGuide/FileCheck.rst b/docs/CommandGuide/FileCheck.rst
new file mode 100644
index 00000000000..51a9bf6293b
--- /dev/null
+++ b/docs/CommandGuide/FileCheck.rst
@@ -0,0 +1,284 @@
+FileCheck - Flexible pattern matching file verifier
+===================================================
+
+
+SYNOPSIS
+--------
+
+
+**FileCheck** *match-filename* [*--check-prefix=XXX*] [*--strict-whitespace*]
+
+
+DESCRIPTION
+-----------
+
+
+**FileCheck** reads two files (one from standard input, and one specified on the
+command line) and uses one to verify the other. This behavior is particularly
+useful for the testsuite, which wants to verify that the output of some tool
+(e.g. llc) contains the expected information (for example, a movsd from esp or
+whatever is interesting). This is similar to using grep, but it is optimized
+for matching multiple different inputs in one file in a specific order.
+
+The *match-filename* file specifies the file that contains the patterns to
+match. The file to verify is always read from standard input.
+
+
+OPTIONS
+-------
+
+
+
+**-help**
+
+ Print a summary of command line options.
+
+
+
+**--check-prefix** *prefix*
+
+ FileCheck searches the contents of *match-filename* for patterns to match. By
+ default, these patterns are prefixed with "CHECK:". If you'd like to use a
+ different prefix (e.g. because the same input file is checking multiple
+ different tools or options), the **--check-prefix** argument allows you to
+ specify a specific prefix to match.
+
+
+
+**--strict-whitespace**
+
+ By default, FileCheck canonicalizes input horizontal whitespace (spaces and
+ tabs) which causes it to ignore these differences (a space will match a tab).
+ The --strict-whitespace argument disables this behavior.
+
+
+
+**-version**
+
+ Show the version number of this program.
+
+
+
+
+EXIT STATUS
+-----------
+
+
+If **FileCheck** verifies that the file matches the expected contents, it exits
+with 0. Otherwise, or if an error occurs, it will exit with a non-zero value.
+
+
+TUTORIAL
+--------
+
+
+FileCheck is typically used from LLVM regression tests, being invoked on the RUN
+line of the test. A simple example of using FileCheck from a RUN line looks
+like this:
+
+
+.. code-block:: llvm
+
+ ; RUN: llvm-as < %s | llc -march=x86-64 | FileCheck %s
+
+
+This syntax says to pipe the current file ("%s") into llvm-as, pipe that into
+llc, then pipe the output of llc into FileCheck. This means that FileCheck will
+be verifying its standard input (the llc output) against the filename argument
+specified (the original .ll file specified by "%s"). To see how this works,
+let's look at the rest of the .ll file (after the RUN line):
+
+
+.. code-block:: llvm
+
+ define void @sub1(i32* %p, i32 %v) {
+ entry:
+ ; CHECK: sub1:
+ ; CHECK: subl
+ %0 = tail call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %p, i32 %v)
+ ret void
+ }
+
+ define void @inc4(i64* %p) {
+ entry:
+ ; CHECK: inc4:
+ ; CHECK: incq
+ %0 = tail call i64 @llvm.atomic.load.add.i64.p0i64(i64* %p, i64 1)
+ ret void
+ }
+
+
+Here you can see some "CHECK:" lines specified in comments. Now you can see
+how the file is piped into llvm-as, then llc, and the machine code output is
+what we are verifying. FileCheck checks the machine code output to verify that
+it matches what the "CHECK:" lines specify.
+
+The syntax of the CHECK: lines is very simple: they are fixed strings that
+must occur in order. FileCheck defaults to ignoring horizontal whitespace
+differences (e.g. a space is allowed to match a tab) but otherwise, the
+contents of the CHECK: line are required to match something in the test file
+exactly.
+
+One nice thing about FileCheck (compared to grep) is that it allows merging
+test cases together into logical groups. For example, because the test above
+is checking for the "sub1:" and "inc4:" labels, it will not match unless there
+is a "subl" in between those labels. If it existed somewhere else in the file,
+that would not count: "grep subl" matches if subl exists anywhere in the
+file.
+
+The FileCheck -check-prefix option
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+
+The FileCheck -check-prefix option allows multiple test configurations to be
+driven from one .ll file. This is useful in many circumstances, for example,
+testing different architectural variants with llc. Here's a simple example:
+
+
+.. code-block:: llvm
+
+ ; RUN: llvm-as < %s | llc -mtriple=i686-apple-darwin9 -mattr=sse41 \
+ ; RUN: | FileCheck %s -check-prefix=X32
+ ; RUN: llvm-as < %s | llc -mtriple=x86_64-apple-darwin9 -mattr=sse41 \
+ ; RUN: | FileCheck %s -check-prefix=X64
+
+ define <4 x i32> @pinsrd_1(i32 %s, <4 x i32> %tmp) nounwind {
+    %tmp1 = insertelement <4 x i32> %tmp, i32 %s, i32 1
+ ret <4 x i32> %tmp1
+ ; X32: pinsrd_1:
+ ; X32: pinsrd $1, 4(%esp), %xmm0
+
+ ; X64: pinsrd_1:
+ ; X64: pinsrd $1, %edi, %xmm0
+ }
+
+
+In this case, we're testing that we get the expected code generation with
+both 32-bit and 64-bit code generation.
+
+
+The "CHECK-NEXT:" directive
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+
+Sometimes you want to match lines and would like to verify that matches
+happen on exactly consecutive lines with no other lines in between them. In
+this case, you can use CHECK: and CHECK-NEXT: directives to specify this. If
+you specified a custom check prefix, just use "<PREFIX>-NEXT:". For
+example, something like this works as you'd expect:
+
+
+.. code-block:: llvm
+
+ define void @t2(<2 x double>* %r, <2 x double>* %A, double %B) {
+ %tmp3 = load <2 x double>* %A, align 16
+ %tmp7 = insertelement <2 x double> undef, double %B, i32 0
+ %tmp9 = shufflevector <2 x double> %tmp3,
+ <2 x double> %tmp7,
+ <2 x i32> < i32 0, i32 2 >
+ store <2 x double> %tmp9, <2 x double>* %r, align 16
+ ret void
+
+ ; CHECK: t2:
+ ; CHECK: movl 8(%esp), %eax
+ ; CHECK-NEXT: movapd (%eax), %xmm0
+ ; CHECK-NEXT: movhpd 12(%esp), %xmm0
+ ; CHECK-NEXT: movl 4(%esp), %eax
+ ; CHECK-NEXT: movapd %xmm0, (%eax)
+ ; CHECK-NEXT: ret
+ }
+
+
+A CHECK-NEXT: directive rejects the input unless there is exactly one newline
+between it and the previous directive. A CHECK-NEXT: directive cannot be the
+first directive in a file.
+
+
+The "CHECK-NOT:" directive
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+
+The CHECK-NOT: directive is used to verify that a string doesn't occur
+between two matches (or before the first match, or after the last match). For
+example, to verify that a load is removed by a transformation, a test like this
+can be used:
+
+
+.. code-block:: llvm
+
+ define i8 @coerce_offset0(i32 %V, i32* %P) {
+ store i32 %V, i32* %P
+
+ %P2 = bitcast i32* %P to i8*
+ %P3 = getelementptr i8* %P2, i32 2
+
+ %A = load i8* %P3
+ ret i8 %A
+ ; CHECK: @coerce_offset0
+ ; CHECK-NOT: load
+ ; CHECK: ret i8
+ }
+
+
+
+FileCheck Pattern Matching Syntax
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+
+The CHECK: and CHECK-NOT: directives both take a pattern to match. For most
+uses of FileCheck, fixed string matching is perfectly sufficient. For some
+things, a more flexible form of matching is desired. To support this, FileCheck
+allows you to specify regular expressions in matching strings, surrounded by
+double braces: **{{yourregex}}**. Because we want to use fixed string
+matching for a majority of what we do, FileCheck has been designed to support
+mixing and matching fixed string matching with regular expressions. This allows
+you to write things like this:
+
+
+.. code-block:: llvm
+
+ ; CHECK: movhpd {{[0-9]+}}(%esp), {{%xmm[0-7]}}
+
+
+In this case, any offset from the ESP register will be allowed, and any xmm
+register will be allowed.
+
+Because regular expressions are enclosed with double braces, they are
+visually distinct, and you don't need to use escape characters within the double
+braces like you would in C. In the rare case that you want to match double
+braces explicitly from the input, you can use something ugly like
+**{{[{][{]}}** as your pattern.
+
+
+FileCheck Variables
+~~~~~~~~~~~~~~~~~~~
+
+
+It is often useful to match a pattern and then verify that it occurs again
+later in the file. For codegen tests, this can be useful to allow any register,
+but verify that that register is used consistently later. To do this, FileCheck
+allows named variables to be defined and substituted into patterns. Here is a
+simple example:
+
+
+.. code-block:: llvm
+
+ ; CHECK: test5:
+ ; CHECK: notw [[REGISTER:%[a-z]+]]
+ ; CHECK: andw {{.*}}[[REGISTER]]
+
+
+The first check line matches a regex (**%[a-z]+**) and captures it into
+the variable "REGISTER". The second line verifies that whatever is in REGISTER
+occurs later in the file after an "andw". FileCheck variable references are
+always contained in **[[ ]]** pairs, and their names can be formed with the
+regex "[a-zA-Z][a-zA-Z0-9]*". If a colon follows the name, then it is a
+definition of the variable; if not, it is a use.
+
+FileCheck variables can be defined multiple times, and uses always get the
+latest value. Note that variables are all read at the start of a "CHECK" line
+and are all defined at the end. This means that if you have something like
+"**CHECK: [[XYZ:.\\*]]x[[XYZ]]**", the check line will read the previous
+value of the XYZ variable and define a new one after the match is performed. If
+you need to do something like this you can probably take advantage of the fact
+that FileCheck is not actually line-oriented when it matches; this allows you to
+define two separate CHECK lines that match on the same line.
diff --git a/docs/CommandGuide/bugpoint.rst b/docs/CommandGuide/bugpoint.rst
new file mode 100644
index 00000000000..c1b3b6eca62
--- /dev/null
+++ b/docs/CommandGuide/bugpoint.rst
@@ -0,0 +1,247 @@
+bugpoint - automatic test case reduction tool
+=============================================
+
+
+SYNOPSIS
+--------
+
+
+**bugpoint** [*options*] [*input LLVM ll/bc files*] [*LLVM passes*] **--args**
+*program arguments*
+
+
+DESCRIPTION
+-----------
+
+
+**bugpoint** narrows down the source of problems in LLVM tools and passes. It
+can be used to debug three types of failures: optimizer crashes, miscompilations
+by optimizers, or bad native code generation (including problems in the static
+and JIT compilers). It aims to reduce large test cases to small, useful ones.
+For more information on the design and inner workings of **bugpoint**, as well as
+advice for using bugpoint, see *llvm/docs/Bugpoint.html* in the LLVM
+distribution.
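+
+For example, a typical invocation might look like the following sketch (the
+input file, pass names, and program arguments are purely illustrative):
+
+
+.. code-block:: sh
+
+ # Run the -licm and -gvn passes over program.bc, compile the result with llc,
+ # and execute it with the given arguments; bugpoint reduces the test case if
+ # anything crashes or the output changes.
+ bugpoint --run-llc program.bc -licm -gvn --args -- --verbose input.txt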
+
+
+OPTIONS
+-------
+
+
+
+**--additional-so** *library*
+
+ Load the dynamic shared object *library* into the test program whenever it is
+ run. This is useful if you are debugging programs which depend on non-LLVM
+ libraries (such as the X or curses libraries) to run.
+
+
+
+**--append-exit-code**\ =\ *{true,false}*
+
+ Append the test program's exit code to the output file so that a change in exit
+ code is considered a test failure. Defaults to false.
+
+
+
+**--args** *program args*
+
+ Pass all arguments specified after -args to the test program whenever it runs.
+ Note that if any of the *program args* start with a '-', you should use:
+
+
+ .. code-block:: perl
+
+ bugpoint [bugpoint args] --args -- [program args]
+
+
+ The "--" right after the **--args** option tells **bugpoint** to consider any
+ options starting with ``-`` to be part of the **--args** option, not as options to
+ **bugpoint** itself.
+
+
+
+**--tool-args** *tool args*
+
+ Pass all arguments specified after --tool-args to the LLVM tool under test
+ (**llc**, **lli**, etc.) whenever it runs. You should use this option in the
+ following way:
+
+
+ .. code-block:: perl
+
+ bugpoint [bugpoint args] --tool-args -- [tool args]
+
+
+ The "--" right after the **--tool-args** option tells **bugpoint** to consider any
+ options starting with ``-`` to be part of the **--tool-args** option, not as
+ options to **bugpoint** itself. (See **--args**, above.)
+
+
+
+**--safe-tool-args** *tool args*
+
+ Pass all arguments specified after **--safe-tool-args** to the "safe" execution
+ tool.
+
+
+
+**--gcc-tool-args** *gcc tool args*
+
+ Pass all arguments specified after **--gcc-tool-args** to the invocation of
+ **gcc**.
+
+
+
+**--opt-args** *opt args*
+
+ Pass all arguments specified after **--opt-args** to the invocation of **opt**.
+
+
+
+**--disable-{dce,simplifycfg}**
+
+ Do not run the specified passes to clean up and reduce the size of the test
+ program. By default, **bugpoint** uses these passes internally when attempting to
+ reduce test programs. If you're trying to find a bug in one of these passes,
+ **bugpoint** may crash.
+
+
+
+**--enable-valgrind**
+
+ Use valgrind to find faults in the optimization phase. This will allow
+ bugpoint to find otherwise asymptomatic problems caused by memory
+ mis-management.
+
+
+
+**-find-bugs**
+
+ Continually randomize the specified passes and run them on the test program
+ until a bug is found or the user kills **bugpoint**.
+
+
+
+**-help**
+
+ Print a summary of command line options.
+
+
+
+**--input** *filename*
+
+ Open *filename* and redirect the standard input of the test program, whenever
+ it runs, to come from that file.
+
+
+
+**--load** *plugin*
+
+ Load the dynamic object *plugin* into **bugpoint** itself. This object should
+ register new optimization passes. Once loaded, the object will add new command
+ line options to enable various optimizations. To see the new complete list of
+ optimizations, use the **-help** and **--load** options together; for example:
+
+
+ .. code-block:: perl
+
+ bugpoint --load myNewPass.so -help
+
+
+
+
+**--mlimit** *megabytes*
+
+ Specifies an upper limit on memory usage of the optimization and codegen. Set
+ to zero to disable the limit.
+
+
+
+**--output** *filename*
+
+ Whenever the test program produces output on its standard output stream, it
+ should match the contents of *filename* (the "reference output"). If you
+ do not use this option, **bugpoint** will attempt to generate a reference output
+ by compiling the program with the "safe" backend and running it.
+
+
+
+**--profile-info-file** *filename*
+
+ Profile file loaded by **--profile-loader**.
+
+
+
+**--run-{int,jit,llc,custom}**
+
+ Whenever the test program is compiled, **bugpoint** should generate code for it
+ using the specified code generator. These options allow you to choose the
+ interpreter, the JIT compiler, the static native code compiler, or a
+ custom command (see **--exec-command**) respectively.
+
+
+
+**--safe-{llc,custom}**
+
+ When debugging a code generator, **bugpoint** should use the specified code
+ generator as the "safe" code generator. This is a known-good code generator
+ used to generate the "reference output" if it has not been provided, and to
+ compile portions of the program as they are excluded from the testcase.
+ These options allow you to choose the
+ static native code compiler or a custom command (see **--exec-command**),
+ respectively. The interpreter and the JIT backends cannot currently
+ be used as the "safe" backends.
+
+
+
+**--exec-command** *command*
+
+ This option defines the command to use with the **--run-custom** and
+ **--safe-custom** options to execute the bitcode testcase. This can
+ be useful for cross-compilation.
+
+
+
+**--compile-command** *command*
+
+ This option defines the command to use with the **--compile-custom**
+ option to compile the bitcode testcase. This can be useful for
+ testing compiler output without running any link or execute stages. To
+ generate a reduced unit test, you may add CHECK directives to the
+ testcase and pass the name of an executable compile-command script in this form:
+
+
+ .. code-block:: sh
+
+ #!/bin/sh
+ llc "$@"
+ not FileCheck [bugpoint input file].ll < bugpoint-test-program.s
+
+
+ This script will "fail" as long as FileCheck passes. So the result
+ will be the minimum bitcode that passes FileCheck.
+
+
+
+**--safe-path** *path*
+
+ This option defines the path to the command to execute with the
+ **--safe-{int,jit,llc,custom}**
+ option.
+
+
+
+
+EXIT STATUS
+-----------
+
+
+If **bugpoint** succeeds in finding a problem, it will exit with 0. Otherwise,
+if an error occurs, it will exit with a non-zero value.
+
+
+SEE ALSO
+--------
+
+
+opt
diff --git a/docs/CommandGuide/index.rst b/docs/CommandGuide/index.rst
new file mode 100644
index 00000000000..73a4835dd7a
--- /dev/null
+++ b/docs/CommandGuide/index.rst
@@ -0,0 +1,53 @@
+.. _commands:
+
+LLVM Command Guide
+------------------
+
+The following documents are command descriptions for all of the LLVM tools.
+These pages describe how to use the LLVM commands and what their options are.
+Note that these pages do not describe all of the options available for all
+tools. To get a complete listing, pass the ``--help`` (general options) or
+``--help-hidden`` (general and debugging options) arguments to the tool you are
+interested in.
+
+Basic Commands
+~~~~~~~~~~~~~~
+
+.. toctree::
+ :maxdepth: 1
+
+ llvm-as
+ llvm-dis
+ opt
+ llc
+ lli
+ llvm-link
+ llvm-ar
+ llvm-ranlib
+ llvm-nm
+ llvm-prof
+ llvm-config
+ llvm-diff
+ llvm-cov
+ llvm-stress
+
+Debugging Tools
+~~~~~~~~~~~~~~~
+
+.. toctree::
+ :maxdepth: 1
+
+ bugpoint
+ llvm-extract
+ llvm-bcanalyzer
+
+Developer Tools
+~~~~~~~~~~~~~~~
+
+.. toctree::
+ :maxdepth: 1
+
+ FileCheck
+ tblgen
+ lit
+ llvm-build
diff --git a/docs/CommandGuide/lit.rst b/docs/CommandGuide/lit.rst
new file mode 100644
index 00000000000..3eb0be91f13
--- /dev/null
+++ b/docs/CommandGuide/lit.rst
@@ -0,0 +1,474 @@
+lit - LLVM Integrated Tester
+============================
+
+
+SYNOPSIS
+--------
+
+
+**lit** [*options*] [*tests*]
+
+
+DESCRIPTION
+-----------
+
+
+**lit** is a portable tool for executing LLVM and Clang style test suites,
+summarizing their results, and providing indication of failures. **lit** is
+designed to be a lightweight testing tool with as simple a user interface as
+possible.
+
+**lit** should be run with one or more *tests* to run specified on the command
+line. Tests can be either individual test files or directories to search for
+tests (see "TEST DISCOVERY").
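+
+For example, invocations along these lines are typical (the test paths and the
+parameter name are hypothetical):
+
+
+.. code-block:: sh
+
+ # Run every test found under a directory, with succinct output and verbose
+ # information for any failures.
+ lit -s -v test/CodeGen
+
+ # Run a single test file, passing a test-suite-specific parameter.
+ lit --param build_mode=Release test/CodeGen/X86/add.ll
+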
+
+Each specified test will be executed (potentially in parallel) and once all
+tests have been run **lit** will print summary information on the number of tests
+which passed or failed (see "TEST STATUS RESULTS"). The **lit** program will
+execute with a non-zero exit code if any tests fail.
+
+By default **lit** will use a succinct progress display and will only print
+summary information for test failures. See "OUTPUT OPTIONS" for options
+controlling the **lit** progress display and output.
+
+**lit** also includes a number of options for controlling how tests are executed
+(specific features may depend on the particular test format). See "EXECUTION
+OPTIONS" for more information.
+
+Finally, **lit** also supports additional options for running only a subset of
+the tests specified on the command line; see "SELECTION OPTIONS" for
+more information.
+
+Users interested in the **lit** architecture or designing a **lit** testing
+implementation should see "LIT INFRASTRUCTURE".
+
+
+GENERAL OPTIONS
+---------------
+
+
+
+**-h**, **--help**
+
+ Show the **lit** help message.
+
+
+
+**-j** *N*, **--threads**\ =\ *N*
+
+ Run *N* tests in parallel. By default, this is automatically chosen to match
+ the number of detected available CPUs.
+
+
+
+**--config-prefix**\ =\ *NAME*
+
+ Search for *NAME.cfg* and *NAME.site.cfg* when searching for test suites,
+ instead of *lit.cfg* and *lit.site.cfg*.
+
+
+
+**--param** *NAME*, **--param** *NAME*\ =\ *VALUE*
+
+ Add a user defined parameter *NAME* with the given *VALUE* (or the empty
+ string if not given). The meaning and use of these parameters is test suite
+ dependent.
+
+
+
+
+OUTPUT OPTIONS
+--------------
+
+
+
+**-q**, **--quiet**
+
+ Suppress any output except for test failures.
+
+
+
+**-s**, **--succinct**
+
+ Show less output, for example don't show information on tests that pass.
+
+
+
+**-v**, **--verbose**
+
+ Show more information on test failures, for example the entire test output
+ instead of just the test result.
+
+
+
+**--no-progress-bar**
+
+ Do not use the curses-based progress bar.
+
+
+
+
+EXECUTION OPTIONS
+-----------------
+
+
+
+**--path**\ =\ *PATH*
+
+ Specify an additional *PATH* to use when searching for executables in tests.
+
+
+
+**--vg**
+
+ Run individual tests under valgrind (using the memcheck tool). The
+ *--error-exitcode* argument for valgrind is used so that valgrind failures will
+ cause the program to exit with a non-zero status.
+
+
+
+**--vg-arg**\ =\ *ARG*
+
+ When *--vg* is used, specify an additional argument to pass to valgrind itself.
+
+
+
+**--time-tests**
+
+ Track the wall time individual tests take to execute and include the results in
+ the summary output. This is useful for determining which tests in a test suite
+ take the most time to execute. Note that this option is most useful with *-j
+ 1*.
+
+
+
+
+SELECTION OPTIONS
+-----------------
+
+
+
+**--max-tests**\ =\ *N*
+
+ Run at most *N* tests and then terminate.
+
+
+
+**--max-time**\ =\ *N*
+
+ Spend at most *N* seconds (approximately) running tests and then terminate.
+
+
+
+**--shuffle**
+
+ Run the tests in a random order.
+
+
+
+
+ADDITIONAL OPTIONS
+------------------
+
+
+
+**--debug**
+
+ Run **lit** in debug mode, for debugging configuration issues and **lit** itself.
+
+
+
+**--show-suites**
+
+ List the discovered test suites as part of the standard output.
+
+
+
+**--no-tcl-as-sh**
+
+ Run Tcl scripts internally (instead of converting to shell scripts).
+
+
+
+**--repeat**\ =\ *N*
+
+ Run each test *N* times. Currently this is primarily useful for timing tests;
+ other results are not collated in any reasonable fashion.
+
+
+
+
+EXIT STATUS
+-----------
+
+
+**lit** will exit with an exit code of 1 if there are any FAIL or XPASS
+results. Otherwise, it will exit with the status 0. Other exit codes are used
+for non-test related failures (for example a user error or an internal program
+error).
+
+
+TEST DISCOVERY
+--------------
+
+
+The inputs passed to **lit** can be either individual tests, or entire
+directories or hierarchies of tests to run. When **lit** starts up, the first
+thing it does is convert the inputs into a complete list of tests to run as part
+of *test discovery*.
+
+In the **lit** model, every test must exist inside some *test suite*. **lit**
+resolves the inputs specified on the command line to test suites by searching
+upwards from the input path until it finds a *lit.cfg* or *lit.site.cfg*
+file. These files serve as both a marker of test suites and as configuration
+files which **lit** loads in order to understand how to find and run the tests
+inside the test suite.
+
+Once **lit** has mapped the inputs into test suites it traverses the list of
+inputs adding tests for individual files and recursively searching for tests in
+directories.
+
+This behavior makes it easy to specify a subset of tests to run, while still
+allowing the test suite configuration to control exactly how tests are
+interpreted. In addition, **lit** always identifies tests by the test suite they
+are in, and their relative path inside the test suite. For appropriately
+configured projects, this allows **lit** to provide convenient and flexible
+support for out-of-tree builds.
+
+
+TEST STATUS RESULTS
+-------------------
+
+
+Each test ultimately produces one of the following six results:
+
+
+**PASS**
+
+ The test succeeded.
+
+
+
+**XFAIL**
+
+ The test failed, but that is expected. This is used for test formats which allow
+ specifying that a test does not currently work, but wish to leave it in the test
+ suite.
+
+
+
+**XPASS**
+
+ The test succeeded, but it was expected to fail. This is used for tests which
+ were specified as expected to fail, but are now succeeding (generally because
+ the feature they test was broken and has been fixed).
+
+
+
+**FAIL**
+
+ The test failed.
+
+
+
+**UNRESOLVED**
+
+ The test result could not be determined. For example, this occurs when the test
+ could not be run, the test itself is invalid, or the test was interrupted.
+
+
+
+**UNSUPPORTED**
+
+ The test is not supported in this environment. This is used by test formats
+ which can report unsupported tests.
+
+
+
+Depending on the test format tests may produce additional information about
+their status (generally only for failures). See the "OUTPUT OPTIONS"
+section for more information.
+
+
+LIT INFRASTRUCTURE
+------------------
+
+
+This section describes the **lit** testing architecture for users interested in
+creating a new **lit** testing implementation, or extending an existing one.
+
+**lit** proper is primarily an infrastructure for discovering and running
+arbitrary tests, and to expose a single convenient interface to these
+tests. **lit** itself doesn't know how to run tests; rather, this logic is
+defined by *test suites*.
+
+TEST SUITES
+~~~~~~~~~~~
+
+
+As described in "TEST DISCOVERY", tests are always located inside a *test
+suite*. Test suites serve to define the format of the tests they contain, the
+logic for finding those tests, and any additional information to run the tests.
+
+**lit** identifies test suites as directories containing *lit.cfg* or
+*lit.site.cfg* files (see also **--config-prefix**). Test suites are initially
+discovered by recursively searching up the directory hierarchy for all the input
+files passed on the command line. You can use **--show-suites** to display the
+discovered test suites at startup.
+
+Once a test suite is discovered, its config file is loaded. Config files
+themselves are Python modules which will be executed. When the config file is
+executed, two important global variables are predefined:
+
+
+**lit**
+
+ The global **lit** configuration object (a *LitConfig* instance), which defines
+ the builtin test formats, global configuration parameters, and other helper
+ routines for implementing test configurations.
+
+
+
+**config**
+
+ This is the config object (a *TestingConfig* instance) for the test suite,
+ which the config file is expected to populate. The following variables are also
+ available on the *config* object, some of which must be set by the config and
+ others are optional or predefined:
+
+ **name** *[required]* The name of the test suite, for use in reports and
+ diagnostics.
+
+ **test_format** *[required]* The test format object which will be used to
+ discover and run tests in the test suite. Generally this will be a builtin test
+ format available from the *lit.formats* module.
+
+ **test_src_root** The filesystem path to the test suite root. For out-of-dir
+ builds this is the directory that will be scanned for tests.
+
+ **test_exec_root** For out-of-dir builds, the path to the test suite root inside
+ the object directory. This is where tests will be run and temporary output files
+ placed.
+
+ **environment** A dictionary representing the environment to use when executing
+ tests in the suite.
+
+ **suffixes** For **lit** test formats which scan directories for tests, this
+ variable is a list of suffixes to identify test files. Used by: *ShTest*,
+ *TclTest*.
+
+ **substitutions** For **lit** test formats which substitute variables into a test
+ script, the list of substitutions to perform. Used by: *ShTest*, *TclTest*.
+
+ **unsupported** Mark an unsupported directory; all tests within it will be
+ reported as unsupported. Used by: *ShTest*, *TclTest*.
+
+ **parent** The parent configuration; this is the config object for the directory
+ containing the test suite, or None.
+
+ **root** The root configuration. This is the top-most **lit** configuration in
+ the project.
+
+ **on_clone** The config is actually cloned for every subdirectory inside a test
+ suite, to allow local configuration on a per-directory basis. The *on_clone*
+ variable can be set to a Python function which will be called whenever a
+ configuration is cloned (for a subdirectory). The function should take three
+ arguments: (1) the parent configuration, (2) the new configuration (which the
+ *on_clone* function will generally modify), and (3) the test path to the new
+ directory being scanned.
+
+
+
+
+TEST DISCOVERY
+~~~~~~~~~~~~~~
+
+
+Once test suites are located, **lit** recursively traverses the source directory
+(following *test_src_root*) looking for tests. When **lit** enters a
+sub-directory, it first checks to see if a nested test suite is defined in that
+directory. If so, it loads that test suite recursively, otherwise it
+instantiates a local test config for the directory (see "LOCAL CONFIGURATION
+FILES").
+
+Tests are identified by the test suite they are contained within, and the
+relative path inside that suite. Note that the relative path may not refer to an
+actual file on disk; some test formats (such as *GoogleTest*) define "virtual
+tests" which have a path that contains both the path to the actual test file and
+a subpath to identify the virtual test.
+
+
+LOCAL CONFIGURATION FILES
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+
+When **lit** loads a subdirectory in a test suite, it instantiates a local test
+configuration by cloning the configuration for the parent directory -- the root
+of this configuration chain will always be a test suite. Once the test
+configuration is cloned **lit** checks for a *lit.local.cfg* file in the
+subdirectory. If present, this file will be loaded and can be used to specialize
+the configuration for each individual directory. This facility can be used to
+define subdirectories of optional tests, or to change other configuration
+parameters -- for example, to change the test format, or the suffixes which
+identify test files.
+
+
+TEST RUN OUTPUT FORMAT
+~~~~~~~~~~~~~~~~~~~~~~
+
+
+The **lit** output for a test run conforms to the following schema, in both short
+and verbose modes (although in short mode no PASS lines will be shown). This
+schema has been chosen to be relatively easy to reliably parse by a machine (for
+example in buildbot log scraping), and for other tools to generate.
+
+Each test result is expected to appear on a line that matches:
+
+<result code>: <test name> (<progress info>)
+
+where <result code> is a standard test result such as PASS, FAIL, XFAIL, XPASS,
+UNRESOLVED, or UNSUPPORTED. The performance result codes of IMPROVED and
+REGRESSED are also allowed.
+
+The <test name> field can consist of an arbitrary string containing no newline.
+
+The <progress info> field can be used to report progress information such as
+(1/300) or can be empty, but even when empty the parentheses are required.
+
+Each test result may include additional (multiline) log information in the
+following format.
+
+<log delineator> TEST '(<test name>)' <trailing delineator>
+... log message ...
+<log delineator>
+
+where <test name> should be the name of a preceding reported test, <log
+delineator> is a string of '\*' characters *at least* four characters long (the
+recommended length is 20), and <trailing delineator> is an arbitrary (unparsed)
+string.
+
+The following is an example of a test run output which consists of four tests A,
+B, C, and D, and a log message for the failing test C::
+
+ PASS: A (1 of 4)
+ PASS: B (2 of 4)
+ FAIL: C (3 of 4)
+ ******************** TEST 'C' FAILED ********************
+ Test 'C' failed as a result of exit code 1.
+ ********************
+ PASS: D (4 of 4)
+
+
+LIT EXAMPLE TESTS
+~~~~~~~~~~~~~~~~~
+
+
+The **lit** distribution contains several example implementations of test suites
+in the *ExampleTests* directory.
+
+
+SEE ALSO
+--------
+
+
+valgrind(1)
diff --git a/docs/CommandGuide/llc.rst b/docs/CommandGuide/llc.rst
new file mode 100644
index 00000000000..6f1c486c3f4
--- /dev/null
+++ b/docs/CommandGuide/llc.rst
@@ -0,0 +1,251 @@
+llc - LLVM static compiler
+==========================
+
+
+SYNOPSIS
+--------
+
+
+**llc** [*options*] [*filename*]
+
+
+DESCRIPTION
+-----------
+
+
+The **llc** command compiles LLVM source inputs into assembly language for a
+specified architecture. The assembly language output can then be passed through
+a native assembler and linker to generate a native executable.
+
+The choice of architecture for the output assembly code is automatically
+determined from the input file, unless the **-march** option is used to override
+the default.
+
+
+OPTIONS
+-------
+
+
+If *filename* is - or omitted, **llc** reads from standard input. Otherwise, it
+will read from *filename*. Inputs can be in either the LLVM assembly language
+format (.ll) or the LLVM bitcode format (.bc).
+
+If the **-o** option is omitted, then **llc** will send its output to standard
+output if the input is from standard input. If the **-o** option specifies -,
+then the output will also be sent to standard output.
+
+If no **-o** option is specified and an input file other than - is specified,
+then **llc** creates the output filename by taking the input filename,
+removing any existing *.bc* extension, and adding a *.s* suffix.
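+
+These rules are illustrated by the following sketch (the file names are
+hypothetical):
+
+
+.. code-block:: sh
+
+ llc foo.bc              # assembly is written to foo.s
+ llc foo.bc -o -         # assembly is written to standard output
+ llvm-as < foo.ll | llc  # reads standard input, writes standard output
+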
+
+Other **llc** options are as follows:
+
+End-user Options
+~~~~~~~~~~~~~~~~
+
+
+
+**-help**
+
+ Print a summary of command line options.
+
+
+
+**-O**\ =\ *uint*
+
+ Generate code at different optimization levels. These correspond to the *-O0*,
+ *-O1*, *-O2*, and *-O3* optimization levels used by **llvm-gcc** and
+ **clang**.
+
+
+
+**-mtriple**\ =\ *target triple*
+
+ Override the target triple specified in the input file with the specified
+ string.
+
+
+
+**-march**\ =\ *arch*
+
+ Specify the architecture for which to generate assembly, overriding the target
+ encoded in the input file. See the output of **llc -help** for a list of
+ valid architectures. By default this is inferred from the target triple or
+ autodetected to the current architecture.
+
+
+
+**-mcpu**\ =\ *cpuname*
+
+ Specify a specific chip in the current architecture to generate code for.
+ By default this is inferred from the target triple and autodetected to
+ the current architecture. For a list of available CPUs, use:
+ **llvm-as < /dev/null | llc -march=xyz -mcpu=help**
+
+
+
+**-mattr**\ =\ *a1,+a2,-a3,...*
+
+ Override or control specific attributes of the target, such as whether SIMD
+ operations are enabled or not. The default set of attributes is set by the
+ current CPU. For a list of available attributes, use:
+ **llvm-as < /dev/null | llc -march=xyz -mattr=help**
+
+
+
+**--disable-fp-elim**
+
+ Disable frame pointer elimination optimization.
+
+
+
+**--disable-excess-fp-precision**
+
+ Disable optimizations that may produce excess precision for floating point.
+ Note that this option can dramatically slow down code on some systems
+ (e.g. X86).
+
+
+
+**--enable-no-infs-fp-math**
+
+ Enable optimizations that assume no Inf values.
+
+
+
+**--enable-no-nans-fp-math**
+
+ Enable optimizations that assume no NAN values.
+
+
+
+**--enable-unsafe-fp-math**
+
+ Enable optimizations that make unsafe assumptions about IEEE math (e.g. that
+ addition is associative) or may not work for all input ranges. These
+ optimizations allow the code generator to make use of some instructions which
+ would otherwise not be usable (such as fsin on X86).
+
+
+
+**--enable-correct-eh-support**
+
+ Instruct the **lowerinvoke** pass to insert code for correct exception handling
+ support. This is expensive and is by default omitted for efficiency.
+
+
+
+**--stats**
+
+ Print statistics recorded by code-generation passes.
+
+
+
+**--time-passes**
+
+ Record the amount of time needed for each pass and print a report to standard
+ error.
+
+
+
+**--load**\ =\ *dso_path*
+
+ Dynamically load *dso_path* (a path to a dynamically shared object) that
+ implements an LLVM target. This will permit the target name to be used with the
+ **-march** option so that code can be generated for that target.
+
+
+
+
+Tuning/Configuration Options
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+
+
+**--print-machineinstrs**
+
+ Print generated machine code between compilation phases (useful for debugging).
+
+
+
+**--regalloc**\ =\ *allocator*
+
+ Specify the register allocator to use. The default *allocator* is *local*.
+ Valid register allocators are:
+
+
+ *simple*
+
+ Very simple "always spill" register allocator
+
+
+
+ *local*
+
+ Local register allocator
+
+
+
+ *linearscan*
+
+ Linear scan global register allocator
+
+
+
+ *iterativescan*
+
+ Iterative scan global register allocator
+
+
+
+
+
+**--spiller**\ =\ *spiller*
+
+ Specify the spiller to use for register allocators that support it. Currently
+ this option is used only by the linear scan register allocator. The default
+ *spiller* is *local*. Valid spillers are:
+
+
+ *simple*
+
+ Simple spiller
+
+
+
+ *local*
+
+ Local spiller
+
+
+
+
+
+
+Intel IA-32-specific Options
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+
+
+**--x86-asm-syntax=att|intel**
+
+ Specify whether to emit assembly code in AT&T syntax (the default) or Intel
+ syntax.
+
+
+
+
+
+EXIT STATUS
+-----------
+
+
+If **llc** succeeds, it will exit with 0. Otherwise, if an error occurs,
+it will exit with a non-zero value.
+
+
+SEE ALSO
+--------
+
+
+lli
diff --git a/docs/CommandGuide/lli.rst b/docs/CommandGuide/lli.rst
new file mode 100644
index 00000000000..7cc128444da
--- /dev/null
+++ b/docs/CommandGuide/lli.rst
@@ -0,0 +1,300 @@
+lli - directly execute programs from LLVM bitcode
+=================================================
+
+
+SYNOPSIS
+--------
+
+
+**lli** [*options*] [*filename*] [*program args*]
+
+
+DESCRIPTION
+-----------
+
+
+**lli** directly executes programs in LLVM bitcode format. It takes a program
+in LLVM bitcode format and executes it using a just-in-time compiler, if one is
+available for the current architecture, or an interpreter. **lli** takes all of
+the same code generator options as **llc**, but they are only effective when
+**lli** is using the just-in-time compiler.
+
+If *filename* is not specified, then **lli** reads the LLVM bitcode for the
+program from standard input.
+
+The optional *args* specified on the command line are passed to the program as
+arguments.
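+
+A brief sketch of typical usage (the bitcode file name and program arguments
+are hypothetical):
+
+
+.. code-block:: sh
+
+ llvm-as hello.ll                      # produce hello.bc
+ lli hello.bc some-arg                 # JIT-compile (if possible) and run it
+ lli -force-interpreter=true hello.bc  # force use of the interpreter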
+
+
+GENERAL OPTIONS
+---------------
+
+
+
+**-fake-argv0**\ =\ *executable*
+
+ Override the ``argv[0]`` value passed into the executing program.
+
+
+
+**-force-interpreter**\ =\ *{false,true}*
+
+ If set to true, use the interpreter even if a just-in-time compiler is available
+ for this architecture. Defaults to false.
+
+
+
+**-help**
+
+ Print a summary of command line options.
+
+
+
+**-load**\ =\ *pluginfilename*
+
+ Causes **lli** to load the plugin (shared object) named *pluginfilename* and use
+ it for optimization.
+
+
+
+**-stats**
+
+ Print statistics from the code-generation passes. This is only meaningful for
+ the just-in-time compiler, at present.
+
+
+
+**-time-passes**
+
+ Record the amount of time needed for each code-generation pass and print it to
+ standard error.
+
+
+
+**-version**
+
+ Print out the version of **lli** and exit without doing anything else.
+
+
+
+
+TARGET OPTIONS
+--------------
+
+
+
+**-mtriple**\ =\ *target triple*
+
+ Override the target triple specified in the input bitcode file with the
+ specified string. This may result in a crash if you pick an
+ architecture which is not compatible with the current system.
+
+
+
+**-march**\ =\ *arch*
+
+ Specify the architecture for which to generate assembly, overriding the target
+ encoded in the bitcode file. See the output of **llc -help** for a list of
+ valid architectures. By default this is inferred from the target triple or
+ autodetected to the current architecture.
+
+
+
+**-mcpu**\ =\ *cpuname*
+
+ Specify a specific chip in the current architecture to generate code for.
+ By default this is inferred from the target triple and autodetected to
+ the current architecture. For a list of available CPUs, use:
+ **llvm-as < /dev/null | llc -march=xyz -mcpu=help**
+
+
+
+**-mattr**\ =\ *a1,+a2,-a3,...*
+
+ Override or control specific attributes of the target, such as whether SIMD
+ operations are enabled or not. The default set of attributes is set by the
+ current CPU. For a list of available attributes, use:
+ **llvm-as < /dev/null | llc -march=xyz -mattr=help**
+
+
+
+
+FLOATING POINT OPTIONS
+----------------------
+
+
+
+**-disable-excess-fp-precision**
+
+ Disable optimizations that may increase floating point precision.
+
+
+
+**-enable-no-infs-fp-math**
+
+ Enable optimizations that assume no Inf values.
+
+
+
+**-enable-no-nans-fp-math**
+
+ Enable optimizations that assume no NAN values.
+
+
+
+**-enable-unsafe-fp-math**
+
+ Causes **lli** to enable optimizations that may decrease floating point
+ precision.
+
+
+
+**-soft-float**
+
+ Causes **lli** to generate software floating point library calls instead of
+ equivalent hardware instructions.
+
+
+
+
+CODE GENERATION OPTIONS
+-----------------------
+
+
+
+**-code-model**\ =\ *model*
+
+ Choose the code model from:
+
+
+ .. code-block:: perl
+
+ default: Target default code model
+ small: Small code model
+ kernel: Kernel code model
+ medium: Medium code model
+ large: Large code model
+
+
+
+
+**-disable-post-RA-scheduler**
+
+ Disable scheduling after register allocation.
+
+
+
+**-disable-spill-fusing**
+
+ Disable fusing of spill code into instructions.
+
+
+
+**-enable-correct-eh-support**
+
+ Make the -lowerinvoke pass insert expensive, but correct, EH code.
+
+
+
+**-jit-enable-eh**
+
+ Exception handling should be enabled in the just-in-time compiler.
+
+
+
+**-join-liveintervals**
+
+ Coalesce copies (default=true).
+
+
+
+**-nozero-initialized-in-bss**
+
+ Don't place zero-initialized symbols into the BSS section.
+
+
+
+**-pre-RA-sched**\ =\ *scheduler*
+
+ Instruction schedulers available (before register allocation):
+
+
+ .. code-block:: perl
+
+ =default: Best scheduler for the target
+ =none: No scheduling: breadth first sequencing
+ =simple: Simple two pass scheduling: minimize critical path and maximize processor utilization
+ =simple-noitin: Simple two pass scheduling: Same as simple except using generic latency
+ =list-burr: Bottom-up register reduction list scheduling
+ =list-tdrr: Top-down register reduction list scheduling
+ =list-td: Top-down list scheduler
+
+
+
+
+**-regalloc**\ =\ *allocator*
+
+ Register allocator to use (default=linearscan)
+
+
+ .. code-block:: perl
+
+ =bigblock: Big-block register allocator
+ =linearscan: Linear scan register allocator
+ =local: Local register allocator
+ =simple: Simple register allocator
+
+
+
+
+**-relocation-model**\ =\ *model*
+
+ Choose relocation model from:
+
+
+ .. code-block:: perl
+
+ =default: Target default relocation model
+ =static: Non-relocatable code
+ =pic: Fully relocatable, position independent code
+ =dynamic-no-pic: Relocatable external references, non-relocatable code
+
+
+
+
+**-spiller**
+
+ Spiller to use (default=local)
+
+
+ .. code-block:: perl
+
+ =simple: simple spiller
+ =local: local spiller
+
+
+
+
+**-x86-asm-syntax**\ =\ *syntax*
+
+ Choose style of code to emit from X86 backend:
+
+
+ .. code-block:: perl
+
+ =att: Emit AT&T-style assembly
+ =intel: Emit Intel-style assembly
+
+
+
+
+
+EXIT STATUS
+-----------
+
+
+If **lli** fails to load the program, it will exit with an exit code of 1.
+Otherwise, it will return the exit code of the program it executes.
+
+
+SEE ALSO
+--------
+
+
+llc
diff --git a/docs/CommandGuide/llvm-ar.rst b/docs/CommandGuide/llvm-ar.rst
new file mode 100644
index 00000000000..8ff4192a500
--- /dev/null
+++ b/docs/CommandGuide/llvm-ar.rst
@@ -0,0 +1,458 @@
+llvm-ar - LLVM archiver
+=======================
+
+
+SYNOPSIS
+--------
+
+
+**llvm-ar** [-]{dmpqrtx}[Rabfikou] [relpos] [count] <archive> [files...]
+
+
+DESCRIPTION
+-----------
+
+
+The **llvm-ar** command is similar to the common Unix utility, ``ar``. It
+archives several files together into a single file. The intent for this is
+to produce archive libraries of LLVM bitcode that can be linked into an
+LLVM program. However, the archive can contain any kind of file. By default,
+**llvm-ar** generates a symbol table that makes linking faster because
+only the symbol table needs to be consulted, not each individual file member
+of the archive.
+
+The **llvm-ar** command can be used to *read* both SVR4 and BSD style archive
+files. However, it cannot be used to write them. While the **llvm-ar** command
+produces files that are *almost* identical to the format used by other ``ar``
+implementations, it has two significant departures in order to make the
+archive appropriate for LLVM. The first departure is that **llvm-ar** only
+uses BSD4.4 style long path names (stored immediately after the header) and
+never contains a string table for long names. The second departure is that the
+symbol table is formatted for efficient construction of an in-memory data
+structure that permits rapid (red-black tree) lookups. Consequently, archives
+produced with **llvm-ar** usually won't be readable or editable with any
+``ar`` implementation or useful for linking. Using the ``f`` modifier to flatten
+file names will make the archive readable by other ``ar`` implementations
+but not for linking because the symbol table format for LLVM is unique. If an
+SVR4 or BSD style archive is used with the ``r`` (replace) or ``q`` (quick
+update) operations, the archive will be reconstructed in LLVM format. This
+means that the string table will be dropped (in deference to BSD 4.4 long names)
+and an LLVM symbol table will be added (by default). The system symbol table
+will be retained.
+
+Here's where **llvm-ar** departs from previous ``ar`` implementations:
+
+
+*Symbol Table*
+
+ Since **llvm-ar** is intended to archive bitcode files, the symbol table
+ won't make much sense to anything but LLVM. Consequently, the symbol table's
+ format has been simplified. It consists simply of a sequence of pairs
+ of a file member index number as an LSB 4-byte integer and a null-terminated
+ string.
+
+
+
+*Long Paths*
+
+ Some ``ar`` implementations (SVR4) use a separate file member to record long
+ path names (> 15 characters). **llvm-ar** takes the BSD 4.4 and Mac OS X
+ approach which is to simply store the full path name immediately preceding
+ the data for the file. The path name is null terminated and may contain the
+ slash (/) character.
+
+
+
+*Directory Recursion*
+
+ Most ``ar`` implementations do not recurse through directories but simply
+ ignore directories if they are presented to the program in the *files*
+ option. **llvm-ar**, however, can recurse through directory structures and
+ add all the files under a directory, if requested.
+
+
+
+*TOC Verbose Output*
+
+ When **llvm-ar** prints out the verbose table of contents (``tv`` option), it
+ precedes the usual output with a character indicating the basic kind of
+ content in the file. A blank means the file is a regular file. A 'B' means
+ the file is an LLVM bitcode file. An 'S' means the file is the symbol table.
+
+
+
+
+OPTIONS
+-------
+
+
+The options to **llvm-ar** are compatible with other ``ar`` implementations.
+However, there are a few modifiers (*R*) that are not found in other ``ar``
+implementations. The options to **llvm-ar** specify a single basic operation to
+perform on the archive, a variety of modifiers for that operation, the name of
+the archive file, and an optional list of file names. These options are used to
+determine how **llvm-ar** should process the archive file.
+
+The Operations and Modifiers are explained in the sections below. The minimal
+set of options is at least one operator and the name of the archive. Typically
+archive files end with a ``.a`` suffix, but this is not required. Following
+the *archive-name* comes a list of *files* that indicate the specific members
+of the archive to operate on. If the *files* option is not specified, it
+generally means either "none" or "all" members, depending on the operation.
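+
+For example, some common invocations, as a rough sketch (archive and member
+names are hypothetical):
+
+
+.. code-block:: sh
+
+ llvm-ar r libfoo.a foo.bc bar.bc  # insert or replace members (creates libfoo.a if needed)
+ llvm-ar tv libfoo.a               # print a verbose table of contents
+ llvm-ar x libfoo.a foo.bc         # extract a member back to the file system
+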
+
+Operations
+~~~~~~~~~~
+
+
+
+d
+
+ Delete files from the archive. No modifiers are applicable to this operation.
+ The *files* options specify which members should be removed from the
+ archive. It is not an error if a specified file does not appear in the archive.
+ If no *files* are specified, the archive is not modified.
+
+
+
+m[abi]
+
+ Move files from one location in the archive to another. The *a*, *b*, and
+ *i* modifiers apply to this operation. The *files* will all be moved
+ to the location given by the modifiers. If no modifiers are used, the files
+ will be moved to the end of the archive. If no *files* are specified, the
+ archive is not modified.
+
+
+
+p[k]
+
+ Print files to the standard output. The *k* modifier applies to this
+ operation. This operation simply prints the *files* indicated to the
+ standard output. If no *files* are specified, the entire archive is printed.
+ Printing bitcode files is ill-advised as they might confuse your terminal
+ settings. The *p* operation never modifies the archive.
+
+
+
+q[Rf]
+
+ Quickly append files to the end of the archive. The *R*, and *f*
+ modifiers apply to this operation. This operation quickly adds the
+ *files* to the archive without checking for duplicates that should be
+ removed first. If no *files* are specified, the archive is not modified.
+ Because of the way that **llvm-ar** constructs the archive file, it's dubious
+ whether the *q* operation is any faster than the *r* operation.
+
+
+
+r[Rabfu]
+
+ Replace or insert file members. The *R*, *a*, *b*, *f*, and *u*
+ modifiers apply to this operation. This operation will replace existing
+ *files* or insert them at the end of the archive if they do not exist. If no
+ *files* are specified, the archive is not modified.
+
+
+
+t[v]
+
+ Print the table of contents. Without any modifiers, this operation just prints
+ the names of the members to the standard output. With the *v* modifier,
+ **llvm-ar** also prints out the file type (B=bitcode, S=symbol
+ table, blank=regular file), the permission mode, the owner and group, the
+ size, and the date. If any *files* are specified, the listing is only for
+ those files. If no *files* are specified, the table of contents for the
+ whole archive is printed.
+
+
+
+x[oP]
+
+ Extract archive members back to files. The *o* modifier applies to this
+ operation. This operation retrieves the indicated *files* from the archive
+ and writes them back to the operating system's file system. If no
+ *files* are specified, the entire archive is extracted.
+
+
+
+
+Modifiers (operation specific)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+
+The modifiers below are specific to certain operations. See the Operations
+section (above) to determine which modifiers are applicable to which operations.
+
+
+[a]
+
+ When inserting or moving member files, this option specifies the destination of
+ the new files as being after the *relpos* member. If *relpos* is not found,
+ the files are placed at the end of the archive.
+
+
+
+[b]
+
+ When inserting or moving member files, this option specifies the destination of
+ the new files as being before the *relpos* member. If *relpos* is not
+ found, the files are placed at the end of the archive. This modifier is
+ identical to the *i* modifier.
+
+
+
+[f]
+
+ Normally, **llvm-ar** stores the full path name to a file as presented to it on
+ the command line. With this option, truncated (15 characters max) names are
+ used. This ensures name compatibility with older versions of ``ar`` but may also
+ thwart correct extraction of the files (duplicates may overwrite). If used with
+ the *R* option, the directory recursion will be performed but the file names
+ will all be flattened to simple file names.
+
+
+
+[i]
+
+ A synonym for the *b* option.
+
+
+
+[k]
+
+ Normally, **llvm-ar** will not print the contents of bitcode files when the
+ *p* operation is used. This modifier defeats the default and allows the
+ bitcode members to be printed.
+
+
+
+[N]
+
+ This option is ignored by **llvm-ar** but provided for compatibility.
+
+
+
+[o]
+
+ When extracting files, this option will cause **llvm-ar** to preserve the
+ original modification times of the files it writes.
+
+
+
+[P]
+
+ Use full path names when matching.
+
+
+
+[R]
+
+ This modifier instructs the *r* option to recursively process directories.
+ Without *R*, directories are ignored and only those *files* that refer to
+ files will be added to the archive. When *R* is used, any directories specified
+ with *files* will be scanned (recursively) to find files to be added to the
+ archive. Any file whose name begins with a dot will not be added.
+
+
+
+[u]
+
+ When replacing existing files in the archive, only replace those files that have
+ a newer time stamp than the time stamp of the member in the archive.
+
+
+
+
+Modifiers (generic)
+~~~~~~~~~~~~~~~~~~~
+
+
+The modifiers below may be applied to any operation.
+
+
+[c]
+
+ For all operations, **llvm-ar** will always create the archive if it doesn't
+ exist. Normally, **llvm-ar** will print a warning message indicating that the
+ archive is being created. Using this modifier turns off that warning.
+
+
+
+[s]
+
+ This modifier requests that an archive index (or symbol table) be added to the
+ archive. This is the default mode of operation. The symbol table will contain
+ all the externally visible functions and global variables defined by all the
+ bitcode files in the archive. Using this modifier is more efficient than using
+ **llvm-ranlib**, which also creates the symbol table.
+
+
+
+[S]
+
+ This modifier is the opposite of the *s* modifier. It instructs **llvm-ar** to
+ not build the symbol table. If both *s* and *S* are used, the last modifier to
+ occur in the options will prevail.
+
+
+
+[v]
+
+ This modifier instructs **llvm-ar** to be verbose about what it is doing. Each
+ editing operation taken against the archive will produce a line of output saying
+ what is being done.
+
+
+
+
+
+STANDARDS
+---------
+
+
+The **llvm-ar** utility is intended to provide a superset of the IEEE Std 1003.2
+(POSIX.2) functionality for ``ar``. **llvm-ar** can read both SVR4 and BSD4.4 (or
+Mac OS X) archives. If the ``f`` modifier is given to the ``x`` or ``r`` operations
+then **llvm-ar** will write SVR4 compatible archives. Without this modifier,
+**llvm-ar** will write BSD4.4 compatible archives that have long names
+immediately after the header and indicated using the "#1/ddd" notation for the
+name in the header.
+
+
+FILE FORMAT
+-----------
+
+
+The file format for LLVM Archive files is similar to that of BSD 4.4 or Mac OSX
+archive files. In fact, except for the symbol table, the ``ar`` commands on those
+operating systems should be able to read LLVM archive files. The details of the
+file format follow.
+
+Each archive begins with the archive magic number which is the eight printable
+characters "!<arch>\n" where \n represents the newline character (0x0A).
+Following the magic number, the file is composed of even length members that
+begin with an archive header and end with a \n padding character if necessary
+(to make the length even). Each file member is composed of a header (defined
+below), an optional newline-terminated "long file name" and the contents of
+the file.
+
+The fields of the header are described in the items below. All fields of the
+header contain only ASCII characters, are left justified and are right padded
+with space characters.
+
+
+name - char[16]
+
+ This field of the header provides the name of the archive member. If the name is
+ longer than 15 characters or contains a slash (/) character, then this field
+ contains ``#1/nnn`` where ``nnn`` provides the length of the name and the ``#1/``
+ is literal. In this case, the actual name of the file is provided in the ``nnn``
+ bytes immediately following the header. If the name is 15 characters or less, it
+ is contained directly in this field and terminated with a slash (/) character.
+
+
+
+date - char[12]
+
+ This field provides the date of modification of the file in the form of a
+ decimal encoded number that provides the number of seconds since the epoch
+ (since 00:00:00 Jan 1, 1970) per Posix specifications.
+
+
+
+uid - char[6]
+
+ This field provides the user id of the file encoded as a decimal ASCII string.
+ This field might not make much sense on non-Unix systems. On Unix, it is the
+ same value as the st_uid field of the stat structure returned by the stat(2)
+ operating system call.
+
+
+
+gid - char[6]
+
+ This field provides the group id of the file encoded as a decimal ASCII string.
+ This field might not make much sense on non-Unix systems. On Unix, it is the
+ same value as the st_gid field of the stat structure returned by the stat(2)
+ operating system call.
+
+
+
+mode - char[8]
+
+ This field provides the access mode of the file encoded as an octal ASCII
+ string. This field might not make much sense on non-Unix systems. On Unix, it
+ is the same value as the st_mode field of the stat structure returned by the
+ stat(2) operating system call.
+
+
+
+size - char[10]
+
+ This field provides the size of the file, in bytes, encoded as a decimal ASCII
+ string.
+
+
+
+fmag - char[2]
+
+ This field is the archive file member magic number. Its content is always the
+ two characters back tick (0x60) and newline (0x0A). This provides some measure of
+ utility in identifying archive files that have been corrupted.
+
+
+
+The LLVM symbol table has the special name "#_LLVM_SYM_TAB_#". It is presumed
+that no regular archive member file will want this name. The LLVM symbol table
+is simply composed of a sequence of triplets: byte offset, length of symbol,
+and the symbol itself. Symbols are not null or newline terminated. Here are
+the details on each of these items:
+
+
+offset - vbr encoded 32-bit integer
+
+ The offset item provides the offset into the archive file where the bitcode
+ member is stored that is associated with the symbol. The offset value is 0
+ based at the start of the first "normal" file member. To derive the actual
+ file offset of the member, you must add the number of bytes occupied by the file
+ signature (8 bytes) and the symbol tables. The value of this item is encoded
+ using variable bit rate encoding to reduce the size of the symbol table.
+ Variable bit rate encoding uses the high bit (0x80) of each byte to indicate
+ if there are more bytes to follow. The remaining 7 bits in each byte carry bits
+ from the value. The final byte does not have the high bit set.
+
+
+
+length - vbr encoded 32-bit integer
+
+ The length item provides the length of the symbol that follows. Like this
+ *offset* item, the length is variable bit rate encoded.
+
+
+
+symbol - character array
+
+ The symbol item provides the text of the symbol that is associated with the
+ *offset*. The symbol is not terminated by any character. Its length is provided
+ by the *length* field. Note that it is allowed (but unwise) to use non-printing
+ characters (even 0x00) in the symbol. This allows for multiple encodings of
+ symbol names.
+
+
+
+
+EXIT STATUS
+-----------
+
+
+If **llvm-ar** succeeds, it will exit with 0. A usage error results
+in an exit code of 1. A hard (file system typically) error results in an
+exit code of 2. Miscellaneous or unknown errors result in an
+exit code of 3.
+
+
+SEE ALSO
+--------
+
+
+llvm-ranlib, ar(1)
diff --git a/docs/CommandGuide/llvm-as.rst b/docs/CommandGuide/llvm-as.rst
new file mode 100644
index 00000000000..1b499bbe970
--- /dev/null
+++ b/docs/CommandGuide/llvm-as.rst
@@ -0,0 +1,56 @@
+llvm-as - LLVM assembler
+========================
+
+SYNOPSIS
+--------
+
+**llvm-as** [*options*] [*filename*]
+
+DESCRIPTION
+-----------
+
+**llvm-as** is the LLVM assembler. It reads a file containing human-readable
+LLVM assembly language, translates it to LLVM bitcode, and writes the result
+into a file or to standard output.
+
+If *filename* is omitted or is ``-``, then **llvm-as** reads its input from
+standard input.
+
+If an output file is not specified with the **-o** option, then
+**llvm-as** sends its output to a file or standard output by following
+these rules:
+
+* If the input is standard input, then the output is standard output.
+
+* If the input is a file that ends with ``.ll``, then the output file is of the
+ same name, except that the suffix is changed to ``.bc``.
+
+* If the input is a file that does not end with the ``.ll`` suffix, then the
+ output file has the same name as the input file, except that the ``.bc``
+ suffix is appended.
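+
+For instance, under these rules (the file names are hypothetical):
+
+.. code-block:: sh
+
+ llvm-as foo.ll             # bitcode is written to foo.bc
+ llvm-as < foo.ll > out.bc  # reads standard input, writes standard output
+ llvm-as foo.ll -o bar.bc   # -o overrides the default output name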
+
+OPTIONS
+-------
+
+**-f**
+ Enable binary output on terminals. Normally, **llvm-as** will refuse to
+ write raw bitcode output if the output stream is a terminal. With this option,
+ **llvm-as** will write raw bitcode regardless of the output device.
+
+**-help**
+ Print a summary of command line options.
+
+**-o** *filename*
+ Specify the output file name. If *filename* is ``-``, then **llvm-as**
+ sends its output to standard output.
+
+EXIT STATUS
+-----------
+
+If **llvm-as** succeeds, it will exit with 0. Otherwise, if an error occurs, it
+will exit with a non-zero value.
+
+SEE ALSO
+--------
+
+llvm-dis, gccas
diff --git a/docs/CommandGuide/llvm-bcanalyzer.rst b/docs/CommandGuide/llvm-bcanalyzer.rst
new file mode 100644
index 00000000000..f1e4eac1be5
--- /dev/null
+++ b/docs/CommandGuide/llvm-bcanalyzer.rst
@@ -0,0 +1,424 @@
+llvm-bcanalyzer - LLVM bitcode analyzer
+=======================================
+
+
+SYNOPSIS
+--------
+
+
+**llvm-bcanalyzer** [*options*] [*filename*]
+
+
+DESCRIPTION
+-----------
+
+
+The **llvm-bcanalyzer** command is a small utility for analyzing bitcode files.
+The tool reads a bitcode file (such as generated with the **llvm-as** tool) and
+produces a statistical report on the contents of the bitcode file. The tool
+can also dump a low level but human readable version of the bitcode file.
+This tool is probably not of much interest or utility except for those working
+directly with the bitcode file format. Most LLVM users can just ignore
+this tool.
+
+If *filename* is omitted or is ``-``, then **llvm-bcanalyzer** reads its input
+from standard input. This is useful for combining the tool into a pipeline.
+Output is written to the standard output.
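+
+For example, a small sketch of such a pipeline (the input file name is
+hypothetical):
+
+
+.. code-block:: sh
+
+ # Assemble an .ll file and dump the resulting bitcode in human readable form.
+ llvm-as < foo.ll | llvm-bcanalyzer -dump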
+
+
+OPTIONS
+-------
+
+
+
+**-nodetails**
+
+ Causes **llvm-bcanalyzer** to abbreviate its output by writing out only a module
+ level summary. The details for individual functions are not displayed.
+
+
+
+**-dump**
+
+ Causes **llvm-bcanalyzer** to dump the bitcode in a human readable format. This
+ format is significantly different from LLVM assembly and provides details about
+ the encoding of the bitcode file.
+
+
+
+**-verify**
+
+ Causes **llvm-bcanalyzer** to verify the module produced by reading the
+ bitcode. This ensures that the statistics generated are based on a consistent
+ module.
+
+
+
+**-help**
+
+ Print a summary of command line options.
+
+
+
+
+EXIT STATUS
+-----------
+
+
+If **llvm-bcanalyzer** succeeds, it will exit with 0. Otherwise, if an error
+occurs, it will exit with a non-zero value, usually 1.
+
+
+SUMMARY OUTPUT DEFINITIONS
+--------------------------
+
+
+The following items are always printed by llvm-bcanalyzer. They comprise the
+summary output.
+
+
+**Bitcode Analysis Of Module**
+
+ This just provides the name of the module for which bitcode analysis is being
+ generated.
+
+
+
+**Bitcode Version Number**
+
+ The bitcode version (not LLVM version) of the file read by the analyzer.
+
+
+
+**File Size**
+
+ The size, in bytes, of the entire bitcode file.
+
+
+
+**Module Bytes**
+
+ The size, in bytes, of the module block. Percentage is relative to File Size.
+
+
+
+**Function Bytes**
+
+ The size, in bytes, of all the function blocks. Percentage is relative to File
+ Size.
+
+
+
+**Global Types Bytes**
+
+ The size, in bytes, of the Global Types Pool. Percentage is relative to File
+ Size. This is the size of the definitions of all types in the bitcode file.
+
+
+
+**Constant Pool Bytes**
+
+ The size, in bytes, of the Constant Pool Blocks. Percentage is relative to File
+ Size.
+
+
+
+**Module Globals Bytes**
+
+ The size, in bytes, of the Global Variable Definitions and their initializers.
+ Percentage is relative to File Size.
+
+
+
+**Instruction List Bytes**
+
+ The size, in bytes, of all the instruction lists in all the functions.
+ Percentage is relative to File Size. Note that this value is also included in
+ the Function Bytes.
+
+
+
+**Compaction Table Bytes**
+
+ The size, in bytes, of all the compaction tables in all the functions.
+ Percentage is relative to File Size. Note that this value is also included in
+ the Function Bytes.
+
+
+
+**Symbol Table Bytes**
+
+ The size, in bytes, of all the symbol tables in all the functions. Percentage is
+ relative to File Size. Note that this value is also included in the Function
+ Bytes.
+
+
+
+**Dependent Libraries Bytes**
+
+ The size, in bytes, of the list of dependent libraries in the module. Percentage
+ is relative to File Size. Note that this value is also included in the Module
+ Global Bytes.
+
+
+
+**Number Of Bitcode Blocks**
+
+ The total number of blocks of any kind in the bitcode file.
+
+
+
+**Number Of Functions**
+
+ The total number of function definitions in the bitcode file.
+
+
+
+**Number Of Types**
+
+ The total number of types defined in the Global Types Pool.
+
+
+
+**Number Of Constants**
+
+ The total number of constants (of any type) defined in the Constant Pool.
+
+
+
+**Number Of Basic Blocks**
+
+ The total number of basic blocks defined in all functions in the bitcode file.
+
+
+
+**Number Of Instructions**
+
+ The total number of instructions defined in all functions in the bitcode file.
+
+
+
+**Number Of Long Instructions**
+
+ The total number of long instructions defined in all functions in the bitcode
+ file. Long instructions are those taking greater than 4 bytes. Typically long
+ instructions are GetElementPtr with several indices, PHI nodes, and calls to
+ functions with large numbers of arguments.
+
+
+
+**Number Of Operands**
+
+ The total number of operands used in all instructions in the bitcode file.
+
+
+
+**Number Of Compaction Tables**
+
+ The total number of compaction tables in all functions in the bitcode file.
+
+
+
+**Number Of Symbol Tables**
+
+ The total number of symbol tables in all functions in the bitcode file.
+
+
+
+**Number Of Dependent Libs**
+
+ The total number of dependent libraries found in the bitcode file.
+
+
+
+**Total Instruction Size**
+
+ The total size of the instructions in all functions in the bitcode file.
+
+
+
+**Average Instruction Size**
+
+ The average number of bytes per instruction across all functions in the bitcode
+ file. This value is computed by dividing Total Instruction Size by Number Of
+ Instructions.
+
+
+
+**Maximum Type Slot Number**
+
+ The maximum value used for a type's slot number. Larger slot number values take
+ more bytes to encode.
+
+
+
+**Maximum Value Slot Number**
+
+ The maximum value used for a value's slot number. Larger slot number values take
+ more bytes to encode.
+
+
+
+**Bytes Per Value**
+
+ The average size of a Value definition (of any type). This is computed by
+ dividing File Size by the total number of values of any type.
+
+
+
+**Bytes Per Global**
+
+ The average size of a global definition (constants and global variables).
+
+
+
+**Bytes Per Function**
+
+ The average number of bytes per function definition. This is computed by
+ dividing Function Bytes by Number Of Functions.
+
+
+
+**# of VBR 32-bit Integers**
+
+ The total number of 32-bit integers encoded using the Variable Bit Rate
+ encoding scheme.
+
+
+
+**# of VBR 64-bit Integers**
+
+ The total number of 64-bit integers encoded using the Variable Bit Rate encoding
+ scheme.
+
+
+
+**# of VBR Compressed Bytes**
+
+ The total number of bytes consumed by the 32-bit and 64-bit integers that use
+ the Variable Bit Rate encoding scheme.
+
+
+
+**# of VBR Expanded Bytes**
+
+ The total number of bytes that would have been consumed by the 32-bit and 64-bit
+ integers had they not been compressed with the Variable Bit Rate encoding
+ scheme.
+
+
+
+**Bytes Saved With VBR**
+
+ The total number of bytes saved by using the Variable Bit Rate encoding scheme.
+ The percentage is relative to # of VBR Expanded Bytes.
+
+
+
+
+DETAILED OUTPUT DEFINITIONS
+---------------------------
+
+
+The following definitions occur only if the -nodetails option was not given.
+The detailed output provides additional information on a per-function basis.
+
+
+**Type**
+
+ The type signature of the function.
+
+
+
+**Byte Size**
+
+ The total number of bytes in the function's block.
+
+
+
+**Basic Blocks**
+
+ The number of basic blocks defined by the function.
+
+
+
+**Instructions**
+
+ The number of instructions defined by the function.
+
+
+
+**Long Instructions**
+
+ The number of instructions using the long instruction format in the function.
+
+
+
+**Operands**
+
+ The number of operands used by all instructions in the function.
+
+
+
+**Instruction Size**
+
+ The number of bytes consumed by instructions in the function.
+
+
+
+**Average Instruction Size**
+
+ The average number of bytes consumed by the instructions in the function. This
+ value is computed by dividing Instruction Size by Instructions.
+
+
+
+**Bytes Per Instruction**
+
+ The average number of bytes used by the function per instruction. This value is
+ computed by dividing Byte Size by Instructions. Note that this is not the same
+ as Average Instruction Size. It computes a number relative to the total function
+ size not just the size of the instruction list.
+
+
+
+**Number of VBR 32-bit Integers**
+
+ The total number of 32-bit integers found in this function (for any use).
+
+
+
+**Number of VBR 64-bit Integers**
+
+ The total number of 64-bit integers found in this function (for any use).
+
+
+
+**Number of VBR Compressed Bytes**
+
+ The total number of bytes in this function consumed by the 32-bit and 64-bit
+ integers that use the Variable Bit Rate encoding scheme.
+
+
+
+**Number of VBR Expanded Bytes**
+
+ The total number of bytes in this function that would have been consumed by
+ the 32-bit and 64-bit integers had they not been compressed with the Variable
+ Bit Rate encoding scheme.
+
+
+
+**Bytes Saved With VBR**
+
+ The total number of bytes saved in this function by using the Variable Bit
+ Rate encoding scheme. The percentage is relative to # of VBR Expanded Bytes.
+
+
+
+
+SEE ALSO
+--------
+
+
+llvm-dis|llvm-dis, `http://llvm.org/docs/BitCodeFormat.html <http://llvm.org/docs/BitCodeFormat.html>`_
diff --git a/docs/CommandGuide/llvm-build.rst b/docs/CommandGuide/llvm-build.rst
new file mode 100644
index 00000000000..f788f7c5a83
--- /dev/null
+++ b/docs/CommandGuide/llvm-build.rst
@@ -0,0 +1,102 @@
+llvm-build - LLVM Project Build Utility
+=======================================
+
+
+SYNOPSIS
+--------
+
+
+**llvm-build** [*options*]
+
+
+DESCRIPTION
+-----------
+
+
+**llvm-build** is a tool for working with LLVM projects that use the LLVMBuild
+system for describing their components.
+
+At heart, **llvm-build** is responsible for loading, verifying, and manipulating
+the project's component data. The tool is primarily designed for use in
+implementing build systems and tools which need access to the project structure
+information.
+
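+For example, one plausible invocation prints the component tree for a checkout
+at a hypothetical source root:
+
+.. code-block:: sh
+
+   llvm-build --source-root=/path/to/llvm --print-tree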
+
+OPTIONS
+-------
+
+
+
+**-h**, **--help**
+
+ Print the builtin program help.
+
+
+
+**--source-root**\ =\ *PATH*
+
+ If given, load the project at the given source root path. If this option is not
+ given, the location of the project sources will be inferred from the location of
+ the **llvm-build** script itself.
+
+
+
+**--print-tree**
+
+ Print the component tree for the project.
+
+
+
+**--write-library-table**
+
+ Write out the C++ fragment which defines the components, library names, and
+ required libraries. This C++ fragment is built into llvm-config|llvm-config
+ in order to provide clients with the list of required libraries for arbitrary
+ component combinations.
+
+
+
+**--write-llvmbuild**
+
+ Write out new *LLVMBuild.txt* files based on the loaded components. This is
+ useful for auto-upgrading the schema of the files. **llvm-build** will try, to a
+ limited extent, to preserve the comments which were written in the original
+ source file, although at this time it only preserves block comments that precede
+ the section names in the *LLVMBuild* files.
+
+
+
+**--write-cmake-fragment**
+
+ Write out the LLVMBuild in the form of a CMake fragment, so it can easily be
+ consumed by the CMake based build system. The exact contents and format of this
+ file are closely tied to how LLVMBuild is integrated with CMake, see LLVM's
+ top-level CMakeLists.txt.
+
+
+
+**--write-make-fragment**
+
+ Write out the LLVMBuild in the form of a Makefile fragment, so it can easily be
+ consumed by a Make based build system. The exact contents and format of this
+ file are closely tied to how LLVMBuild is integrated with the Makefiles, see
+ LLVM's Makefile.rules.
+
+
+
+**--llvmbuild-source-root**\ =\ *PATH*
+
+ If given, expect the *LLVMBuild* files for the project to be rooted at the
+ given path, instead of inside the source tree itself. This option is primarily
+ designed for use in conjunction with **--write-llvmbuild** to test changes to
+ *LLVMBuild* schema.
+
+
+
+
+EXIT STATUS
+-----------
+
+
+**llvm-build** exits with 0 if the operation was successful. Otherwise, it will
+exit with a non-zero value.
diff --git a/docs/CommandGuide/llvm-config.rst b/docs/CommandGuide/llvm-config.rst
new file mode 100644
index 00000000000..0ebb344c06a
--- /dev/null
+++ b/docs/CommandGuide/llvm-config.rst
@@ -0,0 +1,176 @@
+llvm-config - Print LLVM compilation options
+============================================
+
+
+SYNOPSIS
+--------
+
+
+**llvm-config** *option* [*components*...]
+
+
+DESCRIPTION
+-----------
+
+
+**llvm-config** makes it easier to build applications that use LLVM. It can
+print the compiler flags, linker flags and object libraries needed to link
+against LLVM.
+
+
+EXAMPLES
+--------
+
+
+To link against the JIT:
+
+
+.. code-block:: sh
+
+ g++ `llvm-config --cxxflags` -o HowToUseJIT.o -c HowToUseJIT.cpp
+ g++ `llvm-config --ldflags` -o HowToUseJIT HowToUseJIT.o \
+ `llvm-config --libs engine bcreader scalaropts`
+
+
+
+OPTIONS
+-------
+
+
+
+**--version**
+
+ Print the version number of LLVM.
+
+
+
+**-help**
+
+ Print a summary of **llvm-config** arguments.
+
+
+
+**--prefix**
+
+ Print the installation prefix for LLVM.
+
+
+
+**--src-root**
+
+ Print the source root from which LLVM was built.
+
+
+
+**--obj-root**
+
+ Print the object root used to build LLVM.
+
+
+
+**--bindir**
+
+ Print the installation directory for LLVM binaries.
+
+
+
+**--includedir**
+
+ Print the installation directory for LLVM headers.
+
+
+
+**--libdir**
+
+ Print the installation directory for LLVM libraries.
+
+
+
+**--cxxflags**
+
+ Print the C++ compiler flags needed to use LLVM headers.
+
+
+
+**--ldflags**
+
+ Print the flags needed to link against LLVM libraries.
+
+
+
+**--libs**
+
+ Print all the libraries needed to link against the specified LLVM
+ *components*, including any dependencies.
+
+
+
+**--libnames**
+
+ Similar to **--libs**, but prints the bare filenames of the libraries
+ without **-l** or pathnames. Useful for linking against a not-yet-installed
+ copy of LLVM.
+
+
+
+**--libfiles**
+
+ Similar to **--libs**, but print the full path to each library file. This is
+ useful when creating makefile dependencies, to ensure that a tool is relinked if
+ any library it uses changes.
+
+
+
+**--components**
+
+ Print all valid component names.
+
+
+
+**--targets-built**
+
+ Print the component names for all targets supported by this copy of LLVM.
+
+
+
+**--build-mode**
+
+ Print the build mode used when LLVM was built (e.g. Debug or Release).
+
+
+
+
+COMPONENTS
+----------
+
+
+To print a list of all available components, run **llvm-config
+--components**. In most cases, components correspond directly to LLVM
+libraries. Useful "virtual" components include:
+
+
+**all**
+
+ Includes all LLVM libraries. The default if no components are specified.
+
+
+
+**backend**
+
+ Includes either a native backend or the C backend.
+
+
+
+**engine**
+
+ Includes either a native JIT or the bitcode interpreter.
+
+
+
+
+EXIT STATUS
+-----------
+
+
+If **llvm-config** succeeds, it will exit with 0. Otherwise, if an error
+occurs, it will exit with a non-zero value.
diff --git a/docs/CommandGuide/llvm-cov.rst b/docs/CommandGuide/llvm-cov.rst
new file mode 100644
index 00000000000..09275f6af71
--- /dev/null
+++ b/docs/CommandGuide/llvm-cov.rst
@@ -0,0 +1,51 @@
+llvm-cov - emit coverage information
+====================================
+
+
+SYNOPSIS
+--------
+
+
+**llvm-cov** [-gcno=filename] [-gcda=filename] [-dump]
+
+
+DESCRIPTION
+-----------
+
+
+The experimental **llvm-cov** tool reads in a description file generated by the
+compiler and a coverage data file generated by the instrumented program. This
+program assumes that the description and data files use the same format as gcov
+files.
+
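+For example, assuming hypothetical ``foo.gcno`` and ``foo.gcda`` files produced
+for a single source file:
+
+.. code-block:: sh
+
+   llvm-cov -gcno=foo.gcno -gcda=foo.gcda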
+
+OPTIONS
+-------
+
+
+
+**-gcno=filename**
+
+ This option selects the input description file generated by the compiler while
+ instrumenting the program.
+
+
+
+**-gcda=filename**
+
+ This option selects the coverage data file generated by the instrumented program.
+
+
+
+**-dump**
+
+ This option enables an output dump that is suitable for a developer to help
+ debug **llvm-cov** itself.
+
+
+
+
+EXIT STATUS
+-----------
+
+
+**llvm-cov** returns 1 if it cannot read input files. Otherwise, it exits with zero.
diff --git a/docs/CommandGuide/llvm-diff.rst b/docs/CommandGuide/llvm-diff.rst
new file mode 100644
index 00000000000..991d4fece04
--- /dev/null
+++ b/docs/CommandGuide/llvm-diff.rst
@@ -0,0 +1,56 @@
+llvm-diff - LLVM structural 'diff'
+==================================
+
+
+SYNOPSIS
+--------
+
+
+**llvm-diff** [*options*] *module 1* *module 2* [*global name ...*]
+
+
+DESCRIPTION
+-----------
+
+
+**llvm-diff** compares the structure of two LLVM modules, primarily
+focusing on differences in function definitions. Insignificant
+differences, such as changes in the ordering of globals or in the
+names of local values, are ignored.
+
+An input module will be interpreted as an assembly file if its name
+ends in '.ll'; otherwise it will be read in as a bitcode file.
+
+If a list of global names is given, just the values with those names
+are compared; otherwise, all global values are compared, and
+diagnostics are produced for globals which only appear in one module
+or the other.
+
+**llvm-diff** compares two functions by comparing their basic blocks,
+beginning with the entry blocks. If the terminators seem to match,
+then the corresponding successors are compared; otherwise they are
+ignored. This algorithm is very sensitive to changes in control flow,
+which tend to stop any downstream changes from being detected.
+
+**llvm-diff** is intended as a debugging tool for writers of LLVM
+passes and frontends. It does not have a stable output format.
+
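+For example, to compare only a hypothetical function ``main`` in two versions of
+a module:
+
+.. code-block:: sh
+
+   llvm-diff before.ll after.ll main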
+
+EXIT STATUS
+-----------
+
+
+If **llvm-diff** finds no differences between the modules, it will exit
+with 0 and produce no output. Otherwise it will exit with a non-zero
+value.
+
+
+BUGS
+----
+
+
+Many important differences, like changes in linkage or function
+attributes, are not diagnosed.
+
+Changes in memory behavior (for example, coalescing loads) can cause
+massive detected differences in blocks.
diff --git a/docs/CommandGuide/llvm-dis.rst b/docs/CommandGuide/llvm-dis.rst
new file mode 100644
index 00000000000..85cdca85ecd
--- /dev/null
+++ b/docs/CommandGuide/llvm-dis.rst
@@ -0,0 +1,69 @@
+llvm-dis - LLVM disassembler
+============================
+
+
+SYNOPSIS
+--------
+
+
+**llvm-dis** [*options*] [*filename*]
+
+
+DESCRIPTION
+-----------
+
+
+The **llvm-dis** command is the LLVM disassembler. It takes an LLVM
+bitcode file and converts it into human-readable LLVM assembly language.
+
+If filename is omitted or specified as ``-``, **llvm-dis** reads its
+input from standard input.
+
+If the input is being read from standard input, then **llvm-dis**
+will send its output to standard output by default. Otherwise, the
+output will be written to a file named after the input file, with
+a ``.ll`` suffix added (any existing ``.bc`` suffix will first be
+removed). You can override the choice of output file using the
+**-o** option.
+
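+For example, with a hypothetical bitcode file ``foo.bc``:
+
+.. code-block:: sh
+
+   llvm-dis foo.bc        # writes the assembly to foo.ll
+   llvm-dis < foo.bc      # reads standard input, writes to standard output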
+
+OPTIONS
+-------
+
+
+
+**-f**
+
+ Enable binary output on terminals. Normally, **llvm-dis** will refuse to
+ write raw bitcode output if the output stream is a terminal. With this option,
+ **llvm-dis** will write raw bitcode regardless of the output device.
+
+
+
+**-help**
+
+ Print a summary of command line options.
+
+
+
+**-o** *filename*
+
+ Specify the output file name. If *filename* is -, then the output is sent
+ to standard output.
+
+
+
+
+EXIT STATUS
+-----------
+
+
+If **llvm-dis** succeeds, it will exit with 0. Otherwise, if an error
+occurs, it will exit with a non-zero value.
+
+
+SEE ALSO
+--------
+
+
+llvm-as|llvm-as
diff --git a/docs/CommandGuide/llvm-extract.rst b/docs/CommandGuide/llvm-extract.rst
new file mode 100644
index 00000000000..d569e35729e
--- /dev/null
+++ b/docs/CommandGuide/llvm-extract.rst
@@ -0,0 +1,104 @@
+llvm-extract - extract a function from an LLVM module
+=====================================================
+
+
+SYNOPSIS
+--------
+
+
+**llvm-extract** [*options*] **--func** *function-name* [*filename*]
+
+
+DESCRIPTION
+-----------
+
+
+The **llvm-extract** command takes the name of a function and extracts it from
+the specified LLVM bitcode file. It is primarily used as a debugging tool to
+reduce test cases from larger programs that are triggering a bug.
+
+In addition to extracting the bitcode of the specified function,
+**llvm-extract** will also remove unreachable global variables, prototypes, and
+unused types.
+
+The **llvm-extract** command reads its input from standard input if filename is
+omitted or if filename is -. The output is always written to standard output,
+unless the **-o** option is specified (see below).
+
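+For example, to extract a hypothetical function ``main`` from ``prog.bc`` as
+readable IR:
+
+.. code-block:: sh
+
+   llvm-extract --func main -S prog.bc -o main.ll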
+
+OPTIONS
+-------
+
+
+
+**-f**
+
+ Enable binary output on terminals. Normally, **llvm-extract** will refuse to
+ write raw bitcode output if the output stream is a terminal. With this option,
+ **llvm-extract** will write raw bitcode regardless of the output device.
+
+
+
+**--func** *function-name*
+
+ Extract the function named *function-name* from the LLVM bitcode. May be
+ specified multiple times to extract multiple functions at once.
+
+
+
+**--rfunc** *function-regular-expr*
+
+ Extract the function(s) matching *function-regular-expr* from the LLVM bitcode.
+ All functions matching the regular expression will be extracted. May be
+ specified multiple times.
+
+
+
+**--glob** *global-name*
+
+ Extract the global variable named *global-name* from the LLVM bitcode. May be
+ specified multiple times to extract multiple global variables at once.
+
+
+
+**--rglob** *glob-regular-expr*
+
+ Extract the global variable(s) matching *global-regular-expr* from the LLVM
+ bitcode. All global variables matching the regular expression will be extracted.
+ May be specified multiple times.
+
+
+
+**-help**
+
+ Print a summary of command line options.
+
+
+
+**-o** *filename*
+
+ Specify the output filename. If filename is "-" (the default), then
+ **llvm-extract** sends its output to standard output.
+
+
+
+**-S**
+
+ Write output in LLVM intermediate language (instead of bitcode).
+
+
+
+
+EXIT STATUS
+-----------
+
+
+If **llvm-extract** succeeds, it will exit with 0. Otherwise, if an error
+occurs, it will exit with a non-zero value.
+
+
+SEE ALSO
+--------
+
+
+bugpoint|bugpoint
diff --git a/docs/CommandGuide/llvm-link.rst b/docs/CommandGuide/llvm-link.rst
new file mode 100644
index 00000000000..63019d7cca7
--- /dev/null
+++ b/docs/CommandGuide/llvm-link.rst
@@ -0,0 +1,96 @@
+llvm-link - LLVM linker
+=======================
+
+
+SYNOPSIS
+--------
+
+
+**llvm-link** [*options*] *filename ...*
+
+
+DESCRIPTION
+-----------
+
+
+**llvm-link** takes several LLVM bitcode files and links them together into a
+single LLVM bitcode file. It writes the output file to standard output, unless
+the **-o** option is used to specify a filename.
+
+**llvm-link** attempts to load the input files from the current directory. If
+that fails, it looks for each file in each of the directories specified by the
+**-L** options on the command line. The library search paths are global; each
+one is searched for every input file if necessary. The directories are searched
+in the order they were specified on the command line.
+
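+For example, two hypothetical modules can be linked into a single bitcode file:
+
+.. code-block:: sh
+
+   llvm-link a.bc b.bc -o linked.bc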
+
+OPTIONS
+-------
+
+
+
+**-L** *directory*
+
+ Add the specified *directory* to the library search path. When looking for
+ libraries, **llvm-link** will look in these directories. This option can be
+ specified multiple times; **llvm-link** will search inside these directories in
+ the order in which they were specified on the command line.
+
+
+
+**-f**
+
+ Enable binary output on terminals. Normally, **llvm-link** will refuse to
+ write raw bitcode output if the output stream is a terminal. With this option,
+ **llvm-link** will write raw bitcode regardless of the output device.
+
+
+
+**-o** *filename*
+
+ Specify the output file name. If *filename* is ``-``, then **llvm-link** will
+ write its output to standard output.
+
+
+
+**-S**
+
+ Write output in LLVM intermediate language (instead of bitcode).
+
+
+
+**-d**
+
+ If specified, **llvm-link** prints a human-readable version of the output
+ bitcode file to standard error.
+
+
+
+**-help**
+
+ Print a summary of command line options.
+
+
+
+**-v**
+
+ Verbose mode. Print information about what **llvm-link** is doing. This
+ typically includes a message for each bitcode file linked in and for each
+ library found.
+
+
+
+
+EXIT STATUS
+-----------
+
+
+If **llvm-link** succeeds, it will exit with 0. Otherwise, if an error
+occurs, it will exit with a non-zero value.
+
+
+SEE ALSO
+--------
+
+
+gccld|gccld
diff --git a/docs/CommandGuide/llvm-nm.rst b/docs/CommandGuide/llvm-nm.rst
new file mode 100644
index 00000000000..cbc7af20759
--- /dev/null
+++ b/docs/CommandGuide/llvm-nm.rst
@@ -0,0 +1,189 @@
+llvm-nm - list LLVM bitcode and object file's symbol table
+==========================================================
+
+
+SYNOPSIS
+--------
+
+
+:program:`llvm-nm` [*options*] [*filenames...*]
+
+
+DESCRIPTION
+-----------
+
+
+The :program:`llvm-nm` utility lists the names of symbols from the LLVM bitcode
+files, object files, or :program:`ar` archives containing them, named on the
+command line. Each symbol is listed along with some simple information about its
+provenance. If no file name is specified, or *-* is used as a file name,
+:program:`llvm-nm` will process a file on its standard input stream.
+
+:program:`llvm-nm`'s default output format is the traditional BSD :program:`nm`
+output format. Each such output record consists of an (optional) 8-digit
+hexadecimal address, followed by a type code character, followed by a name, for
+each symbol. One record is printed per line; fields are separated by spaces.
+When the address is omitted, it is replaced by 8 spaces.
+
+Type code characters currently supported, and their meanings, are as follows:
+
+
+U
+
+ Named object is referenced but undefined in this bitcode file
+
+
+
+C
+
+ Common (multiple definitions link together into one def)
+
+
+
+W
+
+ Weak reference (multiple definitions link together into zero or one definitions)
+
+
+
+t
+
+ Local function (text) object
+
+
+
+T
+
+ Global function (text) object
+
+
+
+d
+
+ Local data object
+
+
+
+D
+
+ Global data object
+
+
+
+?
+
+ Something unrecognizable
+
+
+
+Because LLVM bitcode files typically contain objects that are not considered to
+have addresses until they are linked into an executable image or dynamically
+compiled "just-in-time", :program:`llvm-nm` does not print an address for any
+symbol in a LLVM bitcode file, even symbols which are defined in the bitcode
+file.
+
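+For example, listing a hypothetical bitcode file produces records whose address
+field is blank (the output shown in the comments is illustrative only):
+
+.. code-block:: sh
+
+   llvm-nm foo.bc
+   #          T main
+   #          U printf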
+
+OPTIONS
+-------
+
+
+.. program:: llvm-nm
+
+
+.. option:: -B (default)
+
+ Use BSD output format. Alias for :option:`--format=bsd`.
+
+
+.. option:: -P
+
+ Use POSIX.2 output format. Alias for :option:`--format=posix`.
+
+
+.. option:: --debug-syms, -a
+
+ Show all symbols, even debugger only.
+
+
+.. option:: --defined-only
+
+ Print only symbols defined in this file (as opposed to
+ symbols which may be referenced by objects in this file, but not
+ defined in this file.)
+
+
+.. option:: --dynamic, -D
+
+ Display dynamic symbols instead of normal symbols.
+
+
+.. option:: --extern-only, -g
+
+ Print only symbols whose definitions are external; that is, accessible
+ from other files.
+
+
+.. option:: --format=format, -f format
+
+ Select an output format; *format* may be *sysv*, *posix*, or *bsd*. The default
+ is *bsd*.
+
+
+.. option:: -help
+
+ Print a summary of command-line options and their meanings.
+
+
+.. option:: --no-sort, -p
+
+ Shows symbols in order encountered.
+
+
+.. option:: --numeric-sort, -n, -v
+
+ Sort symbols by address.
+
+
+.. option:: --print-file-name, -A, -o
+
+ Precede each symbol with the file it came from.
+
+
+.. option:: --print-size, -S
+
+ Show symbol size instead of address.
+
+
+.. option:: --size-sort
+
+ Sort symbols by size.
+
+
+.. option:: --undefined-only, -u
+
+ Print only symbols referenced but not defined in this file.
+
+
+BUGS
+----
+
+
+ * :program:`llvm-nm` cannot demangle C++ mangled names, like GNU :program:`nm`
+ can.
+
+ * :program:`llvm-nm` does not support the full set of arguments that GNU
+ :program:`nm` does.
+
+
+EXIT STATUS
+-----------
+
+
+:program:`llvm-nm` exits with an exit code of zero.
+
+
+SEE ALSO
+--------
+
+
+llvm-dis|llvm-dis, ar(1), nm(1)
diff --git a/docs/CommandGuide/llvm-prof.rst b/docs/CommandGuide/llvm-prof.rst
new file mode 100644
index 00000000000..e8d0b19ca94
--- /dev/null
+++ b/docs/CommandGuide/llvm-prof.rst
@@ -0,0 +1,63 @@
+llvm-prof - print execution profile of LLVM program
+===================================================
+
+
+SYNOPSIS
+--------
+
+
+**llvm-prof** [*options*] [*bitcode file*] [*llvmprof.out*]
+
+
+DESCRIPTION
+-----------
+
+
+The **llvm-prof** tool reads in an *llvmprof.out* file (a different profile file
+can optionally be named as the second positional argument), a bitcode file for
+the program, and produces a human readable report suitable for determining where
+the program hotspots are.
+
+This program is often used in conjunction with the *utils/profile.pl*
+script. This script automatically instruments a program, runs it with the JIT,
+then runs **llvm-prof** to format a report. To get more information about
+*utils/profile.pl*, execute it with the **-help** option.
+
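+For example, a report can be generated from a hypothetical bitcode file and the
+profile data it produced:
+
+.. code-block:: sh
+
+   llvm-prof program.bc llvmprof.out        # plain report
+   llvm-prof -A program.bc llvmprof.out     # report plus annotated code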
+
+OPTIONS
+-------
+
+
+
+**--annotated-llvm** or **-A**
+
+ In addition to the normal report printed, print out the code for the
+ program, annotated with execution frequency information. This can be
+ particularly useful when trying to visualize how frequently basic blocks
+ are executed. This is most useful with basic block profiling
+ information or better.
+
+
+
+**--print-all-code**
+
+ Using this option enables the **--annotated-llvm** option, but it
+ prints the entire module, instead of just the most commonly executed
+ functions.
+
+
+
+**--time-passes**
+
+ Record the amount of time needed for each pass and print it to standard
+ error.
+
+
+
+
+EXIT STATUS
+-----------
+
+
+**llvm-prof** returns 1 if it cannot load the bitcode file or the profile
+information. Otherwise, it exits with zero.
diff --git a/docs/CommandGuide/llvm-ranlib.rst b/docs/CommandGuide/llvm-ranlib.rst
new file mode 100644
index 00000000000..6658818f41e
--- /dev/null
+++ b/docs/CommandGuide/llvm-ranlib.rst
@@ -0,0 +1,61 @@
+llvm-ranlib - Generate index for LLVM archive
+=============================================
+
+
+SYNOPSIS
+--------
+
+
+**llvm-ranlib** [--version] [-help] <archive-file>
+
+
+DESCRIPTION
+-----------
+
+
+The **llvm-ranlib** command is similar to the common Unix utility, ``ranlib``. It
+adds or updates the symbol table in an LLVM archive file. Note that using the
+**llvm-ar** modifier *s* is usually more efficient than running **llvm-ranlib**,
+which is provided only for completeness and compatibility. Unlike other
+implementations of ``ranlib``, **llvm-ranlib** indexes LLVM bitcode files, not
+native object modules. You can list the contents of the symbol table with the
+``llvm-nm -s`` command.
+
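+For example (the archive name is hypothetical):
+
+.. code-block:: sh
+
+   llvm-ranlib libfoo.a      # add or update the symbol table
+   llvm-nm -s libfoo.a       # list the contents of the symbol table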
+
+OPTIONS
+-------
+
+
+
+*archive-file*
+
+ Specifies the archive-file to which the symbol table is added or updated.
+
+
+
+*--version*
+
+ Print the version of **llvm-ranlib** and exit without building a symbol table.
+
+
+
+*-help*
+
+ Print usage help for **llvm-ranlib** and exit without building a symbol table.
+
+
+
+
+EXIT STATUS
+-----------
+
+
+If **llvm-ranlib** succeeds, it will exit with 0. If an error occurs, a non-zero
+exit code will be returned.
+
+
+SEE ALSO
+--------
+
+
+llvm-ar|llvm-ar, ranlib(1)
diff --git a/docs/CommandGuide/llvm-stress.rst b/docs/CommandGuide/llvm-stress.rst
new file mode 100644
index 00000000000..44aa32c7557
--- /dev/null
+++ b/docs/CommandGuide/llvm-stress.rst
@@ -0,0 +1,48 @@
+llvm-stress - generate random .ll files
+=======================================
+
+
+SYNOPSIS
+--------
+
+
+**llvm-stress** [-size=filesize] [-seed=initialseed] [-o=outfile]
+
+
+DESCRIPTION
+-----------
+
+
+The **llvm-stress** tool is used to generate random .ll files that can be used to
+test different components of LLVM.
+
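+For example, a reproducible random module can be generated with explicit
+(hypothetical) size and seed values:
+
+.. code-block:: sh
+
+   llvm-stress -size=200 -seed=42 -o=random.ll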
+
+OPTIONS
+-------
+
+
+
+**-o** *filename*
+
+ Specify the output filename.
+
+
+
+**-size** *size*
+
+ Specify the size of the generated .ll file.
+
+
+
+**-seed** *seed*
+
+ Specify the seed to be used for the randomly generated instructions.
+
+
+
+
+EXIT STATUS
+-----------
+
+
+**llvm-stress** returns 0.
diff --git a/docs/CommandGuide/opt.rst b/docs/CommandGuide/opt.rst
new file mode 100644
index 00000000000..72f19034c9e
--- /dev/null
+++ b/docs/CommandGuide/opt.rst
@@ -0,0 +1,183 @@
+opt - LLVM optimizer
+====================
+
+
+SYNOPSIS
+--------
+
+
+**opt** [*options*] [*filename*]
+
+
+DESCRIPTION
+-----------
+
+
+The **opt** command is the modular LLVM optimizer and analyzer. It takes LLVM
+source files as input, runs the specified optimizations or analyses on them, and
+then outputs the optimized file or the analysis results. The function of
+**opt** depends on whether the **-analyze** option is given.
+
+When **-analyze** is specified, **opt** performs various analyses of the input
+source. It will usually print the results on standard output, but in a few
+cases, it will print output to standard error or generate a file with the
+analysis output, which is usually done when the output is meant for another
+program.
+
+When **-analyze** is *not* given, **opt** attempts to produce an optimized
+output file. The optimizations available via **opt** depend upon what
+libraries were linked into it as well as any additional libraries that have
+been loaded with the **-load** option. Use the **-help** option to determine
+what optimizations you can use.
+
+If *filename* is omitted from the command line or is *-*, **opt** reads its
+input from standard input. Inputs can be in either the LLVM assembly language
+format (.ll) or the LLVM bitcode format (.bc).
+
+If an output filename is not specified with the **-o** option, **opt**
+writes its output to the standard output.
+
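+For example, a short pass pipeline might be run on a hypothetical input module,
+emitting readable IR (the pass names here are only illustrative):
+
+.. code-block:: sh
+
+   opt -mem2reg -dce -S input.ll -o output.ll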
+
+OPTIONS
+-------
+
+
+
+**-f**
+
+ Enable binary output on terminals. Normally, **opt** will refuse to
+ write raw bitcode output if the output stream is a terminal. With this option,
+ **opt** will write raw bitcode regardless of the output device.
+
+
+
+**-help**
+
+ Print a summary of command line options.
+
+
+
+**-o** *filename*
+
+ Specify the output filename.
+
+
+
+**-S**
+
+ Write output in LLVM intermediate language (instead of bitcode).
+
+
+
+**-{passname}**
+
+ **opt** provides the ability to run any of LLVM's optimization or analysis passes
+ in any order. The **-help** option lists all the passes available. The order in
+ which the options occur on the command line is the order in which they are
+ executed (within pass constraints).
+
+
+
+**-std-compile-opts**
+
+ This is shorthand for a standard list of *compile time optimization* passes.
+ This is typically used to optimize the output from the llvm-gcc front end. It
+ might be useful for other front end compilers as well. To discover the full set
+ of options available, use the following command:
+
+
+ .. code-block:: sh
+
+ llvm-as < /dev/null | opt -std-compile-opts -disable-output -debug-pass=Arguments
+
+
+
+
+**-disable-inlining**
+
+ This option is only meaningful when **-std-compile-opts** is given. It simply
+ removes the inlining pass from the standard list.
+
+
+
+**-disable-opt**
+
+ This option is only meaningful when **-std-compile-opts** is given. It disables
+ most, but not all, of the **-std-compile-opts**. The ones that remain are
+ **-verify**, **-lower-setjmp**, and **-funcresolve**.
+
+
+
+**-strip-debug**
+
+ This option causes opt to strip debug information from the module before
+ applying other optimizations. It is essentially the same as **-strip** but it
+ ensures that stripping of debug information is done first.
+
+
+
+**-verify-each**
+
+ This option causes opt to add a verify pass after every pass otherwise specified
+ on the command line (including **-verify**). This is useful for cases where it
+ is suspected that a pass is creating an invalid module but it is not clear which
+ pass is doing it. The combination of **-std-compile-opts** and **-verify-each**
+ can quickly track down this kind of problem.
+
+
+
+**-profile-info-file** *filename*
+
+ Specify the name of the file loaded by the -profile-loader option.
+
+
+
+**-stats**
+
+ Print statistics.
+
+
+
+**-time-passes**
+
+ Record the amount of time needed for each pass and print it to standard
+ error.
+
+
+
+**-debug**
+
+ If this is a debug build, this option will enable debug printouts
+ from passes which use the *DEBUG()* macro. See the **LLVM Programmer's
+ Manual**, section *#DEBUG* for more information.
+
+
+
+**-load**\ =\ *plugin*
+
+ Load the dynamic object *plugin*. This object should register new optimization
+ or analysis passes. Once loaded, the object will add new command line options to
+ enable various optimizations or analyses. To see the new complete list of
+ optimizations, use the **-help** and **-load** options together. For example:
+
+
+ .. code-block:: sh
+
+ opt -load=plugin.so -help
+
+
+
+
+**-p**
+
+ Print module after each transformation.
+
+
+
+
+EXIT STATUS
+-----------
+
+
+If **opt** succeeds, it will exit with 0. Otherwise, if an error
+occurs, it will exit with a non-zero value.
diff --git a/docs/CommandGuide/tblgen.rst b/docs/CommandGuide/tblgen.rst
new file mode 100644
index 00000000000..2d191676d9f
--- /dev/null
+++ b/docs/CommandGuide/tblgen.rst
@@ -0,0 +1,186 @@
+tblgen - Target Description To C++ Code Generator
+=================================================
+
+
+SYNOPSIS
+--------
+
+
+**tblgen** [*options*] [*filename*]
+
+
+DESCRIPTION
+-----------
+
+
+**tblgen** translates from target description (.td) files into C++ code that can
+be included in the definition of an LLVM target library. Most users of LLVM will
+not need to use this program. It is only for assisting with writing an LLVM
+target backend.
+
+The input and output of **tblgen** is beyond the scope of this short
+introduction. Please see the *CodeGeneration* page in the LLVM documentation.
+
+The *filename* argument specifies the name of a Target Description (.td) file
+to read as input.
+
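+For example, register information for a backend might be generated from a
+hypothetical target description like this:
+
+.. code-block:: sh
+
+   tblgen -gen-register-info -I /path/to/llvm/include Target.td -o TargetGenRegisterInfo.inc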
+
+OPTIONS
+-------
+
+
+
+**-help**
+
+ Print a summary of command line options.
+
+
+
+**-o** *filename*
+
+ Specify the output file name. If *filename* is ``-``, then **tblgen**
+ sends its output to standard output.
+
+
+
+**-I** *directory*
+
+ Specify where to find other target description files for inclusion. The
+ *directory* value should be a full or partial path to a directory that contains
+ target description files.
+
+
+
+**-asmparsernum** *N*
+
+ Make -gen-asm-parser emit assembly parser number *N*.
+
+
+
+**-asmwriternum** *N*
+
+ Make -gen-asm-writer emit assembly writer number *N*.
+
+
+
+**-class** *class Name*
+
+ Print the enumeration list for this class.
+
+
+
+**-print-records**
+
+ Print all records to standard output (default).
+
+
+
+**-print-enums**
+
+ Print enumeration values for a class
+
+
+
+**-print-sets**
+
+ Print expanded sets for testing DAG exprs.
+
+
+
+**-gen-emitter**
+
+ Generate machine code emitter.
+
+
+
+**-gen-register-info**
+
+ Generate registers and register classes info.
+
+
+
+**-gen-instr-info**
+
+ Generate instruction descriptions.
+
+
+
+**-gen-asm-writer**
+
+ Generate the assembly writer.
+
+
+
+**-gen-disassembler**
+
+ Generate disassembler.
+
+
+
+**-gen-pseudo-lowering**
+
+ Generate pseudo instruction lowering.
+
+
+
+**-gen-dag-isel**
+
+ Generate a DAG (Directed Acyclic Graph) instruction selector.
+
+
+
+**-gen-asm-matcher**
+
+ Generate assembly instruction matcher.
+
+
+
+**-gen-dfa-packetizer**
+
+ Generate DFA Packetizer for VLIW targets.
+
+
+
+**-gen-fast-isel**
+
+ Generate a "fast" instruction selector.
+
+
+
+**-gen-subtarget**
+
+ Generate subtarget enumerations.
+
+
+
+**-gen-intrinsic**
+
+ Generate intrinsic information.
+
+
+
+**-gen-tgt-intrinsic**
+
+ Generate target intrinsic information.
+
+
+
+**-gen-enhanced-disassembly-info**
+
+ Generate enhanced disassembly info.
+
+
+
+**-version**
+
+ Show the version number of this program.
+
+
+
+
+EXIT STATUS
+-----------
+
+
+If **tblgen** succeeds, it will exit with 0. Otherwise, if an error
+occurs, it will exit with a non-zero value.
diff --git a/docs/CommandLine.rst b/docs/CommandLine.rst
new file mode 100644
index 00000000000..302f5a4cf59
--- /dev/null
+++ b/docs/CommandLine.rst
@@ -0,0 +1,1615 @@
+.. _commandline:
+
+==============================
+CommandLine 2.0 Library Manual
+==============================
+
+Introduction
+============
+
+This document describes the CommandLine argument processing library. It will
+show you how to use it, and what it can do. The CommandLine library uses a
+declarative approach to specifying the command line options that your program
+takes. By default, these options declarations implicitly hold the value parsed
+for the option declared (of course this `can be changed`_).
+
+Although there are a **lot** of command line argument parsing libraries out
+there in many different languages, none of them fit well with what I needed. By
+looking at the features and problems of other libraries, I designed the
+CommandLine library to have the following features:
+
+#. Speed: The CommandLine library is very quick and uses little resources. The
+ parsing time of the library is directly proportional to the number of
+ arguments parsed, not the number of options recognized. Additionally,
+ command line argument values are captured transparently into user defined
+ global variables, which can be accessed like any other variable (and with the
+ same performance).
+
+#. Type Safe: As a user of CommandLine, you don't have to worry about
+ remembering the type of arguments that you want (is it an int? a string? a
+ bool? an enum?) and keep casting it around. Not only does this help prevent
+ error prone constructs, it also leads to dramatically cleaner source code.
+
+#. No subclasses required: To use CommandLine, you instantiate variables that
+ correspond to the arguments that you would like to capture, you don't
+ subclass a parser. This means that you don't have to write **any**
+ boilerplate code.
+
+#. Globally accessible: Libraries can specify command line arguments that are
+ automatically enabled in any tool that links to the library. This is
+ possible because the application doesn't have to keep a list of arguments to
+ pass to the parser. This also makes supporting `dynamically loaded options`_
+ trivial.
+
+#. Cleaner: CommandLine supports enum and other types directly, meaning that
+ there is less error and more security built into the library. You don't have
+ to worry about whether your integral command line argument accidentally got
+ assigned a value that is not valid for your enum type.
+
+#. Powerful: The CommandLine library supports many different types of arguments,
+ from simple `boolean flags`_ to `scalars arguments`_ (`strings`_,
+ `integers`_, `enums`_, `doubles`_), to `lists of arguments`_. This is
+ possible because CommandLine is...
+
+#. Extensible: It is very simple to add a new argument type to CommandLine.
+ Simply specify the parser that you want to use with the command line option
+ when you declare it. `Custom parsers`_ are no problem.
+
+#. Labor Saving: The CommandLine library cuts down on the amount of grunt work
+ that you, the user, have to do. For example, it automatically provides a
+ ``-help`` option that shows the available command line options for your tool.
+ Additionally, it does most of the basic correctness checking for you.
+
+#. Capable: The CommandLine library can handle lots of different forms of
+ options often found in real programs. For example, `positional`_ arguments,
+ ``ls`` style `grouping`_ options (to allow processing '``ls -lad``'
+ naturally), ``ld`` style `prefix`_ options (to parse '``-lmalloc
+ -L/usr/lib``'), and interpreter style options.
+
+This document will hopefully let you jump in and start using CommandLine in your
+utility quickly and painlessly. Additionally it should be a simple reference
+manual to figure out how stuff works. If it is failing in some area (or you
+want an extension to the library), nag the author, `Chris
+Lattner <mailto:sabre@nondot.org>`_.
+
+Quick Start Guide
+=================
+
+This section of the manual runs through a simple CommandLine'ification of a
+basic compiler tool. This is intended to show you how to jump into using the
+CommandLine library in your own program, and show you some of the cool things it
+can do.
+
+To start out, you need to include the CommandLine header file into your program:
+
+.. code-block:: c++
+
+ #include "llvm/Support/CommandLine.h"
+
+Additionally, you need to add this as the first line of your main program:
+
+.. code-block:: c++
+
+ int main(int argc, char **argv) {
+ cl::ParseCommandLineOptions(argc, argv);
+ ...
+ }
+
+... which actually parses the arguments and fills in the variable declarations.
+
+Now that you are ready to support command line arguments, we need to tell the
+system which ones we want, and what type of arguments they are. The CommandLine
+library uses a declarative syntax to model command line arguments with the
+global variable declarations that capture the parsed values. This means that
+for every command line option that you would like to support, there should be a
+global variable declaration to capture the result. For example, in a compiler,
+we would like to support the Unix-standard '``-o <filename>``' option to specify
+where to put the output. With the CommandLine library, this is represented like
+this:
+
+.. _scalars arguments:
+.. _here:
+
+.. code-block:: c++
+
+ cl::opt<string> OutputFilename("o", cl::desc("Specify output filename"), cl::value_desc("filename"));
+
+This declares a global variable "``OutputFilename``" that is used to capture the
+result of the "``o``" argument (first parameter). We specify that this is a
+simple scalar option by using the "``cl::opt``" template (as opposed to the
+"``cl::list``" template), and tell the CommandLine library that the data
+type that we are parsing is a string.
+
+The second and third parameters (which are optional) are used to specify what to
+output for the "``-help``" option. In this case, we get a line that looks like
+this:
+
+::
+
+ USAGE: compiler [options]
+
+ OPTIONS:
+ -help - display available options (-help-hidden for more)
+ -o <filename> - Specify output filename
+
+Because we specified that the command line option should parse using the
+``string`` data type, the variable declared is automatically usable as a real
+string in all contexts that a normal C++ string object may be used. For
+example:
+
+.. code-block:: c++
+
+ ...
+ std::ofstream Output(OutputFilename.c_str());
+ if (Output.good()) ...
+ ...
+
+There are many different options that you can use to customize the command line
+option handling library, but the above example shows the general interface to
+these options. The options can be specified in any order, and are specified
+with helper functions like `cl::desc(...)`_, so there are no positional
+dependencies to remember. The available options are discussed in detail in the
+`Reference Guide`_.
+
+Continuing the example, we would like to have our compiler take an input
+filename as well as an output filename, but we do not want the input filename to
+be specified with a hyphen (ie, not ``-filename.c``). To support this style of
+argument, the CommandLine library allows for `positional`_ arguments to be
+specified for the program. These positional arguments are filled with command
+line parameters that are not in option form. We use this feature like this:
+
+.. code-block:: c++
+
+
+ cl::opt<string> InputFilename(cl::Positional, cl::desc("<input file>"), cl::init("-"));
+
+This declaration indicates that the first positional argument should be treated
+as the input filename. Here we use the `cl::init`_ option to specify an initial
+value for the command line option, which is used if the option is not specified
+(if you do not specify a `cl::init`_ modifier for an option, then the default
+constructor for the data type is used to initialize the value). Command line
+options default to being optional, so if we would like to require that the user
+always specify an input filename, we would add the `cl::Required`_ flag, and we
+could eliminate the `cl::init`_ modifier, like this:
+
+.. code-block:: c++
+
+ cl::opt<string> InputFilename(cl::Positional, cl::desc("<input file>"), cl::Required);
+
+Again, the CommandLine library does not require the options to be specified in
+any particular order, so the above declaration is equivalent to:
+
+.. code-block:: c++
+
+ cl::opt<string> InputFilename(cl::Positional, cl::Required, cl::desc("<input file>"));
+
+By simply adding the `cl::Required`_ flag, the CommandLine library will
+automatically issue an error if the argument is not specified, which shifts all
+of the command line option verification code out of your application into the
+library. This is just one example of how using flags can alter the default
+behaviour of the library, on a per-option basis. By adding one of the
+declarations above, the ``-help`` option synopsis is now extended to:
+
+::
+
+ USAGE: compiler [options] <input file>
+
+ OPTIONS:
+ -help - display available options (-help-hidden for more)
+ -o <filename> - Specify output filename
+
+... indicating that an input filename is expected.
+
+Boolean Arguments
+-----------------
+
+In addition to input and output filenames, we would like the compiler example to
+support three boolean flags: "``-f``" to force writing binary output to a
+terminal, "``--quiet``" to enable quiet mode, and "``-q``" for backwards
+compatibility with some of our users. We can support these by declaring options
+of boolean type like this:
+
+.. code-block:: c++
+
+ cl::opt<bool> Force ("f", cl::desc("Enable binary output on terminals"));
+ cl::opt<bool> Quiet ("quiet", cl::desc("Don't print informational messages"));
+ cl::opt<bool> Quiet2("q", cl::desc("Don't print informational messages"), cl::Hidden);
+
+This does what you would expect: it declares three boolean variables
+("``Force``", "``Quiet``", and "``Quiet2``") to recognize these options. Note
+that the "``-q``" option is specified with the "`cl::Hidden`_" flag. This
+modifier prevents it from being shown by the standard "``-help``" output (note
+that it is still shown in the "``-help-hidden``" output).
+
+The CommandLine library uses a `different parser`_ for different data types.
+For example, in the string case, the argument passed to the option is copied
+literally into the content of the string variable... we obviously cannot do that
+in the boolean case, however, so we must use a smarter parser. In the case of
+the boolean parser, it allows no options (in which case it assigns the value of
+true to the variable), or it allows the values "``true``" or "``false``" to be
+specified, allowing any of the following inputs:
+
+::
+
+ compiler -f # No value, 'Force' == true
+ compiler -f=true # Value specified, 'Force' == true
+ compiler -f=TRUE # Value specified, 'Force' == true
+ compiler -f=FALSE # Value specified, 'Force' == false
+
+... you get the idea. The `bool parser`_ just turns the string values into
+boolean values, and rejects things like '``compiler -f=foo``'. Similarly, the
+`float`_, `double`_, and `int`_ parsers work like you would expect, using the
+'``strtol``' and '``strtod``' C library calls to parse the string value into the
+specified data type.
+
+With the declarations above, "``compiler -help``" emits this:
+
+::
+
+ USAGE: compiler [options] <input file>
+
+ OPTIONS:
+ -f - Enable binary output on terminals
+ -o - Override output filename
+ -quiet - Don't print informational messages
+ -help - display available options (-help-hidden for more)
+
+and "``compiler -help-hidden``" prints this:
+
+::
+
+ USAGE: compiler [options] <input file>
+
+ OPTIONS:
+ -f - Enable binary output on terminals
+ -o - Override output filename
+ -q - Don't print informational messages
+ -quiet - Don't print informational messages
+ -help - display available options (-help-hidden for more)
+
+This brief example has shown you how to use the '`cl::opt`_' class to parse
+simple scalar command line arguments. In addition to simple scalar arguments,
+the CommandLine library also provides primitives to support CommandLine option
+`aliases`_, and `lists`_ of options.
+
+.. _aliases:
+
+Argument Aliases
+----------------
+
+So far, the example works well, except for the fact that we need to check the
+quiet condition like this now:
+
+.. code-block:: c++
+
+ ...
+ if (!Quiet && !Quiet2) printInformationalMessage(...);
+ ...
+
+... which is a real pain! Instead of defining two values for the same
+condition, we can use the "`cl::alias`_" class to make the "``-q``" option an
+**alias** for the "``-quiet``" option, instead of providing a value itself:
+
+.. code-block:: c++
+
+ cl::opt<bool> Force ("f", cl::desc("Overwrite output files"));
+ cl::opt<bool> Quiet ("quiet", cl::desc("Don't print informational messages"));
+ cl::alias QuietA("q", cl::desc("Alias for -quiet"), cl::aliasopt(Quiet));
+
+The third line (which is the only one we modified from above) defines a "``-q``"
+alias that updates the "``Quiet``" variable (as specified by the `cl::aliasopt`_
+modifier) whenever it is specified. Because aliases do not hold state, the only
+thing the program has to query is the ``Quiet`` variable now. Another nice
+feature of aliases is that they automatically hide themselves from the ``-help``
+output (although, again, they are still visible in the ``-help-hidden`` output).
+
+Now the application code can simply use:
+
+.. code-block:: c++
+
+ ...
+ if (!Quiet) printInformationalMessage(...);
+ ...
+
+... which is much nicer! The "`cl::alias`_" can be used to specify an
+alternative name for any variable type, and has many uses.
+
+.. _unnamed alternatives using the generic parser:
+
+Selecting an alternative from a set of possibilities
+----------------------------------------------------
+
+So far we have seen how the CommandLine library handles builtin types like
+``std::string``, ``bool`` and ``int``, but how does it handle things it doesn't
+know about, like enums or '``int*``'s?
+
+The answer is that it uses a table-driven generic parser (unless you specify
+your own parser, as described in the `Extension Guide`_). This parser maps
+literal strings to whatever type is required, and requires you to tell it what
+this mapping should be.
+
+Let's say that we would like to add four optimization levels to our optimizer,
+using the standard flags "``-g``", "``-O0``", "``-O1``", and "``-O2``". We
+could easily implement this with boolean options like above, but there are
+several problems with this strategy:
+
+#. A user could specify more than one of the options at a time, for example,
+ "``compiler -O3 -O2``". The CommandLine library would not be able to catch
+ this erroneous input for us.
+
+#. We would have to test 4 different variables to see which ones are set.
+
+#. This doesn't map to the numeric levels that we want... so we cannot easily
+ see if some level >= "``-O1``" is enabled.
+
+To cope with these problems, we can use an enum value, and have the CommandLine
+library fill it in with the appropriate level directly, which is used like this:
+
+.. code-block:: c++
+
+ enum OptLevel {
+ g, O1, O2, O3
+ };
+
+ cl::opt<OptLevel> OptimizationLevel(cl::desc("Choose optimization level:"),
+ cl::values(
+ clEnumVal(g , "No optimizations, enable debugging"),
+ clEnumVal(O1, "Enable trivial optimizations"),
+ clEnumVal(O2, "Enable default optimizations"),
+ clEnumVal(O3, "Enable expensive optimizations"),
+ clEnumValEnd));
+
+ ...
+ if (OptimizationLevel >= O2) doPartialRedundancyElimination(...);
+ ...
+
+This declaration defines a variable "``OptimizationLevel``" of the
+"``OptLevel``" enum type. This variable can be assigned any of the values that
+are listed in the declaration (Note that the declaration list must be terminated
+with the "``clEnumValEnd``" argument!). The CommandLine library enforces that
+the user can only specify one of the options, and it ensures that only valid enum
+values can be specified. The "``clEnumVal``" macros ensure that the command
+line arguments match the enum values. With this option added, our help output
+now is:
+
+::
+
+ USAGE: compiler [options] <input file>
+
+ OPTIONS:
+ Choose optimization level:
+ -g - No optimizations, enable debugging
+ -O1 - Enable trivial optimizations
+ -O2 - Enable default optimizations
+ -O3 - Enable expensive optimizations
+ -f - Enable binary output on terminals
+ -help - display available options (-help-hidden for more)
+ -o <filename> - Specify output filename
+ -quiet - Don't print informational messages
+
+In this case, it is sort of awkward that flag names correspond directly to enum
+names, because we probably don't want an enum definition named "``g``" in our
+program. Because of this, we can alternatively write this example like this:
+
+.. code-block:: c++
+
+ enum OptLevel {
+ Debug, O1, O2, O3
+ };
+
+ cl::opt<OptLevel> OptimizationLevel(cl::desc("Choose optimization level:"),
+ cl::values(
+ clEnumValN(Debug, "g", "No optimizations, enable debugging"),
+ clEnumVal(O1 , "Enable trivial optimizations"),
+ clEnumVal(O2 , "Enable default optimizations"),
+ clEnumVal(O3 , "Enable expensive optimizations"),
+ clEnumValEnd));
+
+ ...
+ if (OptimizationLevel == Debug) outputDebugInfo(...);
+ ...
+
+By using the "``clEnumValN``" macro instead of "``clEnumVal``", we can directly
+specify the name that the flag should get.  In general a direct mapping is nice,
+but ``clEnumValN`` is the tool to reach for when you can't, or don't want to,
+preserve that mapping.
+
+Named Alternatives
+------------------
+
+Another useful argument form is a named alternative style. We shall use this
+style in our compiler to specify different debug levels that can be used.
+Instead of each debug level being its own switch, we want to support the
+following options, of which only one can be specified at a time:
+"``--debug-level=none``", "``--debug-level=quick``",
+"``--debug-level=detailed``". To do this, we use the exact same format as our
+optimization level flags, but we also specify an option name. For this case,
+the code looks like this:
+
+.. code-block:: c++
+
+ enum DebugLev {
+ nodebuginfo, quick, detailed
+ };
+
+ // Enable Debug Options to be specified on the command line
+ cl::opt<DebugLev> DebugLevel("debug_level", cl::desc("Set the debugging level:"),
+ cl::values(
+ clEnumValN(nodebuginfo, "none", "disable debug information"),
+ clEnumVal(quick, "enable quick debug information"),
+ clEnumVal(detailed, "enable detailed debug information"),
+ clEnumValEnd));
+
+This definition defines an enumerated command line variable of type "``enum
+DebugLev``", which works exactly the same way as before. The difference here is
+just the interface exposed to the user of your program and the help output by
+the "``-help``" option:
+
+::
+
+ USAGE: compiler [options] <input file>
+
+ OPTIONS:
+ Choose optimization level:
+ -g - No optimizations, enable debugging
+ -O1 - Enable trivial optimizations
+ -O2 - Enable default optimizations
+ -O3 - Enable expensive optimizations
+ -debug_level - Set the debugging level:
+ =none - disable debug information
+ =quick - enable quick debug information
+ =detailed - enable detailed debug information
+ -f - Enable binary output on terminals
+ -help - display available options (-help-hidden for more)
+ -o <filename> - Specify output filename
+ -quiet - Don't print informational messages
+
+Again, the only structural difference between the debug level declaration and
+the optimization level declaration is that the debug level declaration includes
+an option name (``"debug_level"``), which automatically changes how the library
+processes the argument. The CommandLine library supports both forms so that you
+can choose the form most appropriate for your application.
+
+.. _lists:
+
+Parsing a list of options
+-------------------------
+
+Now that we have the standard run-of-the-mill argument types out of the way,
+let's get a little wild and crazy.  Let's say that we want our optimizer to accept
+a **list** of optimizations to perform, allowing duplicates. For example, we
+might want to run: "``compiler -dce -constprop -inline -dce -strip``". In this
+case, the order of the arguments and the number of appearances are very
+important. This is what the "``cl::list``" template is for. First, start by
+defining an enum of the optimizations that you would like to perform:
+
+.. code-block:: c++
+
+ enum Opts {
+ // 'inline' is a C++ keyword, so name it 'inlining'
+ dce, constprop, inlining, strip
+ };
+
+Then define your "``cl::list``" variable:
+
+.. code-block:: c++
+
+ cl::list<Opts> OptimizationList(cl::desc("Available Optimizations:"),
+ cl::values(
+ clEnumVal(dce , "Dead Code Elimination"),
+ clEnumVal(constprop , "Constant Propagation"),
+ clEnumValN(inlining, "inline", "Procedure Integration"),
+ clEnumVal(strip , "Strip Symbols"),
+ clEnumValEnd));
+
+This defines a variable that is conceptually of the type
+"``std::vector<enum Opts>``". Thus, you can access it with standard vector
+methods:
+
+.. code-block:: c++
+
+ for (unsigned i = 0; i != OptimizationList.size(); ++i)
+ switch (OptimizationList[i])
+ ...
+
+... to iterate through the list of options specified.
+
+Note that the "``cl::list``" template is completely general and may be used with
+any data types or other arguments that you can use with the "``cl::opt``"
+template. One especially useful way to use a list is to capture all of the
+positional arguments together if there may be more than one specified. In the
+case of a linker, for example, the linker takes several '``.o``' files, and
+needs to capture them into a list. This is naturally specified as:
+
+.. code-block:: c++
+
+ ...
+ cl::list<std::string> InputFilenames(cl::Positional, cl::desc("<Input files>"), cl::OneOrMore);
+ ...
+
+This variable works just like a "``vector<string>``" object. As such, accessing
+the list is simple, just like above. In this example, we used the
+`cl::OneOrMore`_ modifier to inform the CommandLine library that it is an error
+if the user does not specify any ``.o`` files on our command line. Again, this
+just reduces the amount of checking we have to do.
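+
+For instance, a linker built around this declaration might simply walk the
+vector after parsing; ``linkObjectFile`` below is a placeholder for whatever the
+tool actually does with each input:
+
+.. code-block:: c++
+
+  for (unsigned i = 0; i != InputFilenames.size(); ++i)
+    linkObjectFile(InputFilenames[i]);   // placeholder helper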
+
+Collecting options as a set of flags
+------------------------------------
+
+Instead of collecting sets of options in a list, it is also possible to gather
+information for enum values in a **bit vector**. The representation used by the
+`cl::bits`_ class is an ``unsigned`` integer. An enum value is represented by a
+0/1 in the enum's ordinal value bit position: 1 indicates that the enum was
+specified, 0 that it was not.  As each specified value is parsed, the resulting
+enum's bit is set in the option's bit vector:
+
+.. code-block:: c++
+
+ bits |= 1 << (unsigned)enum;
+
+Options that are specified multiple times are redundant. Any instances after
+the first are discarded.
+
+Reworking the above list example, we could replace `cl::list`_ with `cl::bits`_:
+
+.. code-block:: c++
+
+ cl::bits<Opts> OptimizationBits(cl::desc("Available Optimizations:"),
+ cl::values(
+ clEnumVal(dce , "Dead Code Elimination"),
+ clEnumVal(constprop , "Constant Propagation"),
+ clEnumValN(inlining, "inline", "Procedure Integration"),
+ clEnumVal(strip , "Strip Symbols"),
+ clEnumValEnd));
+
+To test whether ``constprop`` was specified, we can use the ``cl::bits::isSet``
+function:
+
+.. code-block:: c++
+
+ if (OptimizationBits.isSet(constprop)) {
+ ...
+ }
+
+It's also possible to get the raw bit vector using the ``cl::bits::getBits``
+function:
+
+.. code-block:: c++
+
+ unsigned bits = OptimizationBits.getBits();
+
+Finally, if external storage is used, then the location specified must be of
+**type** ``unsigned``. In all other ways a `cl::bits`_ option is equivalent to a
+`cl::list`_ option.
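+
+As a sketch of that external storage form (reusing the ``Opts`` enum from the
+list example above, and assuming the location is attached with `cl::location`_
+just as for scalar options):
+
+.. code-block:: c++
+
+  unsigned OptBits;   // external storage; note that it must be 'unsigned'
+
+  static cl::bits<Opts, unsigned> OptimizationBits(
+    cl::desc("Available Optimizations:"),
+    cl::location(OptBits),
+    cl::values(
+      clEnumVal(dce       , "Dead Code Elimination"),
+      clEnumVal(constprop , "Constant Propagation"),
+      clEnumValN(inlining, "inline", "Procedure Integration"),
+      clEnumVal(strip     , "Strip Symbols"),
+      clEnumValEnd));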
+
+.. _additional extra text:
+
+Adding freeform text to help output
+-----------------------------------
+
+As our program grows and becomes more mature, we may decide to put summary
+information about what it does into the help output. The help output is styled
+to look similar to a Unix ``man`` page, providing concise information about a
+program.  Unix ``man`` pages, however, often include a description of what the
+program does. To add this to your CommandLine program, simply pass a third
+argument to the `cl::ParseCommandLineOptions`_ call in main. This additional
+argument is then printed as the overview information for your program, allowing
+you to include any additional information that you want. For example:
+
+.. code-block:: c++
+
+ int main(int argc, char **argv) {
+ cl::ParseCommandLineOptions(argc, argv, " CommandLine compiler example\n\n"
+ " This program blah blah blah...\n");
+ ...
+ }
+
+would yield the help output:
+
+::
+
+  OVERVIEW: CommandLine compiler example
+
+  This program blah blah blah...
+
+ USAGE: compiler [options] <input file>
+
+ OPTIONS:
+ ...
+ -help - display available options (-help-hidden for more)
+ -o <filename> - Specify output filename
+
+.. _Reference Guide:
+
+Reference Guide
+===============
+
+Now that you know the basics of how to use the CommandLine library, this section
+will give you the detailed information you need to tune how command line options
+work, as well as information on more "advanced" command line option processing
+capabilities.
+
+.. _positional:
+.. _positional argument:
+.. _Positional Arguments:
+.. _Positional arguments section:
+.. _positional options:
+
+Positional Arguments
+--------------------
+
+Positional arguments are those arguments that are not named, and are not
+specified with a hyphen. Positional arguments should be used when an option is
+specified by its position alone. For example, the standard Unix ``grep`` tool
+takes a regular expression argument, and an optional filename to search through
+(which defaults to standard input if a filename is not specified). Using the
+CommandLine library, this would be specified as:
+
+.. code-block:: c++
+
+ cl::opt<string> Regex (cl::Positional, cl::desc("<regular expression>"), cl::Required);
+ cl::opt<string> Filename(cl::Positional, cl::desc("<input file>"), cl::init("-"));
+
+Given these two option declarations, the ``-help`` output for our grep
+replacement would look like this:
+
+::
+
+ USAGE: spiffygrep [options] <regular expression> <input file>
+
+ OPTIONS:
+ -help - display available options (-help-hidden for more)
+
+... and the resultant program could be used just like the standard ``grep``
+tool.
+
+Positional arguments are sorted by their order of construction. This means that
+command line options will be ordered according to how they are listed in a .cpp
+file, but will not have an ordering defined if the positional arguments are
+defined in multiple .cpp files. The fix for this problem is simply to define
+all of your positional arguments in one .cpp file.
+
+Specifying positional options with hyphens
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Sometimes you may want to specify a value to your positional argument that
+starts with a hyphen (for example, searching for '``-foo``' in a file). At
+first, you will have trouble doing this, because it will try to find an argument
+named '``-foo``', and will fail (and single quotes will not save you). Note
+that the system ``grep`` has the same problem:
+
+::
+
+ $ spiffygrep '-foo' test.txt
+ Unknown command line argument '-foo'. Try: spiffygrep -help'
+
+ $ grep '-foo' test.txt
+ grep: illegal option -- f
+ grep: illegal option -- o
+ grep: illegal option -- o
+ Usage: grep -hblcnsviw pattern file . . .
+
+The solution for this problem is the same for both your tool and the system
+version: use the '``--``' marker. When the user specifies '``--``' on the
+command line, it is telling the program that all options after the '``--``'
+should be treated as positional arguments, not options. Thus, we can use it
+like this:
+
+::
+
+ $ spiffygrep -- -foo test.txt
+ ...output...
+
+Determining absolute position with getPosition()
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Sometimes an option can affect or modify the meaning of another option. For
+example, consider ``gcc``'s ``-x LANG`` option. This tells ``gcc`` to ignore the
+suffix of subsequent positional arguments and force the file to be interpreted
+as if it contained source code in language ``LANG``. In order to handle this
+properly, you need to know the absolute position of each argument, especially
+those in lists, so their interaction(s) can be applied correctly. This is also
+useful for options like ``-llibname`` which is actually a positional argument
+that starts with a dash.
+
+So, generally, the problem is that you have two ``cl::list`` variables that
+interact in some way. To ensure the correct interaction, you can use the
+``cl::list::getPosition(optnum)`` method. This method returns the absolute
+position (as found on the command line) of the ``optnum`` item in the
+``cl::list``.
+
+The idiom for usage is like this:
+
+.. code-block:: c++
+
+ static cl::list<std::string> Files(cl::Positional, cl::OneOrMore);
+ static cl::list<std::string> Libraries("l", cl::ZeroOrMore);
+
+ int main(int argc, char**argv) {
+ // ...
+ std::vector<std::string>::iterator fileIt = Files.begin();
+ std::vector<std::string>::iterator libIt = Libraries.begin();
+ unsigned libPos = 0, filePos = 0;
+ while ( 1 ) {
+ if ( libIt != Libraries.end() )
+ libPos = Libraries.getPosition( libIt - Libraries.begin() );
+ else
+ libPos = 0;
+ if ( fileIt != Files.end() )
+ filePos = Files.getPosition( fileIt - Files.begin() );
+ else
+ filePos = 0;
+
+ if ( filePos != 0 && (libPos == 0 || filePos < libPos) ) {
+ // Source File Is next
+ ++fileIt;
+ }
+ else if ( libPos != 0 && (filePos == 0 || libPos < filePos) ) {
+ // Library is next
+ ++libIt;
+ }
+ else
+ break; // we're done with the list
+ }
+ }
+
+Note that, for compatibility reasons, the ``cl::opt`` class also supports an
+``unsigned getPosition()`` method that will provide the absolute position of
+that option. You can apply the same approach as above with a ``cl::opt`` and a
+``cl::list`` option as you can with two lists.
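+
+A minimal sketch of that mixed case (the '``-x``' option here is only
+illustrative):
+
+.. code-block:: c++
+
+  static cl::opt<std::string>  Language("x", cl::desc("Source language"));
+  static cl::list<std::string> Files(cl::Positional, cl::OneOrMore);
+
+  // ... after cl::ParseCommandLineOptions(argc, argv):
+  unsigned LangPos = Language.getPosition();   // 0 if '-x' was never given
+  for (unsigned i = 0; i != Files.size(); ++i)
+    if (LangPos != 0 && Files.getPosition(i) > LangPos) {
+      // This file appeared after '-x' on the command line.
+    }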
+
+.. _interpreter style options:
+.. _cl::ConsumeAfter:
+.. _this section for more information:
+
+The ``cl::ConsumeAfter`` modifier
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The ``cl::ConsumeAfter`` `formatting option`_ is used to construct programs that
+use "interpreter style" option processing. With this style of option
+processing, all arguments specified after the last positional argument are
+treated as special interpreter arguments that are not interpreted as options by
+the command line library.
+
+As a concrete example, let's say we are developing a replacement for the standard
+Unix Bourne shell (``/bin/sh``). To run ``/bin/sh``, first you specify options
+to the shell itself (like ``-x`` which turns on trace output), then you specify
+the name of the script to run, then you specify arguments to the script. These
+arguments to the script are parsed by the Bourne shell command line option
+processor, but are not interpreted as options to the shell itself. Using the
+CommandLine library, we would specify this as:
+
+.. code-block:: c++
+
+ cl::opt<string> Script(cl::Positional, cl::desc("<input script>"), cl::init("-"));
+ cl::list<string> Argv(cl::ConsumeAfter, cl::desc("<program arguments>..."));
+ cl::opt<bool> Trace("x", cl::desc("Enable trace output"));
+
+which automatically provides the help output:
+
+::
+
+ USAGE: spiffysh [options] <input script> <program arguments>...
+
+ OPTIONS:
+ -help - display available options (-help-hidden for more)
+ -x - Enable trace output
+
+At runtime, if we run our new shell replacement as '``spiffysh -x test.sh -a -x
+-y bar``', the ``Trace`` variable will be set to true, the ``Script`` variable
+will be set to "``test.sh``", and the ``Argv`` list will contain ``["-a", "-x",
+"-y", "bar"]``, because they were specified after the last positional argument
+(which is the script name).
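+
+From there, the program can hand the collected strings to its own interpreter;
+``runScript`` below is a hypothetical helper, not part of the CommandLine
+library:
+
+.. code-block:: c++
+
+  int main(int argc, char **argv) {
+    cl::ParseCommandLineOptions(argc, argv, " spiffysh - shell example\n");
+
+    std::vector<std::string> ScriptArgs(Argv.begin(), Argv.end());
+    return runScript(Script, Trace, ScriptArgs);   // hypothetical helper
+  }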
+
+There are several limitations to when ``cl::ConsumeAfter`` options can be
+specified. For example, only one ``cl::ConsumeAfter`` can be specified per
+program, there must be at least one `positional argument`_ specified, there must
+not be any `cl::list`_ positional arguments, and the ``cl::ConsumeAfter`` option
+should be a `cl::list`_ option.
+
+.. _can be changed:
+.. _Internal vs External Storage:
+
+Internal vs External Storage
+----------------------------
+
+By default, all command line options automatically hold the value that they
+parse from the command line. This is very convenient in the common case,
+especially when combined with the ability to define command line options in the
+files that use them. This is called the internal storage model.
+
+Sometimes, however, it is nice to separate the command line option processing
+code from the storage of the value parsed.  For example, let's say that we have a
+'``-debug``' option that we would like to use to enable debug information across
+the entire body of our program. In this case, the boolean value controlling the
+debug code should be globally accessible (in a header file, for example) yet the
+command line option processing code should not be exposed to all of these
+clients (requiring lots of .cpp files to ``#include CommandLine.h``).
+
+To do this, set up your .h file with your option, like this for example:
+
+.. code-block:: c++
+
+ // DebugFlag.h - Get access to the '-debug' command line option
+ //
+
+ // DebugFlag - This boolean is set to true if the '-debug' command line option
+ // is specified. This should probably not be referenced directly, instead, use
+ // the DEBUG macro below.
+ //
+ extern bool DebugFlag;
+
+ // DEBUG macro - This macro should be used by code to emit debug information.
+  // If the '-debug' option is specified on the command line, and if this is a
+ // debug build, then the code specified as the option to the macro will be
+ // executed. Otherwise it will not be.
+ #ifdef NDEBUG
+ #define DEBUG(X)
+ #else
+ #define DEBUG(X) do { if (DebugFlag) { X; } } while (0)
+ #endif
+
+This allows clients to blissfully use the ``DEBUG()`` macro, or the
+``DebugFlag`` explicitly if they want to. Now we just need to be able to set
+the ``DebugFlag`` boolean when the option is set. To do this, we pass an
+additional argument to our command line argument processor, and we specify where
+to fill in with the `cl::location`_ attribute:
+
+.. code-block:: c++
+
+ bool DebugFlag; // the actual value
+ static cl::opt<bool, true> // The parser
+ Debug("debug", cl::desc("Enable debug output"), cl::Hidden, cl::location(DebugFlag));
+
+In the above example, we specify "``true``" as the second argument to the
+`cl::opt`_ template, indicating that the template should not maintain a copy of
+the value itself. In addition to this, we specify the `cl::location`_
+attribute, so that ``DebugFlag`` is automatically set.
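+
+With that in place, a client ``.cpp`` file only needs the header.  A small,
+hypothetical example (``printLoopStatistics`` and ``Loop`` are not real names):
+
+.. code-block:: c++
+
+  #include "DebugFlag.h"
+
+  void hoistLoopInvariants(Loop *L) {
+    DEBUG(printLoopStatistics(L));   // runs only in debug builds with '-debug'
+    ...
+  }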
+
+Option Attributes
+-----------------
+
+This section describes the basic attributes that you can specify on options.
+
+* The option name attribute (which is required for all options, except
+ `positional options`_) specifies what the option name is. This option is
+ specified in simple double quotes:
+
+ .. code-block:: c++
+
+    cl::opt<bool> Quiet("quiet");
+
+.. _cl::desc(...):
+
+* The **cl::desc** attribute specifies a description for the option to be
+ shown in the ``-help`` output for the program.
+
+.. _cl::value_desc:
+
+* The **cl::value_desc** attribute specifies a string that can be used to
+ fine tune the ``-help`` output for a command line option. Look `here`_ for an
+ example.
+
+.. _cl::init:
+
+* The **cl::init** attribute specifies an initial value for a `scalar`_
+ option. If this attribute is not specified then the command line option value
+ defaults to the value created by the default constructor for the
+ type.
+
+ .. warning::
+
+ If you specify both **cl::init** and **cl::location** for an option, you
+ must specify **cl::location** first, so that when the command-line parser
+ sees **cl::init**, it knows where to put the initial value. (You will get an
+ error at runtime if you don't put them in the right order.)
+
+.. _cl::location:
+
+* The **cl::location** attribute specifies where to store the value for a parsed
+ line option if using external storage. See the section on `Internal vs
+ External Storage`_ for more information.
+
+.. _cl::aliasopt:
+
+* The **cl::aliasopt** attribute specifies which option a `cl::alias`_ option is
+ an alias for.
+
+.. _cl::values:
+
+* The **cl::values** attribute specifies the string-to-value mapping to be used
+ by the generic parser. It takes a **clEnumValEnd terminated** list of
+ (option, value, description) triplets that specify the option name, the value
+ mapped to, and the description shown in the ``-help`` for the tool. Because
+ the generic parser is used most frequently with enum values, two macros are
+ often useful:
+
+  #. The **clEnumVal** macro is used as a nice simple way to specify a triplet
+     for an enum.  This macro automatically makes the option name be the same as
+     the enum name.  The first argument to the macro is the enum value, the
+     second is the description for the command line option.
+
+  #. The **clEnumValN** macro is used to specify options where the option
+     name doesn't equal the enum name.  For this macro, the first argument is
+     the enum value, the second is the flag name, and the third is the
+     description.
+
+ You will get a compile time error if you try to use cl::values with a parser
+ that does not support it.
+
+.. _cl::multi_val:
+
+* The **cl::multi_val** attribute specifies that this option takes multiple
+  values (example: ``-sectalign segname sectname sectvalue``).  This attribute
+  takes one unsigned argument - the number of values for the option.  This
+  attribute is valid only on ``cl::list`` options (and will fail with a compile
+  error if you try to use it with other option types).  It is allowed to use all
+  of the usual modifiers on multi-valued options (besides
+  ``cl::ValueDisallowed``, obviously).  A sketch of such a declaration follows
+  this list.
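+
+For illustration, a minimal sketch of a ``cl::multi_val`` declaration modeled on
+the ``-sectalign`` example above (the option and value names are placeholders):
+
+.. code-block:: c++
+
+  // Each occurrence of -sectalign consumes exactly three values.
+  static cl::list<std::string> SectAlign("sectalign",
+    cl::desc("Set the alignment of a section"),
+    cl::value_desc("segname sectname value"),
+    cl::multi_val(3));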
+
+Option Modifiers
+----------------
+
+Option modifiers are the flags and expressions that you pass into the
+constructors for `cl::opt`_ and `cl::list`_. These modifiers give you the
+ability to tweak how options are parsed and how ``-help`` output is generated to
+fit your application well.
+
+These options fall into five main categories:
+
+#. Hiding an option from ``-help`` output
+
+#. Controlling the number of occurrences required and allowed
+
+#. Controlling whether or not a value must be specified
+
+#. Controlling other formatting options
+
+#. Miscellaneous option modifiers
+
+It is not possible to specify two modifiers from the same category for a single
+option (you'll get a runtime error), except for modifiers in the miscellaneous
+category.  The CommandLine library specifies defaults for all of these settings
+that are the most useful in practice and the most common, which means that you
+usually shouldn't have to worry about them.
+
+Hiding an option from ``-help`` output
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The ``cl::NotHidden``, ``cl::Hidden``, and ``cl::ReallyHidden`` modifiers are
+used to control whether or not an option appears in the ``-help`` and
+``-help-hidden`` output for the compiled program:
+
+.. _cl::NotHidden:
+
+* The **cl::NotHidden** modifier (which is the default for `cl::opt`_ and
+ `cl::list`_ options) indicates the option is to appear in both help
+ listings.
+
+.. _cl::Hidden:
+
+* The **cl::Hidden** modifier (which is the default for `cl::alias`_ options)
+ indicates that the option should not appear in the ``-help`` output, but
+ should appear in the ``-help-hidden`` output.
+
+.. _cl::ReallyHidden:
+
+* The **cl::ReallyHidden** modifier indicates that the option should not appear
+ in any help output.
+
+Controlling the number of occurrences required and allowed
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+This group of options is used to control how many times an option is allowed (or
+required) to be specified on the command line of your program. Specifying a
+value for this setting allows the CommandLine library to do error checking for
+you.
+
+The allowed values for this option group are:
+
+.. _cl::Optional:
+
+* The **cl::Optional** modifier (which is the default for the `cl::opt`_ and
+ `cl::alias`_ classes) indicates that your program will allow either zero or
+ one occurrence of the option to be specified.
+
+.. _cl::ZeroOrMore:
+
+* The **cl::ZeroOrMore** modifier (which is the default for the `cl::list`_
+ class) indicates that your program will allow the option to be specified zero
+ or more times.
+
+.. _cl::Required:
+
+* The **cl::Required** modifier indicates that the specified option must be
+ specified exactly one time.
+
+.. _cl::OneOrMore:
+
+* The **cl::OneOrMore** modifier indicates that the option must be specified at
+ least one time.
+
+* The **cl::ConsumeAfter** modifier is described in the `Positional arguments
+ section`_.
+
+If an option is not specified, then the value of the option is equal to the
+value specified by the `cl::init`_ attribute. If the ``cl::init`` attribute is
+not specified, the option value is initialized with the default constructor for
+the data type.
+
+If an option is specified multiple times for an option of the `cl::opt`_ class,
+only the last value will be retained.
+
+Controlling whether or not a value must be specified
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+This group of options is used to control whether or not the option allows a
+value to be present. In the case of the CommandLine library, a value is either
+specified with an equal sign (e.g. '``-index-depth=17``') or as a trailing
+string (e.g. '``-o a.out``').
+
+The allowed values for this option group are:
+
+.. _cl::ValueOptional:
+
+* The **cl::ValueOptional** modifier (which is the default for ``bool`` typed
+ options) specifies that it is acceptable to have a value, or not. A boolean
+ argument can be enabled just by appearing on the command line, or it can have
+ an explicit '``-foo=true``'. If an option is specified with this mode, it is
+ illegal for the value to be provided without the equal sign. Therefore
+  '``-foo true``' is illegal.  To allow a value separated by whitespace, you
+  must use the `cl::ValueRequired`_ modifier.
+
+.. _cl::ValueRequired:
+
+* The **cl::ValueRequired** modifier (which is the default for all other types
+ except for `unnamed alternatives using the generic parser`_) specifies that a
+ value must be provided. This mode informs the command line library that if an
+  option is not provided with an equal sign, the next argument provided
+ must be the value. This allows things like '``-o a.out``' to work.
+
+.. _cl::ValueDisallowed:
+
+* The **cl::ValueDisallowed** modifier (which is the default for `unnamed
+ alternatives using the generic parser`_) indicates that it is a runtime error
+ for the user to specify a value. This can be provided to disallow users from
+  providing values to boolean options (like '``-foo=true``').
+
+In general, the default values for this option group work just like you would
+want them to. As mentioned above, you can specify the `cl::ValueDisallowed`_
+modifier to a boolean argument to restrict your command line parser. These
+options are mostly useful when `extending the library`_.
+
+.. _formatting option:
+
+Controlling other formatting options
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The formatting option group is used to specify that the command line option has
+special abilities and is otherwise different from other command line arguments.
+As usual, you can only specify one of these arguments at most.
+
+.. _cl::NormalFormatting:
+
+* The **cl::NormalFormatting** modifier (which is the default for all options)
+ specifies that this option is "normal".
+
+.. _cl::Positional:
+
+* The **cl::Positional** modifier specifies that this is a positional argument
+ that does not have a command line option associated with it. See the
+ `Positional Arguments`_ section for more information.
+
+* The **cl::ConsumeAfter** modifier specifies that this option is used to
+ capture "interpreter style" arguments. See `this section for more
+ information`_.
+
+.. _prefix:
+.. _cl::Prefix:
+
+* The **cl::Prefix** modifier specifies that this option prefixes its value.
+ With 'Prefix' options, the equal sign does not separate the value from the
+ option name specified. Instead, the value is everything after the prefix,
+ including any equal sign if present. This is useful for processing odd
+ arguments like ``-lmalloc`` and ``-L/usr/lib`` in a linker tool or
+ ``-DNAME=value`` in a compiler tool. Here, the '``l``', '``D``' and '``L``'
+  options are normal string (or list) options that have the **cl::Prefix**
+ modifier added to allow the CommandLine library to recognize them. Note that
+ **cl::Prefix** options must not have the **cl::ValueDisallowed** modifier
+ specified.
+
+.. _grouping:
+.. _cl::Grouping:
+
+* The **cl::Grouping** modifier is used to implement Unix-style tools (like
+ ``ls``) that have lots of single letter arguments, but only require a single
+ dash. For example, the '``ls -labF``' command actually enables four different
+ options, all of which are single letters. Note that **cl::Grouping** options
+ cannot have values.
+
+The CommandLine library does not restrict how you use the **cl::Prefix** or
+**cl::Grouping** modifiers, but it is possible to specify ambiguous argument
+settings. Thus, it is possible to have multiple letter options that are prefix
+or grouping options, and they will still work as designed.
+
+To do this, the CommandLine library uses a greedy algorithm to parse the input
+option into (potentially multiple) prefix and grouping options. The strategy
+basically looks like this:
+
+::
+
+ parse(string OrigInput) {
+
+ 1. string input = OrigInput;
+ 2. if (isOption(input)) return getOption(input).parse(); // Normal option
+ 3. while (!isOption(input) && !input.empty()) input.pop_back(); // Remove the last letter
+ 4. if (input.empty()) return error(); // No matching option
+ 5. if (getOption(input).isPrefix())
+ return getOption(input).parse(input);
+ 6. while (!input.empty()) { // Must be grouping options
+ getOption(input).parse();
+ OrigInput.erase(OrigInput.begin(), OrigInput.begin()+input.length());
+ input = OrigInput;
+ while (!isOption(input) && !input.empty()) input.pop_back();
+ }
+ 7. if (!OrigInput.empty()) error();
+
+ }
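+
+Concretely, a linker-like tool might mix the two styles as in this sketch (the
+option set here is purely illustrative):
+
+.. code-block:: c++
+
+  // '-lmalloc' is parsed as option 'l' with the value "malloc".
+  static cl::list<std::string> Libraries("l", cl::Prefix,
+                                         cl::desc("Library to link against"));
+
+  // 'ls -aF' style single-letter flags that may share one dash.
+  static cl::opt<bool> ShowAll ("a", cl::Grouping, cl::desc("Show all entries"));
+  static cl::opt<bool> Classify("F", cl::Grouping, cl::desc("Append indicators"));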
+
+Miscellaneous option modifiers
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The miscellaneous option modifiers are the only flags where you can specify more
+than one flag from the set: they are not mutually exclusive. These flags
+specify boolean properties that modify the option.
+
+.. _cl::CommaSeparated:
+
+* The **cl::CommaSeparated** modifier indicates that any commas specified for an
+ option's value should be used to split the value up into multiple values for
+ the option. For example, these two options are equivalent when
+ ``cl::CommaSeparated`` is specified: "``-foo=a -foo=b -foo=c``" and
+ "``-foo=a,b,c``". This option only makes sense to be used in a case where the
+ option is allowed to accept one or more values (i.e. it is a `cl::list`_
+ option).
+
+.. _cl::PositionalEatsArgs:
+
+* The **cl::PositionalEatsArgs** modifier (which only applies to positional
+ arguments, and only makes sense for lists) indicates that positional argument
+ should consume any strings after it (including strings that start with a "-")
+ up until another recognized positional argument. For example, if you have two
+ "eating" positional arguments, "``pos1``" and "``pos2``", the string "``-pos1
+  -foo -bar baz -pos2 -bork``" would cause the "``-foo -bar baz``" strings to
+ be applied to the "``-pos1``" option and the "``-bork``" string to be applied
+ to the "``-pos2``" option.
+
+.. _cl::Sink:
+
+* The **cl::Sink** modifier is used to handle unknown options. If there is at
+ least one option with ``cl::Sink`` modifier specified, the parser passes
+ unrecognized option strings to it as values instead of signaling an error. As
+ with ``cl::CommaSeparated``, this modifier only makes sense with a `cl::list`_
+ option.
+
+So far, these are the only three miscellaneous option modifiers.
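+
+For example, a wrapper tool that forwards unrecognized options to another
+program might declare a sink list and inspect it after parsing
+(``forwardOption`` is a hypothetical helper):
+
+.. code-block:: c++
+
+  static cl::list<std::string> UnknownOptions(cl::Sink);
+
+  ...
+  for (unsigned i = 0; i != UnknownOptions.size(); ++i)
+    forwardOption(UnknownOptions[i]);   // hypothetical helper
+  ...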
+
+.. _response files:
+
+Response files
+^^^^^^^^^^^^^^
+
+Some systems, such as certain variants of Microsoft Windows and some older
+Unices, have a relatively low limit on command-line length. It is therefore
+customary to use the so-called 'response files' to circumvent this
+restriction. These files are mentioned on the command-line (using the "@file"
+syntax). The program reads these files and inserts the contents into argv,
+thereby working around the command-line length limits. Response files are
+enabled by an optional fourth argument to `cl::ParseEnvironmentOptions`_ and
+`cl::ParseCommandLineOptions`_.
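+
+A minimal sketch of enabling the feature from ``main`` (the response file name
+in the comment is purely illustrative):
+
+.. code-block:: c++
+
+  int main(int argc, char **argv) {
+    // Passing 'true' as the fourth argument turns on '@file' expansion, so the
+    // tool can then be invoked as, e.g., 'compiler @arguments.rsp'.
+    cl::ParseCommandLineOptions(argc, argv, " CommandLine compiler example\n",
+                                true);
+    ...
+  }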
+
+Top-Level Classes and Functions
+-------------------------------
+
+Despite all of the built-in flexibility, the CommandLine option library really
+only consists of one function (`cl::ParseCommandLineOptions`_) and three main
+classes: `cl::opt`_, `cl::list`_, and `cl::alias`_. This section describes
+these three classes in detail.
+
+.. _cl::ParseCommandLineOptions:
+
+The ``cl::ParseCommandLineOptions`` function
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The ``cl::ParseCommandLineOptions`` function is designed to be called directly
+from ``main``, and is used to fill in the values of all of the command line
+option variables once ``argc`` and ``argv`` are available.
+
+The ``cl::ParseCommandLineOptions`` function requires two parameters (``argc``
+and ``argv``), but may also take an optional third parameter which holds
+`additional extra text`_ to emit when the ``-help`` option is invoked, and a
+fourth boolean parameter that enables `response files`_.
+
+.. _cl::ParseEnvironmentOptions:
+
+The ``cl::ParseEnvironmentOptions`` function
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The ``cl::ParseEnvironmentOptions`` function has mostly the same effects as
+`cl::ParseCommandLineOptions`_, except that it is designed to take values for
+options from an environment variable, for those cases in which reading the
+command line is not convenient or desired. It fills in the values of all the
+command line option variables just like `cl::ParseCommandLineOptions`_ does.
+
+It takes four parameters: the name of the program (since ``argv`` may not be
+available, it can't just look in ``argv[0]``), the name of the environment
+variable to examine, the optional `additional extra text`_ to emit when the
+``-help`` option is invoked, and the boolean switch that controls whether
+`response files`_ should be read.
+
+``cl::ParseEnvironmentOptions`` will break the environment variable's value up
+into words and then process them using `cl::ParseCommandLineOptions`_.
+**Note:** Currently ``cl::ParseEnvironmentOptions`` does not support quoting, so
+an environment variable containing ``-option "foo bar"`` will be parsed as three
+words, ``-option``, ``"foo``, and ``bar"``, which is different from what you
+would get from the shell with the same input.
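+
+A minimal sketch, using a made-up program name and environment variable:
+
+.. code-block:: c++
+
+  int main(int argc, char **argv) {
+    // Options are read from the COMPILER_OPTIONS environment variable, not argv.
+    cl::ParseEnvironmentOptions("compiler", "COMPILER_OPTIONS",
+                                " CommandLine compiler example\n");
+    ...
+  }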
+
+The ``cl::SetVersionPrinter`` function
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The ``cl::SetVersionPrinter`` function is designed to be called directly from
+``main`` and *before* ``cl::ParseCommandLineOptions``. Its use is optional. It
+simply arranges for a function to be called in response to the ``--version``
+option instead of having the ``CommandLine`` library print out the usual version
+string for LLVM. This is useful for programs that are not part of LLVM but wish
+to use the ``CommandLine`` facilities. Such programs should just define a small
+function that takes no arguments and returns ``void`` and that prints out
+whatever version information is appropriate for the program. Pass the address of
+that function to ``cl::SetVersionPrinter`` to arrange for it to be called when
+the ``--version`` option is given by the user.
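+
+A small sketch of the idiom (the version text is whatever your tool wants to
+print):
+
+.. code-block:: c++
+
+  #include <iostream>
+
+  static void PrintVersion() {
+    std::cout << "MyTool version 4.2\n";
+  }
+
+  int main(int argc, char **argv) {
+    cl::SetVersionPrinter(PrintVersion);   // must precede the parse call
+    cl::ParseCommandLineOptions(argc, argv);
+    ...
+  }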
+
+.. _cl::opt:
+.. _scalar:
+
+The ``cl::opt`` class
+^^^^^^^^^^^^^^^^^^^^^
+
+The ``cl::opt`` class is the class used to represent scalar command line
+options, and is the one used most of the time. It is a templated class which
+can take up to three arguments (all except for the first have default values
+though):
+
+.. code-block:: c++
+
+ namespace cl {
+ template <class DataType, bool ExternalStorage = false,
+ class ParserClass = parser<DataType> >
+ class opt;
+ }
+
+The first template argument specifies what underlying data type the command line
+argument is, and is used to select a default parser implementation. The second
+template argument is used to specify whether the option should contain the
+storage for the option (the default) or whether external storage should be used
+to contain the value parsed for the option (see `Internal vs External Storage`_
+for more information).
+
+The third template argument specifies which parser to use. The default value
+selects an instantiation of the ``parser`` class based on the underlying data
+type of the option. In general, this default works well for most applications,
+so this option is only used when using a `custom parser`_.
+
+.. _lists of arguments:
+.. _cl::list:
+
+The ``cl::list`` class
+^^^^^^^^^^^^^^^^^^^^^^
+
+The ``cl::list`` class is the class used to represent a list of command line
+options. It too is a templated class which can take up to three arguments:
+
+.. code-block:: c++
+
+ namespace cl {
+ template <class DataType, class Storage = bool,
+ class ParserClass = parser<DataType> >
+ class list;
+ }
+
+This class works the exact same as the `cl::opt`_ class, except that the second
+argument is the **type** of the external storage, not a boolean value. For this
+class, the marker type '``bool``' is used to indicate that internal storage
+should be used.
+
+.. _cl::bits:
+
+The ``cl::bits`` class
+^^^^^^^^^^^^^^^^^^^^^^
+
+The ``cl::bits`` class is the class used to represent a list of command line
+options in the form of a bit vector. It is also a templated class which can
+take up to three arguments:
+
+.. code-block:: c++
+
+ namespace cl {
+ template <class DataType, class Storage = bool,
+ class ParserClass = parser<DataType> >
+ class bits;
+ }
+
+This class works the exact same as the `cl::list`_ class, except that the second
+argument must be of **type** ``unsigned`` if external storage is used.
+
+.. _cl::alias:
+
+The ``cl::alias`` class
+^^^^^^^^^^^^^^^^^^^^^^^
+
+The ``cl::alias`` class is a nontemplated class that is used to form aliases for
+other arguments.
+
+.. code-block:: c++
+
+ namespace cl {
+ class alias;
+ }
+
+The `cl::aliasopt`_ attribute should be used to specify which option this is an
+alias for.  Alias arguments default to being `cl::Hidden`_, and use the aliased
+option's parser to do the conversion from string to data.
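+
+For example, to make a short '``-q``' flag behave exactly like an existing
+'``-quiet``' option:
+
+.. code-block:: c++
+
+  static cl::opt<bool> Quiet("quiet",
+                             cl::desc("Don't print informational messages"));
+  static cl::alias QuietA("q", cl::desc("Alias for -quiet"),
+                          cl::aliasopt(Quiet));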
+
+.. _cl::extrahelp:
+
+The ``cl::extrahelp`` class
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The ``cl::extrahelp`` class is a nontemplated class that allows extra help text
+to be printed out for the ``-help`` option.
+
+.. code-block:: c++
+
+ namespace cl {
+ struct extrahelp;
+ }
+
+To use the extrahelp, simply construct one with a ``const char*`` parameter to
+the constructor. The text passed to the constructor will be printed at the
+bottom of the help message, verbatim. Note that multiple ``cl::extrahelp``
+**can** be used, but this practice is discouraged. If your tool needs to print
+additional help information, put all that help into a single ``cl::extrahelp``
+instance.
+
+For example:
+
+.. code-block:: c++
+
+ cl::extrahelp("\nADDITIONAL HELP:\n\n This is the extra help\n");
+
+.. _different parser:
+.. _discussed previously:
+
+Builtin parsers
+---------------
+
+Parsers control how the string value taken from the command line is translated
+into a typed value, suitable for use in a C++ program. By default, the
+CommandLine library uses an instance of ``parser<type>`` if the command line
+option specifies that it uses values of type '``type``'. Because of this,
+custom option processing is specified with specializations of the '``parser``'
+class.
+
+The CommandLine library provides the following builtin parser specializations,
+which are sufficient for most applications. It can, however, also be extended to
+work with new data types and new ways of interpreting the same data. See the
+`Writing a Custom Parser`_ section for more details on this type of library extension.
+
+.. _enums:
+.. _cl::parser:
+
+* The generic ``parser<t>`` parser can be used to map string values to any data
+ type, through the use of the `cl::values`_ property, which specifies the
+ mapping information. The most common use of this parser is for parsing enum
+ values, which allows you to use the CommandLine library for all of the error
+ checking to make sure that only valid enum values are specified (as opposed to
+ accepting arbitrary strings). Despite this, however, the generic parser class
+ can be used for any data type.
+
+.. _boolean flags:
+.. _bool parser:
+
+* The **parser<bool> specialization** is used to convert boolean strings to a
+ boolean value. Currently accepted strings are "``true``", "``TRUE``",
+ "``True``", "``1``", "``false``", "``FALSE``", "``False``", and "``0``".
+
+* The **parser<boolOrDefault> specialization** is used for cases where the value
+ is boolean, but we also need to know whether the option was specified at all.
+  ``boolOrDefault`` is an enum with 3 values, ``BOU_UNSET``, ``BOU_TRUE`` and
+  ``BOU_FALSE``.  This parser accepts the same strings as ``parser<bool>``.
+
+.. _strings:
+
+* The **parser<string> specialization** simply stores the parsed string into the
+ string value specified. No conversion or modification of the data is
+ performed.
+
+.. _integers:
+.. _int:
+
+* The **parser<int> specialization** uses the C ``strtol`` function to parse the
+ string input. As such, it will accept a decimal number (with an optional '+'
+ or '-' prefix) which must start with a non-zero digit. It accepts octal
+ numbers, which are identified with a '``0``' prefix digit, and hexadecimal
+ numbers with a prefix of '``0x``' or '``0X``'.
+
+.. _doubles:
+.. _float:
+.. _double:
+
+* The **parser<double>** and **parser<float> specializations** use the standard
+ C ``strtod`` function to convert floating point strings into floating point
+ values. As such, a broad range of string formats is supported, including
+ exponential notation (ex: ``1.7e15``) and properly supports locales.
+
+.. _Extension Guide:
+.. _extending the library:
+
+Extension Guide
+===============
+
+Although the CommandLine library has a lot of functionality built into it
+already (as discussed previously), one of its true strengths lies in its
+extensibility. This section discusses how the CommandLine library works under
+the covers and illustrates how to do some simple, common, extensions.
+
+.. _Custom parsers:
+.. _custom parser:
+.. _Writing a Custom Parser:
+
+Writing a custom parser
+-----------------------
+
+One of the simplest and most common extensions is the use of a custom parser.
+As `discussed previously`_, parsers are the portion of the CommandLine library
+that turns string input from the user into a particular parsed data type,
+validating the input in the process.
+
+There are two ways to use a new parser:
+
+#. Specialize the `cl::parser`_ template for your custom data type.
+
+ This approach has the advantage that users of your custom data type will
+ automatically use your custom parser whenever they define an option with a
+ value type of your data type. The disadvantage of this approach is that it
+ doesn't work if your fundamental data type is something that is already
+ supported.
+
+#. Write an independent class, using it explicitly from options that need it.
+
+   This approach works well in situations where you would like to parse an
+ option using special syntax for a not-very-special data-type. The drawback
+ of this approach is that users of your parser have to be aware that they are
+ using your parser instead of the builtin ones.
+
+To guide the discussion, we will discuss a custom parser that accepts file
+sizes, specified with an optional unit after the numeric size. For example, we
+would like to parse "102kb", "41M", "1G" into the appropriate integer value. In
+this case, the underlying data type we want to parse into is '``unsigned``'. We
+choose approach #2 above because we don't want to make this the default for all
+``unsigned`` options.
+
+To start out, we declare our new ``FileSizeParser`` class:
+
+.. code-block:: c++
+
+ struct FileSizeParser : public cl::basic_parser<unsigned> {
+ // parse - Return true on error.
+ bool parse(cl::Option &O, const char *ArgName, const std::string &ArgValue,
+ unsigned &Val);
+ };
+
+Our new class inherits from the ``cl::basic_parser`` template class to fill in
+the default, boilerplate code for us.  We give it the data type that we parse
+into, the last argument to the ``parse`` method, so that clients of our custom
+parser know what object type to pass in to the parse method. (Here we declare
+that we parse into '``unsigned``' variables.)
+
+For most purposes, the only method that must be implemented in a custom parser
+is the ``parse`` method. The ``parse`` method is called whenever the option is
+invoked, passing in the option itself, the option name, the string to parse, and
+a reference to a return value. If the string to parse is not well-formed, the
+parser should output an error message and return true. Otherwise it should
+return false and set '``Val``' to the parsed value. In our example, we
+implement ``parse`` as:
+
+.. code-block:: c++
+
+ bool FileSizeParser::parse(cl::Option &O, const char *ArgName,
+ const std::string &Arg, unsigned &Val) {
+ const char *ArgStart = Arg.c_str();
+ char *End;
+
+ // Parse integer part, leaving 'End' pointing to the first non-integer char
+ Val = (unsigned)strtol(ArgStart, &End, 0);
+
+ while (1) {
+ switch (*End++) {
+ case 0: return false; // No error
+ case 'i': // Ignore the 'i' in KiB if people use that
+ case 'b': case 'B': // Ignore B suffix
+ break;
+
+ case 'g': case 'G': Val *= 1024*1024*1024; break;
+ case 'm': case 'M': Val *= 1024*1024; break;
+ case 'k': case 'K': Val *= 1024; break;
+
+ default:
+ // Print an error message if unrecognized character!
+ return O.error("'" + Arg + "' value invalid for file size argument!");
+ }
+ }
+ }
+
+This function implements a very simple parser for the kinds of strings we are
+interested in. Although it has some holes (it allows "``123KKK``" for example),
+it is good enough for this example. Note that we use the option itself to print
+out the error message (the ``error`` method always returns true) in order to get
+a nice error message (shown below). Now that we have our parser class, we can
+use it like this:
+
+.. code-block:: c++
+
+ static cl::opt<unsigned, false, FileSizeParser>
+ MFS("max-file-size", cl::desc("Maximum file size to accept"),
+ cl::value_desc("size"));
+
+Which adds this to the output of our program:
+
+::
+
+ OPTIONS:
+ -help - display available options (-help-hidden for more)
+ ...
+ -max-file-size=<size> - Maximum file size to accept
+
+And we can test that our parser works correctly now (the test program just prints
+out the max-file-size argument value):
+
+::
+
+ $ ./test
+ MFS: 0
+ $ ./test -max-file-size=123MB
+ MFS: 128974848
+ $ ./test -max-file-size=3G
+ MFS: 3221225472
+ $ ./test -max-file-size=dog
+ -max-file-size option: 'dog' value invalid for file size argument!
+
+It looks like it works. The error message that we get is nice and helpful, and
+we seem to accept reasonable file sizes. This wraps up the "custom parser"
+tutorial.
+
+Exploiting external storage
+---------------------------
+
+Several of the LLVM libraries define static ``cl::opt`` instances that will
+automatically be included in any program that links with that library. This is
+a feature. However, sometimes it is necessary to know the value of the command
+line option outside of the library. In these cases the library does or should
+provide an external storage location that is accessible to users of the
+library. Examples of this include the ``llvm::DebugFlag`` exported by the
+``lib/Support/Debug.cpp`` file and the ``llvm::TimePassesIsEnabled`` flag
+exported by the ``lib/VMCore/PassManager.cpp`` file.
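+
+As a sketch, a client that only needs to read one of these flags can include the
+header that declares the external storage and never touch the CommandLine
+machinery itself (the header path below is the usual home of ``DebugFlag``; the
+function is hypothetical):
+
+.. code-block:: c++
+
+  #include "llvm/Support/Debug.h"
+
+  bool shouldEmitTraceOutput() {
+    // Reads the external storage behind the library's '-debug' option.
+    return llvm::DebugFlag;
+  }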
+
+.. todo::
+
+ TODO: complete this section
+
+.. _dynamically loaded options:
+
+Dynamically adding command line options
+---------------------------------------
+
+.. todo::
+
+ TODO: fill in this section
diff --git a/docs/CompilerWriterInfo.html b/docs/CompilerWriterInfo.html
new file mode 100644
index 00000000000..66190655fa5
--- /dev/null
+++ b/docs/CompilerWriterInfo.html
@@ -0,0 +1,267 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
+ "http://www.w3.org/TR/html4/strict.dtd">
+<html>
+<head>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
+ <title>Architecture/platform information for compiler writers</title>
+ <link rel="stylesheet" href="_static/llvm.css" type="text/css">
+</head>
+
+<body>
+
+<h1>
+ Architecture/platform information for compiler writers
+</h1>
+
+<div class="doc_warning">
+ <p>Note: This document is a work-in-progress. Additions and clarifications
+ are welcome.</p>
+</div>
+
+<ol>
+ <li><a href="#hw">Hardware</a>
+ <ol>
+ <li><a href="#arm">ARM</a></li>
+ <li><a href="#ia64">Itanium</a></li>
+ <li><a href="#mips">MIPS</a></li>
+ <li><a href="#ppc">PowerPC</a></li>
+ <li><a href="#sparc">SPARC</a></li>
+ <li><a href="#x86">X86</a></li>
+ <li><a href="#other">Other lists</a></li>
+ </ol></li>
+ <li><a href="#abi">Application Binary Interface (ABI)</a>
+ <ol>
+ <li><a href="#linux">Linux</a></li>
+ <li><a href="#osx">OS X</a></li>
+ </ol></li>
+ <li><a href="#misc">Miscellaneous resources</a></li>
+</ol>
+
+<div class="doc_author">
+ <p>Compiled by <a href="http://misha.brukman.net">Misha Brukman</a></p>
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="hw">Hardware</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<!-- ======================================================================= -->
+<h3><a name="arm">ARM</a></h3>
+
+<div>
+<ul>
+<li><a href="http://www.arm.com/documentation/">ARM documentation</a>
+(<a href="http://www.arm.com/documentation/ARMProcessor_Cores/">Processor
+Cores</a>)</li>
+<li><a href="http://www.arm.com/products/DevTools/ABI.html">ABI</a></li>
+</ul>
+</div>
+
+<!-- ======================================================================= -->
+<h3><a name="ia64">Itanium (ia64)</a></h3>
+
+<div>
+<ul>
+<li><a
+href="http://developer.intel.com/design/itanium2/documentation.htm">Itanium documentation</a>
+</li>
+</ul>
+</div>
+
+<!-- ======================================================================= -->
+<h3><a name="mips">MIPS</a></h3>
+
+<div>
+<ul>
+<li><a
+href="http://mips.com/content/Documentation/MIPSDocumentation/ProcessorArchitecture/doclibrary">MIPS
+Processor Architecture</a></li>
+</ul>
+</div>
+
+<!-- ======================================================================= -->
+<h3><a name="ppc">PowerPC</a></h3>
+
+<div>
+
+<!-- _______________________________________________________________________ -->
+<h4>IBM - Official manuals and docs</h4>
+
+<div>
+
+<ul>
+<li><a
+href="http://www-106.ibm.com/developerworks/eserver/articles/archguide.html">PowerPC
+Architecture Book</a>
+<ul>
+ <li>Book I: <a
+ href="http://www-106.ibm.com/developerworks/eserver/pdfs/archpub1.pdf">PowerPC
+ User Instruction Set Architecture</a></li>
+ <li>Book II: <a
+ href="http://www-106.ibm.com/developerworks/eserver/pdfs/archpub2.pdf">PowerPC
+ Virtual Environment Architecture</a></li>
+ <li>Book III: <a
+ href="http://www-106.ibm.com/developerworks/eserver/pdfs/archpub3.pdf">PowerPC
+ Operating Environment Architecture</a></li>
+</ul></li>
+<li><a
+href="http://www-3.ibm.com/chips/techlib/techlib.nsf/techdocs/852569B20050FF7785256996007558C6">PowerPC
+Compiler Writer's Guide</a></li>
+<li><A
+href="http://www-3.ibm.com/chips/techlib/techlib.nsf/products/PowerPC">PowerPC
+Processor Manuals</a></li>
+<li><a
+href="http://www-106.ibm.com/developerworks/linux/library/l-powarch/">Intro to
+PowerPC architecture</a></li>
+<li><a href="http://publibn.boulder.ibm.com/doc_link/en_US/a_doc_lib/aixassem/alangref/alangreftfrm.htm">IBM AIX/5L for POWER Assembly reference</a></li>
+</ul>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>Other documents, collections, notes</h4>
+
+<div>
+
+<ul>
+<li><a href="http://penguinppc.org/dev/#library">PowerPC ABI documents</a></li>
+<li><a href="http://gcc.gnu.org/ml/gcc-patches/2003-09/msg00997.html">PowerPC64
+alignment of long doubles (from GCC)</a></li>
+<li><a href="http://sources.redhat.com/ml/binutils/2002-04/msg00573.html">Long
+branch stubs for powerpc64-linux (from binutils)</a></li>
+</ul>
+
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3><a name="sparc">SPARC</a></h3>
+
+<div>
+
+<ul>
+<li><a href="http://www.sparc.org/resource.htm">SPARC resources</a></li>
+<li><a href="http://www.sparc.org/standards.html">SPARC standards</a></li>
+</ul>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3><a name="x86">X86</a></h3>
+
+<div>
+
+<!-- _______________________________________________________________________ -->
+<h4>AMD - Official manuals and docs</h4>
+
+<div>
+<ul>
+<li><a
+href="http://www.amd.com/us-en/Processors/TechnicalResources/0,,30_182_739,00.html">AMD processor manuals</a></li>
+<li><a href="http://www.x86-64.org/documentation">X86-64 ABI</a></li>
+</ul>
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>Intel - Official manuals and docs</h4>
+
+<div>
+<ul>
+<li><a
+href="http://developer.intel.com/design/pentium4/manuals/index_new.htm">IA-32
+manuals</a></li>
+<li><a
+href="http://www.intel.com/design/itanium/documentation.htm?iid=ipp_srvr_proc_itanium2+techdocs">Intel
+Itanium documentation</a></li>
+</ul>
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>Other x86-specific information</h4>
+
+<div>
+<ul>
+<li><a href="http://www.agner.org/assem/calling_conventions.pdf">Calling
+conventions for different C++ compilers and operating systems</a></li>
+</ul>
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3><a name="other">Other relevant lists</a></h3>
+
+<div>
+
+<ul>
+<li><a href="http://gcc.gnu.org/readings.html">GCC reading list</a></li>
+</ul>
+
+</div>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="abi">ABI</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<!-- ======================================================================= -->
+<h3><a name="linux">Linux</a></h3>
+
+<div>
+<ol>
+<li><a href="http://www.linuxbase.org/spec/ELF/ppc64/">PowerPC 64-bit ELF ABI
+Supplement</a></li>
+</ol>
+</div>
+
+<!-- ======================================================================= -->
+<h3><a name="osx">OS X</a></h3>
+
+<div>
+<ol>
+<li><a
+href="http://developer.apple.com/documentation/Darwin/RuntimeArchitecture-date.html">Mach-O
+Runtime Architecture</a></li>
+<li><a href="http://www.unsanity.org/archives/000044.php">Notes on Mach-O
+ABI</a></li>
+</ol>
+
+</div>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="misc">Miscellaneous resources</a></h2>
+<!-- *********************************************************************** -->
+
+<ul>
+<li><a
+href="http://www.nondot.org/sabre/os/articles/ExecutableFileFormats/">Executable
+File Format library</a></li>
+<li><a href="http://gcc.gnu.org/projects/prefetch.html">GCC prefetch project</a>
+page has a good survey of the prefetching capabilities of a variety of modern
+processors.</li>
+</ul>
+
+<!-- *********************************************************************** -->
+
+<hr>
+<address>
+ <a href="http://jigsaw.w3.org/css-validator/check/referer"><img
+ src="http://jigsaw.w3.org/css-validator/images/vcss-blue" alt="Valid CSS"></a>
+ <a href="http://validator.w3.org/check/referer"><img
+ src="http://www.w3.org/Icons/valid-html401-blue" alt="Valid HTML 4.01"></a>
+
+ <a href="http://misha.brukman.net">Misha Brukman</a><br>
+ <a href="http://llvm.org/">LLVM Compiler Infrastructure</a><br>
+ Last modified: $Date$
+</address>
+
+</body>
+</html>
diff --git a/docs/DebuggingJITedCode.html b/docs/DebuggingJITedCode.html
new file mode 100644
index 00000000000..7d52fa76354
--- /dev/null
+++ b/docs/DebuggingJITedCode.html
@@ -0,0 +1,184 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
+ "http://www.w3.org/TR/html4/strict.dtd">
+<html>
+<head>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
+ <title>Debugging JITed Code With GDB</title>
+ <link rel="stylesheet" href="_static/llvm.css" type="text/css">
+</head>
+<body>
+
+<h1>Debugging JIT-ed Code With GDB</h1>
+<ol>
+ <li><a href="#background">Background</a></li>
+ <li><a href="#gdbversion">GDB Version</a></li>
+ <li><a href="#mcjitdebug">Debugging MCJIT-ed code</a></li>
+ <ul>
+ <li><a href="#mcjitdebug_example">Example</a></li>
+ </ul>
+</ol>
+<div class="doc_author">Written by Reid Kleckner and Eli Bendersky</div>
+
+<!--=========================================================================-->
+<h2><a name="background">Background</a></h2>
+<!--=========================================================================-->
+<div>
+
+<p>Without special runtime support, debugging dynamically generated code with
+GDB (as well as most debuggers) can be quite painful. Debuggers generally read
+debug information from the object file of the code, but for JITed code, there is
+no such file to look for.
+</p>
+
+<p>In order to communicate the necessary debug info to GDB, an interface for
+registering JITed code with debuggers has been designed and implemented for
+GDB and LLVM MCJIT. At a high level, whenever MCJIT generates new machine code,
+it does so in an in-memory object file that contains the debug information in
+DWARF format. MCJIT then adds this in-memory object file to a global list of
+dynamically generated object files and calls a special function
+(<tt>__jit_debug_register_code</tt>) marked noinline that GDB knows about. When
+GDB attaches to a process, it puts a breakpoint in this function and loads all
+of the object files in the global list. When MCJIT calls the registration
+function, GDB catches the breakpoint signal, loads the new object file from
+the inferior's memory, and resumes the execution. In this way, GDB can get the
+necessary debug information.
+</p>
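+
+<p>For reference, the registration interface GDB expects looks roughly like the
+following. These declarations are paraphrased from the GDB JIT interface
+documentation rather than copied from MCJIT's sources, so treat them as a
+sketch:</p>
+
+<pre class="doc_code">
+#include &lt;stdint.h&gt;
+
+typedef enum {
+  JIT_NOACTION = 0,
+  JIT_REGISTER_FN,
+  JIT_UNREGISTER_FN
+} jit_actions_t;
+
+struct jit_code_entry {
+  struct jit_code_entry *next_entry;
+  struct jit_code_entry *prev_entry;
+  const char *symfile_addr;   /* the in-memory object file */
+  uint64_t symfile_size;
+};
+
+struct jit_descriptor {
+  uint32_t version;
+  uint32_t action_flag;       /* one of jit_actions_t */
+  struct jit_code_entry *relevant_entry;
+  struct jit_code_entry *first_entry;
+};
+
+/* GDB places a breakpoint in this function; it must not be inlined or
+   optimized away, hence the noinline marking mentioned above. */
+void __jit_debug_register_code(void);
+
+/* The JIT updates this descriptor before calling the registration function. */
+extern struct jit_descriptor __jit_debug_descriptor;
+</pre>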
+</div>
+
+<!--=========================================================================-->
+<h2><a name="gdbversion">GDB Version</a></h2>
+<!--=========================================================================-->
+
+<p>In order to debug code JIT-ed by LLVM, you need GDB 7.0 or newer, which is
+available on most modern distributions of Linux. The version of GDB that Apple
+ships with Xcode has been frozen at 6.3 for a while. LLDB may be a better
+option for debugging JIT-ed code on Mac OS X.
+</p>
+
+
+<!--=========================================================================-->
+<h2><a name="mcjitdebug">Debugging MCJIT-ed code</a></h2>
+<!--=========================================================================-->
+<div>
+
+<p>The emerging MCJIT component of LLVM allows full debugging of JIT-ed code with
+GDB. This is due to MCJIT's ability to use the MC emitter to provide full
+DWARF debugging information to GDB.</p>
+
+<p>Note that lli has to be passed the <tt>-use-mcjit</tt> flag to JIT the code
+with MCJIT instead of the old JIT.</p>
+
+<h3><a name="mcjitdebug_example">Example</a></h3>
+
+<div>
+
+<p>Consider the following C code (with line numbers added to make the example
+easier to follow):</p>
+
+<pre class="doc_code">
+1 int compute_factorial(int n)
+2 {
+3 if (n <= 1)
+4 return 1;
+5
+6 int f = n;
+7 while (--n > 1)
+8 f *= n;
+9 return f;
+10 }
+11
+12
+13 int main(int argc, char** argv)
+14 {
+15 if (argc < 2)
+16 return -1;
+17 char firstletter = argv[1][0];
+18 int result = compute_factorial(firstletter - '0');
+19
+20 // Returned result is clipped at 255...
+21 return result;
+22 }
+</pre>
+
+<p>Here is a sample command line session that shows how to build and run this
+code via lli inside GDB:
+</p>
+
+<pre class="doc_code">
+$ $BINPATH/clang -cc1 -O0 -g -emit-llvm showdebug.c
+$ gdb --quiet --args $BINPATH/lli -use-mcjit showdebug.ll 5
+Reading symbols from $BINPATH/lli...done.
+(gdb) b showdebug.c:6
+No source file named showdebug.c.
+Make breakpoint pending on future shared library load? (y or [n]) y
+Breakpoint 1 (showdebug.c:6) pending.
+(gdb) r
+Starting program: $BINPATH/lli -use-mcjit showdebug.ll 5
+[Thread debugging using libthread_db enabled]
+
+Breakpoint 1, compute_factorial (n=5) at showdebug.c:6
+6 int f = n;
+(gdb) p n
+$1 = 5
+(gdb) p f
+$2 = 0
+(gdb) n
+7 while (--n > 1)
+(gdb) p f
+$3 = 5
+(gdb) b showdebug.c:9
+Breakpoint 2 at 0x7ffff7ed404c: file showdebug.c, line 9.
+(gdb) c
+Continuing.
+
+Breakpoint 2, compute_factorial (n=1) at showdebug.c:9
+9 return f;
+(gdb) p f
+$4 = 120
+(gdb) bt
+#0 compute_factorial (n=1) at showdebug.c:9
+#1 0x00007ffff7ed40a9 in main (argc=2, argv=0x16677e0) at showdebug.c:18
+#2 0x3500000001652748 in ?? ()
+#3 0x00000000016677e0 in ?? ()
+#4 0x0000000000000002 in ?? ()
+#5 0x0000000000d953b3 in llvm::MCJIT::runFunction (this=0x16151f0, F=0x1603020, ArgValues=...) at /home/ebenders_test/llvm_svn_rw/lib/ExecutionEngine/MCJIT/MCJIT.cpp:161
+#6 0x0000000000dc8872 in llvm::ExecutionEngine::runFunctionAsMain (this=0x16151f0, Fn=0x1603020, argv=..., envp=0x7fffffffe040)
+ at /home/ebenders_test/llvm_svn_rw/lib/ExecutionEngine/ExecutionEngine.cpp:397
+#7 0x000000000059c583 in main (argc=4, argv=0x7fffffffe018, envp=0x7fffffffe040) at /home/ebenders_test/llvm_svn_rw/tools/lli/lli.cpp:324
+(gdb) finish
+Run till exit from #0 compute_factorial (n=1) at showdebug.c:9
+0x00007ffff7ed40a9 in main (argc=2, argv=0x16677e0) at showdebug.c:18
+18 int result = compute_factorial(firstletter - '0');
+Value returned is $5 = 120
+(gdb) p result
+$6 = 23406408
+(gdb) n
+21 return result;
+(gdb) p result
+$7 = 120
+(gdb) c
+Continuing.
+
+Program exited with code 0170.
+(gdb)
+
+</pre>
+
+</div>
+</div>
+
+
+<!-- *********************************************************************** -->
+<hr>
+<address>
+ <a href="http://jigsaw.w3.org/css-validator/check/referer"><img
+ src="http://jigsaw.w3.org/css-validator/images/vcss-blue" alt="Valid CSS"></a>
+ <a href="http://validator.w3.org/check/referer"><img
+ src="http://www.w3.org/Icons/valid-html401-blue" alt="Valid HTML 4.01"></a>
+ <a href="mailto:reid.kleckner@gmail.com">Reid Kleckner</a>,
+ <a href="mailto:eliben@gmail.com">Eli Bendersky</a><br>
+ <a href="http://llvm.org/">The LLVM Compiler Infrastructure</a><br>
+ Last modified: $Date$
+</address>
+</body>
+</html>
diff --git a/docs/DeveloperPolicy.rst b/docs/DeveloperPolicy.rst
new file mode 100644
index 00000000000..cda281a25c1
--- /dev/null
+++ b/docs/DeveloperPolicy.rst
@@ -0,0 +1,508 @@
+.. _developer_policy:
+
+=====================
+LLVM Developer Policy
+=====================
+
+.. contents::
+ :local:
+
+Introduction
+============
+
+This document contains the LLVM Developer Policy which defines the project's
+policy towards developers and their contributions. The intent of this policy is
+to eliminate miscommunication, rework, and confusion that might arise from the
+distributed nature of LLVM's development. By stating the policy in clear terms,
+we hope each developer can know ahead of time what to expect when making LLVM
+contributions. This policy covers all llvm.org subprojects, including Clang,
+LLDB, libc++, etc.
+
+This policy is also designed to accomplish the following objectives:
+
+#. Attract both users and developers to the LLVM project.
+
+#. Make life as simple and easy for contributors as possible.
+
+#. Keep the top of Subversion trees as stable as possible.
+
+#. Establish awareness of the project's `copyright, license, and patent
+ policies`_ with contributors to the project.
+
+This policy is aimed at frequent contributors to LLVM. People interested in
+contributing one-off patches can do so in an informal way by sending them to the
+`llvm-commits mailing list
+<http://lists.cs.uiuc.edu/mailman/listinfo/llvm-commits>`_ and engaging another
+developer to see it through the process.
+
+Developer Policies
+==================
+
+This section contains policies that pertain to frequent LLVM developers. We
+always welcome `one-off patches`_ from people who do not routinely contribute to
+LLVM, but we expect more from frequent contributors to keep the system as
+efficient as possible for everyone. Frequent LLVM contributors are expected to
+meet the following requirements in order for LLVM to maintain a high standard of
+quality.
+
+Stay Informed
+-------------
+
+Developers should stay informed by reading at least the "dev" mailing list for
+the projects they are interested in, such as `llvmdev
+<http://lists.cs.uiuc.edu/mailman/listinfo/llvmdev>`_ for LLVM, `cfe-dev
+<http://lists.cs.uiuc.edu/mailman/listinfo/cfe-dev>`_ for Clang, or `lldb-dev
+<http://lists.cs.uiuc.edu/mailman/listinfo/lldb-dev>`_ for LLDB. If you are
+doing anything more than just casual work on LLVM, it is suggested that you also
+subscribe to the "commits" mailing list for the subproject you're interested in,
+such as `llvm-commits
+<http://lists.cs.uiuc.edu/mailman/listinfo/llvm-commits>`_, `cfe-commits
+<http://lists.cs.uiuc.edu/mailman/listinfo/cfe-commits>`_, or `lldb-commits
+<http://lists.cs.uiuc.edu/mailman/listinfo/lldb-commits>`_. Reading the
+"commits" list and paying attention to changes being made by others is a good
+way to see what other people are interested in and watching the flow of the
+project as a whole.
+
+We recommend that active developers register an email account with `LLVM
+Bugzilla <http://llvm.org/bugs/>`_ and preferably subscribe to the `llvm-bugs
+<http://lists.cs.uiuc.edu/mailman/listinfo/llvmbugs>`_ email list to keep track
+of bugs and enhancements occurring in LLVM. We really appreciate people who are
+proactive at catching incoming bugs in their components and dealing with them
+promptly.
+
+.. _patch:
+.. _one-off patches:
+
+Making a Patch
+--------------
+
+When making a patch for review, the goal is to make it as easy for the reviewer
+to read it as possible. As such, we recommend that you:
+
+#. Make your patch against the Subversion trunk, not a branch, and not an old
+ version of LLVM. This makes it easy to apply the patch. For information on
+ how to check out SVN trunk, please see the `Getting Started
+ Guide <GettingStarted.html#checkout>`_.
+
+#. Similarly, patches should be submitted soon after they are generated. Old
+ patches may not apply correctly if the underlying code changes between the
+ time the patch was created and the time it is applied.
+
+#. Patches should be made with ``svn diff``, or similar. If you use a
+ different tool, make sure it uses the ``diff -u`` format and that it
+ doesn't contain clutter which makes it hard to read.
+
+#. If you are modifying generated files, such as the top-level ``configure``
+   script, please put those changes into a separate patch from the rest of your
+   changes.
+
+When sending a patch to a mailing list, it is a good idea to send it as an
+*attachment* to the message, not embedded into the text of the message. This
+ensures that your mailer will not mangle the patch when it sends it (e.g. by
+making whitespace changes or by wrapping lines).
+
+*For Thunderbird users:* Before submitting a patch, please open *Preferences >
+Advanced > General > Config Editor*, find the key
+``mail.content_disposition_type``, and set its value to ``1``. Without this
+setting, Thunderbird sends your attachment using ``Content-Disposition: inline``
+rather than ``Content-Disposition: attachment``. Apple Mail gamely displays such
+a file inline, making it difficult to work with for reviewers using that
+program.
+
+.. _code review:
+
+Code Reviews
+------------
+
+LLVM has a code review policy. Code review is one way to increase the quality of
+software. We generally follow these policies:
+
+#. All developers are required to have significant changes reviewed before they
+ are committed to the repository.
+
+#. Code reviews are conducted by email, usually on the llvm-commits list.
+
+#. Code can be reviewed either before it is committed or after. We expect major
+ changes to be reviewed before being committed, but smaller changes (or
+ changes where the developer owns the component) can be reviewed after commit.
+
+#. The developer responsible for a code change is also responsible for making
+ all necessary review-related changes.
+
+#. Code review can be an iterative process, which continues until the patch is
+ ready to be committed.
+
+Developers should participate in code reviews as both reviewers and
+reviewees. If someone is kind enough to review your code, you should return the
+favor for someone else. Note that anyone is welcome to review and give feedback
+on a patch, but only people with Subversion write access can approve it.
+
+Code Owners
+-----------
+
+The LLVM Project relies on two features of its process to maintain rapid
+development in addition to the high quality of its source base: the combination
+of code review plus post-commit review for trusted maintainers. Having both is
+a great way for the project to take advantage of the fact that most people do
+the right thing most of the time, and only commit patches without pre-commit
+review when they are confident they are right.
+
+The trick to this is that the project has to guarantee that all patches that are
+committed are reviewed after they go in: you don't want everyone to assume
+someone else will review it, allowing the patch to go unreviewed. To solve this
+problem, we have a notion of an 'owner' for a piece of the code. The sole
+responsibility of a code owner is to ensure that a commit to their area of the
+code is appropriately reviewed, either by themselves or by someone else. The list
+of current code owners can be found in the file
+`CODE_OWNERS.TXT <http://llvm.org/viewvc/llvm-project/llvm/trunk/CODE_OWNERS.TXT?view=markup>`_
+in the root of the LLVM source tree.
+
+Note that code ownership is completely different than reviewers: anyone can
+review a piece of code, and we welcome code review from anyone who is
+interested. Code owners are the "last line of defense" to guarantee that all
+patches that are committed are actually reviewed.
+
+Being a code owner is a somewhat unglamorous position, but it is incredibly
+important for the ongoing success of the project. Because people get busy,
+interests change, and unexpected things happen, code ownership is purely opt-in,
+and anyone can choose to resign their "title" at any time. For now, we do not
+have an official policy on how one gets elected to be a code owner.
+
+.. _include a testcase:
+
+Test Cases
+----------
+
+Developers are required to create test cases for any bugs fixed and any new
+features added. Some tips for getting your testcase approved:
+
+* All feature and regression test cases are added to the ``llvm/test``
+ directory. The appropriate sub-directory should be selected (see the `Testing
+ Guide <TestingGuide.html>`_ for details).
+
+* Test cases should be written in `LLVM assembly language <LangRef.html>`_
+ unless the feature or regression being tested requires another language
+ (e.g. the bug being fixed or feature being implemented is in the llvm-gcc C++
+ front-end, in which case it must be written in C++).
+
+* Test cases, especially for regressions, should be reduced as much as possible,
+ by `bugpoint <Bugpoint.html>`_ or manually. It is unacceptable to place an
+ entire failing program into ``llvm/test`` as this creates a *time-to-test*
+ burden on all developers. Please keep them short.
+
+Note that llvm/test and clang/test are designed for regression and small feature
+tests only. More extensive test cases (e.g., entire applications, benchmarks,
+etc) should be added to the ``llvm-test`` test suite. The llvm-test suite is
+for coverage (correctness, performance, etc) testing, not feature or regression
+testing.
+
+Quality
+-------
+
+The minimum quality standards that any change must satisfy before being
+committed to the main development branch are:
+
+#. Code must adhere to the `LLVM Coding Standards <CodingStandards.html>`_.
+
+#. Code must compile cleanly (no errors, no warnings) on at least one platform.
+
+#. Bug fixes and new features should `include a testcase`_ so we know if the
+ fix/feature ever regresses in the future.
+
+#. Code must pass the ``llvm/test`` test suite.
+
+#. The code must not cause regressions on a reasonable subset of llvm-test,
+ where "reasonable" depends on the contributor's judgement and the scope of
+ the change (more invasive changes require more testing). A reasonable subset
+ might be something like "``llvm-test/MultiSource/Benchmarks``".
+
+Additionally, the committer is responsible for addressing any problems found in
+the future that the change is responsible for. For example:
+
+* The code should compile cleanly on all supported platforms.
+
+* The changes should not cause any correctness regressions in the ``llvm-test``
+ suite and must not cause any major performance regressions.
+
+* The change set should not cause performance or correctness regressions for the
+ LLVM tools.
+
+* The changes should not cause performance or correctness regressions in code
+ compiled by LLVM on all applicable targets.
+
+* You are expected to address any `Bugzilla bugs <http://llvm.org/bugs/>`_ that
+ result from your change.
+
+We prefer that this be handled before submission, but we understand that it
+isn't possible to test all of this for every submission. Our build bots and
+nightly testing infrastructure normally find these problems. A good rule of
+thumb is
+to check the nightly testers for regressions the day after your change. Build
+bots will directly email you if a group of commits that included yours caused a
+failure. You are expected to check the build bot messages to see if they are
+your fault and, if so, fix the breakage.
+
+Commits that violate these quality standards (e.g. are very broken) may be
+reverted. This is necessary when the change blocks other developers from making
+progress. The developer is welcome to re-commit the change after the problem has
+been fixed.
+
+Obtaining Commit Access
+-----------------------
+
+We grant commit access to contributors with a track record of submitting high
+quality patches. If you would like commit access, please send an email to
+`Chris <mailto:sabre@nondot.org>`_ with the following information:
+
+#. The user name you want to commit with, e.g. "hacker".
+
+#. The full name and email address you want messages to llvm-commits to come
+ from, e.g. "J. Random Hacker <hacker@yoyodyne.com>".
+
+#. A "password hash" of the password you want to use, e.g. "``2ACR96qjUqsyM``".
+ Note that you don't ever tell us what your password is, you just give it to
+ us in an encrypted form. To get this, run "``htpasswd``" (a utility that
+   comes with Apache) in crypt mode (often enabled with "``-d``"), or find a web
+ page that will do it for you.
+
+Once you've been granted commit access, you should be able to check out an LLVM
+tree with an SVN URL of "https://username@llvm.org/..." instead of the normal
+anonymous URL of "http://llvm.org/...". The first time you commit you'll have
+to type in your password. Note that you may get a warning from SVN about an
+untrusted key, you can ignore this. To verify that your commit access works,
+please do a test commit (e.g. change a comment or add a blank line). Your first
+commit to a repository may require the autogenerated email to be approved by a
+mailing list. This is normal, and will be done when the mailing list owner has
+time.
+
+If you have recently been granted commit access, these policies apply:
+
+#. You are granted *commit-after-approval* to all parts of LLVM. To get
+ approval, submit a `patch`_ to `llvm-commits
+ <http://lists.cs.uiuc.edu/mailman/listinfo/llvm-commits>`_. When approved
+   you may commit it yourself.
+
+#. You are allowed to commit patches without approval which you think are
+ obvious. This is clearly a subjective decision --- we simply expect you to
+ use good judgement. Examples include: fixing build breakage, reverting
+ obviously broken patches, documentation/comment changes, any other minor
+ changes.
+
+#. You are allowed to commit patches without approval to those portions of LLVM
+ that you have contributed or maintain (i.e., have been assigned
+ responsibility for), with the proviso that such commits must not break the
+ build. This is a "trust but verify" policy and commits of this nature are
+ reviewed after they are committed.
+
+#. Multiple violations of these policies or a single egregious violation may
+ cause commit access to be revoked.
+
+In any case, your changes are still subject to `code review`_ (either before or
+after they are committed, depending on the nature of the change). You are
+encouraged to review other peoples' patches as well, but you aren't required
+to.
+
+.. _discuss the change/gather consensus:
+
+Making a Major Change
+---------------------
+
+When a developer begins a major new project with the aim of contributing it back
+to LLVM, s/he should inform the community with an email to the `llvmdev
+<http://lists.cs.uiuc.edu/mailman/listinfo/llvmdev>`_ email list, to the extent
+possible. The reason for this is to:
+
+#. keep the community informed about future changes to LLVM,
+
+#. avoid duplication of effort by preventing multiple parties working on the
+ same thing and not knowing about it, and
+
+#. ensure that any technical issues around the proposed work are discussed and
+ resolved before any significant work is done.
+
+The design of LLVM is carefully controlled to ensure that all the pieces fit
+together well and are as consistent as possible. If you plan to make a major
+change to the way LLVM works or want to add a major new extension, it is a good
+idea to get consensus with the development community before you start working on
+it.
+
+Once the design of the new feature is finalized, the work itself should be done
+as a series of `incremental changes`_, not as a long-term development branch.
+
+.. _incremental changes:
+
+Incremental Development
+-----------------------
+
+In the LLVM project, we do all significant changes as a series of incremental
+patches. We have a strong dislike for huge changes or long-term development
+branches. Long-term development branches have a number of drawbacks:
+
+#. Branches must have mainline merged into them periodically. If the branch
+ development and mainline development occur in the same pieces of code,
+ resolving merge conflicts can take a lot of time.
+
+#. Other people in the community tend to ignore work on branches.
+
+#. Huge changes (produced when a branch is merged back onto mainline) are
+ extremely difficult to `code review`_.
+
+#. Branches are not routinely tested by our nightly tester infrastructure.
+
+#. Changes developed as monolithic large changes often don't work until the
+ entire set of changes is done. Breaking it down into a set of smaller
+ changes increases the odds that any of the work will be committed to the main
+ repository.
+
+To address these problems, LLVM uses an incremental development style and we
+require contributors to follow this practice when making a large/invasive
+change. Some tips:
+
+* Large/invasive changes usually have a number of secondary changes that are
+ required before the big change can be made (e.g. API cleanup, etc). These
+ sorts of changes can often be done before the major change is done,
+ independently of that work.
+
+* The remaining inter-related work should be decomposed into unrelated sets of
+ changes if possible. Once this is done, define the first increment and get
+ consensus on what the end goal of the change is.
+
+* Each change in the set can stand alone (e.g. to fix a bug), or be part of a
+  planned series of changes that works towards the development goal.
+
+* Each change should be kept as small as possible. This simplifies your work
+ (into a logical progression), simplifies code review and reduces the chance
+ that you will get negative feedback on the change. Small increments also
+ facilitate the maintenance of a high quality code base.
+
+* Often, an independent precursor to a big change is to add a new API and slowly
+ migrate clients to use the new API. Each change to use the new API is often
+ "obvious" and can be committed without review. Once the new API is in place
+ and used, it is much easier to replace the underlying implementation of the
+ API. This implementation change is logically separate from the API
+ change.
+
+If you are interested in making a large change, and this scares you, please make
+sure to first `discuss the change/gather consensus`_ then ask about the best way
+to go about making the change.
+
+Attribution of Changes
+----------------------
+
+We believe in correct attribution of contributions to their contributors.
+However, we do not want the source code to be littered with random attributions
+"this code written by J. Random Hacker" (this is noisy and distracting). In
+practice, the revision control system keeps a perfect history of who changed
+what, and the CREDITS.txt file describes higher-level contributions. If you
+commit a patch for someone else, please say "patch contributed by J. Random
+Hacker!" in the commit message.
+
+Overall, please do not add contributor names to the source code.
+
+.. _copyright, license, and patent policies:
+
+Copyright, License, and Patents
+===============================
+
+.. note::
+
+ This section deals with legal matters but does not provide legal advice. We
+ are not lawyers --- please seek legal counsel from an attorney.
+
+This section addresses the issues of copyright, license and patents for the LLVM
+project. The copyright for the code is held by the individual contributors of
+the code and the terms of its license to LLVM users and developers is the
+`University of Illinois/NCSA Open Source License
+<http://www.opensource.org/licenses/UoI-NCSA.php>`_ (with portions dual licensed
+under the `MIT License <http://www.opensource.org/licenses/mit-license.php>`_,
+see below). As a contributor to the LLVM project, you agree to allow any
+contributions to the project to be licensed under these terms.
+
+Copyright
+---------
+
+The LLVM project does not require copyright assignments, which means that the
+copyright for the code in the project is held by its respective contributors who
+have each agreed to release their contributed code under the terms of the `LLVM
+License`_.
+
+An implication of this is that the LLVM license is unlikely to ever change:
+changing it would require tracking down all the contributors to LLVM and getting
+them to agree that a license change is acceptable for their contribution. Since
+there are no plans to change the license, this is not a cause for concern.
+
+As a contributor to the project, this means that you (or your company) retain
+ownership of the code you contribute, that it cannot be used in a way that
+contradicts the license (which is a liberal BSD-style license), and that the
+license for your contributions won't change without your approval in the
+future.
+
+.. _LLVM License:
+
+License
+-------
+
+We intend to keep LLVM perpetually open source and to use a liberal open source
+license. **As a contributor to the project, you agree that any contributions be
+licensed under the terms of the corresponding subproject.** All of the code in
+LLVM is available under the `University of Illinois/NCSA Open Source License
+<http://www.opensource.org/licenses/UoI-NCSA.php>`_, which boils down to
+this:
+
+* You can freely distribute LLVM.
+* You must retain the copyright notice if you redistribute LLVM.
+* Binaries derived from LLVM must reproduce the copyright notice (e.g. in an
+ included readme file).
+* You can't use our names to promote your LLVM derived products.
+* There's no warranty on LLVM at all.
+
+We believe this fosters the widest adoption of LLVM because it **allows
+commercial products to be derived from LLVM** with few restrictions and without
+a requirement for making any derived works also open source (i.e. LLVM's
+license is not a "copyleft" license like the GPL). We suggest that you read the
+`License <http://www.opensource.org/licenses/UoI-NCSA.php>`_ if further
+clarification is needed.
+
+In addition to the UIUC license, the runtime library components of LLVM
+(**compiler_rt, libc++, and libclc**) are also licensed under the `MIT License
+<http://www.opensource.org/licenses/mit-license.php>`_, which does not contain
+the binary redistribution clause. As a user of these runtime libraries, this
+means that you can choose to use the code under either license (and thus don't
+need the binary redistribution clause), and as a contributor to the code, you
+agree that any contributions to these libraries be licensed under both
+licenses. We feel that this is important for runtime libraries, because they
+are implicitly linked into applications and therefore should not subject those
+applications to the binary redistribution clause. This also means that it is ok
+to move code from (e.g.) libc++ to the LLVM core without concern, but that code
+cannot be moved from the LLVM core to libc++ without the copyright owner's
+permission.
+
+Note that the LLVM Project does distribute llvm-gcc and dragonegg, **which are
+GPL.** This means that anything "linked" into llvm-gcc must itself be compatible
+with the GPL, and must be releasable under the terms of the GPL. This implies
+that **any code linked into llvm-gcc and distributed to others may be subject to
+the viral aspects of the GPL** (for example, a proprietary code generator linked
+into llvm-gcc must be made available under the GPL). This is not a problem for
+code already distributed under a more liberal license (like the UIUC license),
+and GPL-containing subprojects are kept in separate SVN repositories whose
+LICENSE.txt files specifically indicate that they contain GPL code.
+
+We have no plans to change the license of LLVM. If you have questions or
+comments about the license, please contact the `LLVM Developer's Mailing
+List <mailto:llvmdev@cs.uiuc.edu>`_.
+
+Patents
+-------
+
+To the best of our knowledge, LLVM does not infringe on any patents (we have
+actually removed code from LLVM in the past that was found to infringe). Having
+code in LLVM that infringes on patents would violate an important goal of the
+project by making it hard or impossible to reuse the code for arbitrary purposes
+(including commercial use).
+
+When contributing code, we expect contributors to notify us of any potential for
+patent-related trouble with their changes (including from third parties). If
+you or your employer own the rights to a patent and would like to contribute
+code to LLVM that relies on it, we require that the copyright owner sign an
+agreement that allows any other user of LLVM to freely use your patent. Please
+contact the `oversight group <mailto:llvm-oversight@cs.uiuc.edu>`_ for more
+details.
diff --git a/docs/ExceptionHandling.rst b/docs/ExceptionHandling.rst
new file mode 100644
index 00000000000..190f18261da
--- /dev/null
+++ b/docs/ExceptionHandling.rst
@@ -0,0 +1,367 @@
+.. _exception_handling:
+
+==========================
+Exception Handling in LLVM
+==========================
+
+.. contents::
+ :local:
+
+Introduction
+============
+
+This document is the central repository for all information pertaining to
+exception handling in LLVM. It describes the format that LLVM exception
+handling information takes, which is useful for those interested in creating
+front-ends or dealing directly with the information. Further, this document
+provides specific examples of what exception handling information is used for in
+C and C++.
+
+Itanium ABI Zero-cost Exception Handling
+----------------------------------------
+
+Exception handling for most programming languages is designed to recover from
+conditions that rarely occur during general use of an application. To that end,
+exception handling should not interfere with the main flow of an application's
+algorithm by performing checkpointing tasks, such as saving the current pc or
+register state.
+
+The Itanium ABI Exception Handling Specification defines a methodology for
+providing outlying data in the form of exception tables without inlining
+speculative exception handling code in the flow of an application's main
+algorithm. Thus, the specification is said to add "zero-cost" to the normal
+execution of an application.
+
+A more complete description of the Itanium ABI exception handling runtime
+support can be found at `Itanium C++ ABI: Exception Handling
+<http://www.codesourcery.com/cxx-abi/abi-eh.html>`_. A description of the
+exception frame format can be found at `Exception Frames
+<http://refspecs.freestandards.org/LSB_3.0.0/LSB-Core-generic/LSB-Core-generic/ehframechpt.html>`_,
+with details of the DWARF 4 specification at `DWARF 4 Standard
+<http://dwarfstd.org/Dwarf4Std.php>`_. A description for the C++ exception
+table formats can be found at `Exception Handling Tables
+<http://www.codesourcery.com/cxx-abi/exceptions.pdf>`_.
+
+Setjmp/Longjmp Exception Handling
+---------------------------------
+
+Setjmp/Longjmp (SJLJ) based exception handling uses LLVM intrinsics
+`llvm.eh.sjlj.setjmp`_ and `llvm.eh.sjlj.longjmp`_ to handle control flow for
+exception handling.
+
+For each function which does exception processing --- be it ``try``/``catch``
+blocks or cleanups --- that function registers itself on a global frame
+list. When exceptions are unwinding, the runtime uses this list to identify
+which functions need processing.
+
+Landing pad selection is encoded in the call site entry of the function
+context. The runtime returns to the function via `llvm.eh.sjlj.longjmp`_, where
+a switch table transfers control to the appropriate landing pad based on the
+index stored in the function context.
+
+In contrast to DWARF exception handling, which encodes exception regions and
+frame information in out-of-line tables, SJLJ exception handling builds and
+removes the unwind frame context at runtime. This results in faster exception
+handling at the expense of slower execution when no exceptions are thrown. As
+exceptions are, by their nature, intended for uncommon code paths, DWARF
+exception handling is generally preferred to SJLJ.
+
+Overview
+--------
+
+When an exception is thrown in LLVM code, the runtime does its best to find a
+handler suited to processing the circumstance.
+
+The runtime first attempts to find an *exception frame* corresponding to the
+function where the exception was thrown. If the programming language supports
+exception handling (e.g. C++), the exception frame contains a reference to an
+exception table describing how to process the exception. If the language does
+not support exception handling (e.g. C), or if the exception needs to be
+forwarded to a prior activation, the exception frame contains information about
+how to unwind the current activation and restore the state of the prior
+activation. This process is repeated until the exception is handled. If the
+exception is not handled and no activations remain, then the application is
+terminated with an appropriate error message.
+
+Because different programming languages have different behaviors when handling
+exceptions, the exception handling ABI provides a mechanism for
+supplying *personalities*. An exception handling personality is defined by
+way of a *personality function* (e.g. ``__gxx_personality_v0`` in C++),
+which receives the context of the exception, an *exception structure*
+containing the exception object type and value, and a reference to the exception
+table for the current function. The personality function for the current
+compile unit is specified in a *common exception frame*.
+
+The organization of an exception table is language dependent. For C++, an
+exception table is organized as a series of code ranges defining what to do if
+an exception occurs in that range. Typically, the information associated with a
+range defines which types of exception objects (using C++ *type info*) are
+handled in that range, and an associated action that should take place. Actions
+typically pass control to a *landing pad*.
+
+A landing pad corresponds roughly to the code found in the ``catch`` portion of
+a ``try``/``catch`` sequence. When execution resumes at a landing pad, it
+receives an *exception structure* and a *selector value* corresponding to the
+*type* of exception thrown. The selector is then used to determine which *catch*
+should actually process the exception.
+
+LLVM Code Generation
+====================
+
+From a C++ developer's perspective, exceptions are defined in terms of the
+``throw`` and ``try``/``catch`` statements. In this section we will describe the
+implementation of LLVM exception handling in terms of C++ examples.
+
+Throw
+-----
+
+Languages that support exception handling typically provide a ``throw``
+operation to initiate the exception process. Internally, a ``throw`` operation
+breaks down into two steps.
+
+#. A request is made to allocate exception space for an exception structure.
+ This structure needs to survive beyond the current activation. This structure
+ will contain the type and value of the object being thrown.
+
+#. A call is made to the runtime to raise the exception, passing the exception
+ structure as an argument.
+
+In C++, the allocation of the exception structure is done by the
+``__cxa_allocate_exception`` runtime function. The exception raising is handled
+by ``__cxa_throw``. The type of the exception is represented using a C++ RTTI
+structure.
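+
+For illustration only (this is a hand-written sketch, not output copied from any
+particular front end), a C++ ``throw 42;`` might lower to IR along these lines:
+
+.. code-block:: llvm
+
+   %exn = call i8* @__cxa_allocate_exception(i64 4) nounwind
+   %payload = bitcast i8* %exn to i32*
+   store i32 42, i32* %payload
+   ; second argument is the RTTI for int; third is the destructor (none for int)
+   call void @__cxa_throw(i8* %exn, i8* bitcast (i8** @_ZTIi to i8*), i8* null) noreturn
+   unreachable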
+
+Try/Catch
+---------
+
+A call within the scope of a *try* statement can potentially raise an
+exception. In those circumstances, the LLVM C++ front-end replaces the call with
+an ``invoke`` instruction. Unlike a call, the ``invoke`` has two potential
+continuation points:
+
+#. where to continue when the call succeeds as per normal, and
+
+#. where to continue if the call raises an exception, either by a throw or the
+ unwinding of a throw
+
+The place where an ``invoke`` continues after an exception is raised is called
+a *landing pad*. LLVM landing pads are conceptually
+alternative function entry points where an exception structure reference and a
+type info index are passed in as arguments. The landing pad saves the exception
+structure reference and then proceeds to select the catch block that corresponds
+to the type info of the exception object.
+
+The LLVM `landingpad instruction <LangRef.html#i_landingpad>`_ is used to convey
+information about the landing pad to the back end. For C++, the ``landingpad``
+instruction returns a pointer and integer pair corresponding to the pointer to
+the *exception structure* and the *selector value* respectively.
+
+The ``landingpad`` instruction takes a reference to the personality function to
+be used for this ``try``/``catch`` sequence. The remainder of the instruction is
+a list of *cleanup*, *catch*, and *filter* clauses. The exception is tested
+against the clauses sequentially from first to last. The selector value is a
+positive number if the exception matched a type info, a negative number if it
+matched a filter, and zero if it matched a cleanup. If nothing is matched, the
+behavior of the program is `undefined`_. If a type info matched, then the
+selector value is the index of the type info in the exception table, which can
+be obtained using the `llvm.eh.typeid.for`_ intrinsic.
+
+Once the landing pad has the type info selector, the code branches to the code
+for the first catch. The catch then checks the value of the type info selector
+against the index of type info for that catch. Since the type info index is not
+known until all the type infos have been gathered in the backend, the catch code
+must call the `llvm.eh.typeid.for`_ intrinsic to determine the index for a given
+type info. If the catch fails to match the selector then control is passed on to
+the next catch.
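+
+Putting these pieces together, a hand-written sketch of the IR for a
+``try``/``catch`` that catches an ``int`` might look like the following (names
+such as ``@may_throw`` are placeholders, not part of any runtime):
+
+.. code-block:: llvm
+
+   entry:
+     %result = invoke i32 @may_throw()
+                 to label %cont unwind label %lpad
+
+   lpad:
+     ; returns the exception structure pointer and the selector value
+     %lp = landingpad { i8*, i32 }
+             personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
+             catch i8* bitcast (i8** @_ZTIi to i8*)
+     %sel = extractvalue { i8*, i32 } %lp, 1
+     %int.id = call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @_ZTIi to i8*))
+     %matches = icmp eq i32 %sel, %int.id
+     br i1 %matches, label %catch.int, label %eh.resume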
+
+Finally, the entry and exit of catch code is bracketed with calls to
+``__cxa_begin_catch`` and ``__cxa_end_catch``.
+
+* ``__cxa_begin_catch`` takes an exception structure reference as an argument
+ and returns the value of the exception object.
+
+* ``__cxa_end_catch`` takes no arguments. This function:
+
+ #. Locates the most recently caught exception and decrements its handler
+ count,
+
+ #. Removes the exception from the *caught* stack if the handler count goes to
+ zero, and
+
+ #. Destroys the exception if the handler count goes to zero and the exception
+ was not re-thrown by throw.
+
+ .. note::
+
+ a rethrow from within the catch may replace this call with a
+ ``__cxa_rethrow``.
+
+Cleanups
+--------
+
+A cleanup is extra code which needs to be run as part of unwinding a scope. C++
+destructors are a typical example, but other languages and language extensions
+provide a variety of different kinds of cleanups. In general, a landing pad may
+need to run arbitrary amounts of cleanup code before actually entering a catch
+block. To indicate the presence of cleanups, a `landingpad
+instruction <LangRef.html#i_landingpad>`_ should have a *cleanup*
+clause. Otherwise, the unwinder will not stop at the landing pad if there are no
+catches or filters that require it to.
+
+.. note::
+
+ Do not allow a new exception to propagate out of the execution of a
+ cleanup. This can corrupt the internal state of the unwinder. Different
+ languages describe different high-level semantics for these situations: for
+ example, C++ requires that the process be terminated, whereas Ada cancels both
+ exceptions and throws a third.
+
+When all cleanups are finished, if the exception is not handled by the current
+function, resume unwinding by calling the `resume
+instruction <LangRef.html#i_resume>`_, passing in the result of the
+``landingpad`` instruction for the original landing pad.
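+
+A minimal sketch of a cleanup-only landing pad follows; ``@run_cleanups`` stands
+in for whatever destructor calls the front end would emit at this point:
+
+.. code-block:: llvm
+
+   lpad:
+     %lp = landingpad { i8*, i32 }
+             personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
+             cleanup
+     call void @run_cleanups()
+     ; not handled here, so continue unwinding with the original landingpad value
+     resume { i8*, i32 } %lp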
+
+Throw Filters
+-------------
+
+C++ allows the specification of which exception types may be thrown from a
+function. To represent this, a top level landing pad may exist to filter out
+invalid types. To express this in LLVM code the `landingpad
+instruction <LangRef.html#i_landingpad>`_ will have a filter clause. The clause
+consists of an array of type infos. ``landingpad`` will return a negative value
+if the exception does not match any of the type infos. If no match is found then
+a call to ``__cxa_call_unexpected`` should be made, otherwise
+``_Unwind_Resume``. Each of these functions requires a reference to the
+exception structure. Note that the most general form of a ``landingpad``
+instruction can have any number of catch, cleanup, and filter clauses (though
+having more than one cleanup is pointless). The LLVM C++ front-end can generate
+such ``landingpad`` instructions due to inlining creating nested exception
+handling scopes.
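+
+For example, a landing pad for a function declared ``throw(int)`` might carry a
+filter clause along these lines (a sketch; the bitcasts follow the usual C++
+type info idiom shown above):
+
+.. code-block:: llvm
+
+   lpad:
+     %lp = landingpad { i8*, i32 }
+             personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
+             filter [1 x i8*] [i8* bitcast (i8** @_ZTIi to i8*)]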
+
+.. _undefined:
+
+Restrictions
+------------
+
+The unwinder delegates the decision of whether to stop in a call frame to that
+call frame's language-specific personality function. Not all unwinders guarantee
+that they will stop to perform cleanups. For example, the GNU C++ unwinder
+doesn't do so unless the exception is actually caught somewhere further up the
+stack.
+
+In order for inlining to behave correctly, landing pads must be prepared to
+handle selector results that they did not originally advertise. Suppose that a
+function catches exceptions of type ``A``, and it's inlined into a function that
+catches exceptions of type ``B``. The inliner will update the ``landingpad``
+instruction for the inlined landing pad to include the fact that ``B`` is also
+caught. If that landing pad assumes that it will only be entered to catch an
+``A``, it's in for a rude awakening. Consequently, landing pads must test for
+the selector results they understand and then resume exception propagation with
+the `resume instruction <LangRef.html#i_resume>`_ if none of the conditions
+match.
+
+Exception Handling Intrinsics
+=============================
+
+In addition to the ``landingpad`` and ``resume`` instructions, LLVM uses several
+intrinsic functions (name prefixed with ``llvm.eh``) to provide exception
+handling information at various points in generated code.
+
+.. _llvm.eh.typeid.for:
+
+llvm.eh.typeid.for
+------------------
+
+.. code-block:: llvm
+
+ i32 @llvm.eh.typeid.for(i8* %type_info)
+
+
+This intrinsic returns the type info index in the exception table of the current
+function. This value can be used to compare against the result of the
+``landingpad`` instruction. The single argument is a reference to a type info.
+
+.. _llvm.eh.sjlj.setjmp:
+
+llvm.eh.sjlj.setjmp
+-------------------
+
+.. code-block:: llvm
+
+ i32 @llvm.eh.sjlj.setjmp(i8* %setjmp_buf)
+
+For SJLJ based exception handling, this intrinsic forces register saving for the
+current function and stores the address of the following instruction for use as
+a destination address by `llvm.eh.sjlj.longjmp`_. The buffer format and the
+overall functioning of this intrinsic are compatible with the GCC
+``__builtin_setjmp`` implementation, allowing code built with clang and GCC to
+interoperate.
+
+The single parameter is a pointer to a five word buffer in which the calling
+context is saved. The front end places the frame pointer in the first word, and
+the target implementation of this intrinsic should place the destination address
+for a `llvm.eh.sjlj.longjmp`_ in the second word. The following three words are
+available for use in a target-specific manner.
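+
+As a rough sketch of the layout described above (a front end may emit different
+code; this is only illustrative):
+
+.. code-block:: llvm
+
+   %buf = alloca [5 x i8*], align 8
+   ; word 1: the frame pointer of the current function
+   %fp = call i8* @llvm.frameaddress(i32 0)
+   %fp.slot = getelementptr [5 x i8*]* %buf, i32 0, i32 0
+   store i8* %fp, i8** %fp.slot
+   ; word 2 is filled in by the target with the longjmp destination address
+   %buf.ptr = bitcast [5 x i8*]* %buf to i8*
+   %r = call i32 @llvm.eh.sjlj.setjmp(i8* %buf.ptr)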
+
+.. _llvm.eh.sjlj.longjmp:
+
+llvm.eh.sjlj.longjmp
+--------------------
+
+.. code-block:: llvm
+
+ void @llvm.eh.sjlj.longjmp(i8* %setjmp_buf)
+
+For SJLJ based exception handling, the ``llvm.eh.sjlj.longjmp`` intrinsic is
+used to implement ``__builtin_longjmp()``. The single parameter is a pointer to
+a buffer populated by `llvm.eh.sjlj.setjmp`_. The frame pointer and stack
+pointer are restored from the buffer, then control is transferred to the
+destination address.
+
+llvm.eh.sjlj.lsda
+-----------------
+
+.. code-block:: llvm
+
+ i8* @llvm.eh.sjlj.lsda()
+
+For SJLJ based exception handling, the ``llvm.eh.sjlj.lsda`` intrinsic returns
+the address of the Language Specific Data Area (LSDA) for the current
+function. The SJLJ front-end code stores this address in the exception handling
+function context for use by the runtime.
+
+llvm.eh.sjlj.callsite
+---------------------
+
+.. code-block:: llvm
+
+ void @llvm.eh.sjlj.callsite(i32 %call_site_num)
+
+For SJLJ based exception handling, the ``llvm.eh.sjlj.callsite`` intrinsic
+identifies the callsite value associated with the following ``invoke``
+instruction. This is used to ensure that landing pad entries in the LSDA are
+generated in matching order.
+
+Asm Table Formats
+=================
+
+There are two tables that are used by the exception handling runtime to
+determine which actions should be taken when an exception is thrown.
+
+Exception Handling Frame
+------------------------
+
+An exception handling frame ``eh_frame`` is very similar to the unwind frame
+used by DWARF debug info. The frame contains all the information necessary to
+tear down the current frame and restore the state of the prior frame. There is
+an exception handling frame for each function in a compile unit, plus a common
+exception handling frame that defines information common to all functions in the
+unit.
+
+Exception Tables
+----------------
+
+An exception table contains information about what actions to take when an
+exception is thrown in a particular part of a function's code. There is one
+exception table per function, except for leaf functions and functions that call
+only non-throwing functions, which do not need an exception table.
diff --git a/docs/ExtendedIntegerResults.txt b/docs/ExtendedIntegerResults.txt
new file mode 100644
index 00000000000..44e9fbf0e76
--- /dev/null
+++ b/docs/ExtendedIntegerResults.txt
@@ -0,0 +1,133 @@
+//===----------------------------------------------------------------------===//
+// Representing sign/zero extension of function results
+//===----------------------------------------------------------------------===//
+
+Mar 25, 2009 - Initial Revision
+
+Most ABIs specify that functions which return small integers do so in a
+specific integer GPR. This is an efficient way to go, but raises the question:
+if the returned value is smaller than the register, what do the high bits hold?
+
+There are three (interesting) possible answers: undefined, zero extended, or
+sign extended. The number of bits in question depends on the data-type that
+the front-end is referencing (typically i1/i8/i16/i32).
+
+Knowing the answer to this is important for two reasons: 1) we want to be able
+to implement the ABI correctly. If we need to sign extend the result according
+to the ABI, we really really do need to do this to preserve correctness. 2)
+this information is often useful for optimization purposes, and we want the
+mid-level optimizers to be able to process this (e.g. eliminate redundant
+extensions).
+
+For example, let's pretend that X86 requires the callee to properly extend the
+result of a return (I'm not sure this is the case, but the argument doesn't
+depend on this). Given this, we should compile this:
+
+int a();
+short b() { return a(); }
+
+into:
+
+_b:
+ subl $12, %esp
+ call L_a$stub
+ addl $12, %esp
+ cwtl
+ ret
+
+An optimization example is that we should be able to eliminate the explicit
+sign extension in this example:
+
+short y();
+int z() {
+ return ((int)y() << 16) >> 16;
+}
+
+_z:
+ subl $12, %esp
+ call _y
+ ;; movswl %ax, %eax -> not needed because eax is already sext'd
+ addl $12, %esp
+ ret
+
+//===----------------------------------------------------------------------===//
+// What we have right now.
+//===----------------------------------------------------------------------===//
+
+Currently, these sorts of things are modelled by compiling a function to return
+the small type and a signext/zeroext marker is used. For example, we compile
+Z into:
+
+define i32 @z() nounwind {
+entry:
+ %0 = tail call signext i16 (...)* @y() nounwind
+ %1 = sext i16 %0 to i32
+ ret i32 %1
+}
+
+and b into:
+
+define signext i16 @b() nounwind {
+entry:
+ %0 = tail call i32 (...)* @a() nounwind ; <i32> [#uses=1]
+ %retval12 = trunc i32 %0 to i16 ; <i16> [#uses=1]
+ ret i16 %retval12
+}
+
+This has some problems: 1) the actual precise semantics are really poorly
+defined (see PR3779). 2) some targets might want the caller to extend, some
+might want the callee to extend. 3) the mid-level optimizer doesn't know the
+size of the GPR, so it doesn't know that %0 is sign extended up to 32-bits
+here, and even if it did, it could not eliminate the sext. 4) the code
+generator has historically assumed that the result is extended to i32, which is
+a problem on PIC16 (and is also probably wrong on alpha and other 64-bit
+targets).
+
+//===----------------------------------------------------------------------===//
+// The proposal
+//===----------------------------------------------------------------------===//
+
+I suggest that we have the front-end fully lower out the ABI issues here to
+LLVM IR. This makes it 100% explicit what is going on and means that there is
+no cause for confusion. For example, the cases above should compile into:
+
+define i32 @z() nounwind {
+entry:
+ %0 = tail call i32 (...)* @y() nounwind
+ %1 = trunc i32 %0 to i16
+ %2 = sext i16 %1 to i32
+ ret i32 %2
+}
+define i32 @b() nounwind {
+entry:
+ %0 = tail call i32 (...)* @a() nounwind
+ %retval12 = trunc i32 %0 to i16
+ %tmp = sext i16 %retval12 to i32
+ ret i32 %tmp
+}
+
+In this model, no functions will return an i1/i8/i16 (and on a x86-64 target
+that extends results to i64, no i32). This solves the ambiguity issue, allows us
+to fully describe all possible ABIs, and now allows the optimizers to reason
+about and eliminate these extensions.
+
+The one thing that is missing is the ability for the front-end and optimizer to
+specify/infer the guarantees provided by the ABI to allow other optimizations.
+For example, in the y/z case, since y is known to return a sign extended value,
+the trunc/sext in z should be eliminable.
+
+This can be done by introducing new sext/zext attributes which mean "I know
+that the result of the function is sign extended at least N bits." Given this,
+and given that it is stuck on the y function, the mid-level optimizer could
+easily eliminate the extensions etc with existing functionality.
+
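+For example, if such an attribute existed on @y (the syntax is deliberately not
+spelled out here, since the attribute is only a proposal), the optimizer could
+reduce @z from the trunc+sext form above to simply:
+
+define i32 @z() nounwind {
+entry:
+  %0 = tail call i32 (...)* @y() nounwind   ; already sign extended per @y's attribute
+  ret i32 %0
+}
+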
+The major disadvantage of doing this sort of thing is that it makes the ABI
+lowering stuff even more explicit in the front-end, and that we would like to
+eventually move to having the code generator do more of this work. However,
+the sad truth of the matter is that this is a) unlikely to happen anytime in
+the near future, and b) this is no worse than we have now with the existing
+attributes.
+
+C compilers fundamentally have to reason about the target in many ways.
+This is ugly and horrible, but a fact of life.
+
diff --git a/docs/ExtendingLLVM.html b/docs/ExtendingLLVM.html
new file mode 100644
index 00000000000..99e209b8940
--- /dev/null
+++ b/docs/ExtendingLLVM.html
@@ -0,0 +1,379 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
+ "http://www.w3.org/TR/html4/strict.dtd">
+<html>
+<head>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
+ <title>Extending LLVM: Adding instructions, intrinsics, types, etc.</title>
+ <link rel="stylesheet" href="_static/llvm.css" type="text/css">
+</head>
+
+<body>
+
+<h1>
+ Extending LLVM: Adding instructions, intrinsics, types, etc.
+</h1>
+
+<ol>
+ <li><a href="#introduction">Introduction and Warning</a></li>
+ <li><a href="#intrinsic">Adding a new intrinsic function</a></li>
+ <li><a href="#instruction">Adding a new instruction</a></li>
+ <li><a href="#sdnode">Adding a new SelectionDAG node</a></li>
+ <li><a href="#type">Adding a new type</a>
+ <ol>
+ <li><a href="#fund_type">Adding a new fundamental type</a></li>
+ <li><a href="#derived_type">Adding a new derived type</a></li>
+ </ol></li>
+</ol>
+
+<div class="doc_author">
+ <p>Written by <a href="http://misha.brukman.net">Misha Brukman</a>,
+ Brad Jones, Nate Begeman,
+ and <a href="http://nondot.org/sabre">Chris Lattner</a></p>
+</div>
+
+<!-- *********************************************************************** -->
+<h2>
+ <a name="introduction">Introduction and Warning</a>
+</h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>During the course of using LLVM, you may wish to customize it for your
+research project or for experimentation. At this point, you may realize that
+you need to add something to LLVM, whether it be a new fundamental type, a new
+intrinsic function, or a whole new instruction.</p>
+
+<p>When you come to this realization, stop and think. Do you really need to
+extend LLVM? Is it a new fundamental capability that LLVM does not support in
+its current incarnation, or can it be synthesized from pre-existing LLVM
+elements? If you are not sure, ask on the <a
+href="http://mail.cs.uiuc.edu/mailman/listinfo/llvmdev">LLVM-dev</a> list. The
+reason is that extending LLVM can become involved, since you need to update all
+the different passes that you intend to use with your extension, and there are
+<em>many</em> LLVM analyses and transformations, so it may be quite a bit of
+work.</p>
+
+<p>Adding an <a href="#intrinsic">intrinsic function</a> is far easier than
+adding an instruction, and is transparent to optimization passes. If your added
+functionality can be expressed as a
+function call, an intrinsic function is the method of choice for LLVM
+extension.</p>
+
+<p>Before you invest a significant amount of effort into a non-trivial
+extension, <span class="doc_warning">ask on the list</span> if what you are
+looking to do can be done with already-existing infrastructure, or if maybe
+someone else is already working on it. You will save yourself a lot of time and
+effort by doing so.</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2>
+ <a name="intrinsic">Adding a new intrinsic function</a>
+</h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>Adding a new intrinsic function to LLVM is much easier than adding a new
+instruction. Almost all extensions to LLVM should start as an intrinsic
+function and then be turned into an instruction if warranted.</p>
+
+<ol>
+<li><tt>llvm/docs/LangRef.html</tt>:
+ Document the intrinsic. Decide whether it is code generator specific and
+ what the restrictions are. Talk to other people about it so that you are
+ sure it's a good idea.</li>
+
+<li><tt>llvm/include/llvm/Intrinsics*.td</tt>:
+ Add an entry for your intrinsic. Describe its memory access characteristics
+ for optimization (this controls whether it will be DCE'd, CSE'd, etc). Note
+ that any intrinsic using the <tt>llvm_int_ty</tt> type for an argument will
+ be deemed by <tt>tblgen</tt> as overloaded and the corresponding suffix
+ will be required on the intrinsic's name.</li>
+
+<li><tt>llvm/lib/Analysis/ConstantFolding.cpp</tt>: If it is possible to
+ constant fold your intrinsic, add support to it in the
+ <tt>canConstantFoldCallTo</tt> and <tt>ConstantFoldCall</tt> functions.</li>
+
+<li><tt>llvm/test/Regression/*</tt>: Add test cases for your intrinsic to the
+    test suite.</li>
+</ol>
+
+<p>Once the intrinsic has been added to the system, you must add code generator
+support for it. Generally you must do the following steps:</p>
+
+<dl>
+
+<dt>Add support to the .td file for the target(s) of your choice in
+ <tt>lib/Target/*/*.td</tt>.</dt>
+
+<dd>This is usually a matter of adding a pattern to the .td file that matches
+ the intrinsic, though it may obviously require adding the instructions you
+ want to generate as well. There are lots of examples in the PowerPC and X86
+ backend to follow.</dd>
+</dl>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2>
+ <a name="sdnode">Adding a new SelectionDAG node</a>
+</h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>As with intrinsics, adding a new SelectionDAG node to LLVM is much easier
+than adding a new instruction. New nodes are often added to help represent
+instructions common to many targets. These nodes often map to an LLVM
+instruction (add, sub) or intrinsic (byteswap, population count). In other
+cases, new nodes have been added to allow many targets to perform a common task
+(converting between floating point and integer representation) or capture more
+complicated behavior in a single node (rotate).</p>
+
+<ol>
+<li><tt>include/llvm/CodeGen/ISDOpcodes.h</tt>:
+ Add an enum value for the new SelectionDAG node.</li>
+<li><tt>lib/CodeGen/SelectionDAG/SelectionDAG.cpp</tt>:
+ Add code to print the node to <tt>getOperationName</tt>. If your new node
+ can be evaluated at compile time when given constant arguments (such as an
+ add of a constant with another constant), find the <tt>getNode</tt> method
+ that takes the appropriate number of arguments, and add a case for your node
+ to the switch statement that performs constant folding for nodes that take
+ the same number of arguments as your new node.</li>
+<li><tt>lib/CodeGen/SelectionDAG/LegalizeDAG.cpp</tt>:
+ Add code to <a href="CodeGenerator.html#selectiondag_legalize">legalize,
+ promote, and expand</a> the node as necessary. At a minimum, you will need
+ to add a case statement for your node in <tt>LegalizeOp</tt> which calls
+  <tt>LegalizeOp</tt> on the node's operands, and returns a new node if any of the
+ operands changed as a result of being legalized. It is likely that not all
+ targets supported by the SelectionDAG framework will natively support the
+ new node. In this case, you must also add code in your node's case
+ statement in <tt>LegalizeOp</tt> to Expand your node into simpler, legal
+ operations. The case for <tt>ISD::UREM</tt> for expanding a remainder into
+ a divide, multiply, and a subtract is a good example.</li>
+<li><tt>lib/CodeGen/SelectionDAG/LegalizeDAG.cpp</tt>:
+ If targets may support the new node being added only at certain sizes, you
+ will also need to add code to your node's case statement in
+ <tt>LegalizeOp</tt> to Promote your node's operands to a larger size, and
+ perform the correct operation. You will also need to add code to
+ <tt>PromoteOp</tt> to do this as well. For a good example, see
+ <tt>ISD::BSWAP</tt>,
+ which promotes its operand to a wider size, performs the byteswap, and then
+ shifts the correct bytes right to emulate the narrower byteswap in the
+ wider type.</li>
+<li><tt>lib/CodeGen/SelectionDAG/LegalizeDAG.cpp</tt>:
+ Add a case for your node in <tt>ExpandOp</tt> to teach the legalizer how to
+ perform the action represented by the new node on a value that has been
+ split into high and low halves. This case will be used to support your
+ node with a 64 bit operand on a 32 bit target.</li>
+<li><tt>lib/CodeGen/SelectionDAG/DAGCombiner.cpp</tt>:
+ If your node can be combined with itself, or other existing nodes in a
+ peephole-like fashion, add a visit function for it, and call that function
+  from <tt>visit()</tt>. There are several good examples of simple combines you
+ can do; <tt>visitFABS</tt> and <tt>visitSRL</tt> are good starting places.
+ </li>
+<li><tt>lib/Target/PowerPC/PPCISelLowering.cpp</tt>:
+ Each target has an implementation of the <tt>TargetLowering</tt> class,
+ usually in its own file (although some targets include it in the same
+ file as the DAGToDAGISel). The default behavior for a target is to
+ assume that your new node is legal for all types that are legal for
+  that target. If the target does not natively support your node, then
+  tell the target to either Promote it (if it is supported at a larger
+  type) or Expand it. This will cause the code you wrote in
+  <tt>LegalizeOp</tt> above to decompose your new node into other legal
+  nodes for this target (a hedged fragment illustrating this step appears
+  after this list).</li>
+<li><tt>lib/Target/TargetSelectionDAG.td</tt>:
+ Most current targets supported by LLVM generate code using the DAGToDAG
+ method, where SelectionDAG nodes are pattern matched to target-specific
+ nodes, which represent individual instructions. In order for the targets
+ to match an instruction to your new node, you must add a def for that node
+ to the list in this file, with the appropriate type constraints. Look at
+ <tt>add</tt>, <tt>bswap</tt>, and <tt>fadd</tt> for examples.</li>
+<li><tt>lib/Target/PowerPC/PPCInstrInfo.td</tt>:
+ Each target has a tablegen file that describes the target's instruction
+ set. For targets that use the DAGToDAG instruction selection framework,
+ add a pattern for your new node that uses one or more target nodes.
+ Documentation for this is a bit sparse right now, but there are several
+ decent examples. See the patterns for <tt>rotl</tt> in
+ <tt>PPCInstrInfo.td</tt>.</li>
+<li>TODO: document complex patterns.</li>
+<li><tt>llvm/test/Regression/CodeGen/*</tt>: Add test cases for your new node
+ to the test suite. <tt>llvm/test/Regression/CodeGen/X86/bswap.ll</tt> is
+ a good example.</li>
+</ol>
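+
+<p>As a hedged illustration of the target lowering step (it is not taken from
+any in-tree target, and the opcodes are only examples), the change usually
+amounts to a couple of calls in the target's <tt>TargetLowering</tt>
+constructor:</p>
+
+<div class="doc_code">
+<pre>
+// Inside YourTargetLowering's constructor (a hypothetical target):
+
+// No native population-count instruction: ask the legalizer to Expand it
+// into simpler legal operations, using the LegalizeOp code described above.
+setOperationAction(ISD::CTPOP, MVT::i32, Expand);
+
+// Byte swap is only supported at 32 bits: Promote the 16-bit form so it is
+// performed in the wider type and then fixed up.
+setOperationAction(ISD::BSWAP, MVT::i16, Promote);
+</pre>
+</div>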
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2>
+ <a name="instruction">Adding a new instruction</a>
+</h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p><span class="doc_warning">WARNING: adding instructions changes the bitcode
+format, and it will take some effort to maintain compatibility with
+the previous version.</span> Only add an instruction if it is absolutely
+necessary.</p>
+
+<ol>
+
+<li><tt>llvm/include/llvm/Instruction.def</tt>:
+ add a number for your instruction and an enum name</li>
+
+<li><tt>llvm/include/llvm/Instructions.h</tt>:
+ add a definition for the class that will represent your instruction</li>
+
+<li><tt>llvm/include/llvm/Support/InstVisitor.h</tt>:
+  add a prototype for a visitor to your new instruction type (a short
+  example of the visitor mechanism appears after this list)</li>
+
+<li><tt>llvm/lib/AsmParser/Lexer.l</tt>:
+  add a new token to parse your instruction from the assembly text file</li>
+
+<li><tt>llvm/lib/AsmParser/llvmAsmParser.y</tt>:
+  add the grammar describing how your instruction is read and what it
+  constructs as a result</li>
+
+<li><tt>llvm/lib/Bitcode/Reader/Reader.cpp</tt>:
+ add a case for your instruction and how it will be parsed from bitcode</li>
+
+<li><tt>llvm/lib/VMCore/Instruction.cpp</tt>:
+ add a case for how your instruction will be printed out to assembly</li>
+
+<li><tt>llvm/lib/VMCore/Instructions.cpp</tt>:
+ implement the class you defined in
+ <tt>llvm/include/llvm/Instructions.h</tt></li>
+
+<li>Test your instruction</li>
+
+<li><tt>llvm/lib/Target/*</tt>:
+ Add support for your instruction to code generators, or add a lowering
+ pass.</li>
+
+<li><tt>llvm/test/Regression/*</tt>: add your test cases to the test suite.</li>
+
+</ol>
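+
+<p>As a small, self-contained example of the <tt>InstVisitor</tt> mechanism
+your new instruction will plug into, the visitor below simply counts call
+instructions; once a prototype for your new instruction exists in
+<tt>InstVisitor.h</tt>, passes can override it in exactly the same way (the
+class and function names here are invented for the example):</p>
+
+<div class="doc_code">
+<pre>
+#include "llvm/Function.h"
+#include "llvm/Instructions.h"
+#include "llvm/Support/InstVisitor.h"
+using namespace llvm;
+
+struct CallCounter : public InstVisitor&lt;CallCounter&gt; {
+  unsigned NumCalls;
+  CallCounter() : NumCalls(0) {}
+  // One override per instruction class you care about.
+  void visitCallInst(CallInst &amp;CI) { ++NumCalls; }
+};
+
+static unsigned countCalls(Function &amp;F) {
+  CallCounter C;
+  C.visit(F);  // dispatches to visitCallInst for every call instruction in F
+  return C.NumCalls;
+}
+</pre>
+</div>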
+
+<p>Also, you need to implement (or modify) any analyses or passes that should
+understand this new instruction.</p>
+
+</div>
+
+
+<!-- *********************************************************************** -->
+<h2>
+ <a name="type">Adding a new type</a>
+</h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p><span class="doc_warning">WARNING: adding new types changes the bitcode
+format, and will break compatibility with currently-existing LLVM
+installations.</span> Only add new types if it is absolutely necessary.</p>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="fund_type">Adding a fundamental type</a>
+</h3>
+
+<div>
+
+<ol>
+
+<li><tt>llvm/include/llvm/Type.h</tt>:
+ add enum for the new type; add static <tt>Type*</tt> for this type</li>
+
+<li><tt>llvm/lib/VMCore/Type.cpp</tt>:
+ add mapping from <tt>TypeID</tt> =&gt; <tt>Type*</tt>;
+ initialize the static <tt>Type*</tt></li>
+
+<li><tt>llvm/lib/AsmParser/Lexer.l</tt>:
+  add the ability to parse the type from text assembly</li>
+
+<li><tt>llvm/lib/AsmParser/llvmAsmParser.y</tt>:
+  add a token for that type</li>
+
+</ol>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="derived_type">Adding a derived type</a>
+</h3>
+
+<div>
+
+<ol>
+<li><tt>llvm/include/llvm/Type.h</tt>:
+ add enum for the new type; add a forward declaration of the type
+ also</li>
+
+<li><tt>llvm/include/llvm/DerivedTypes.h</tt>:
+  add a new class to represent the new type in the hierarchy; add a forward
+  declaration to the TypeMap value type</li>
+
+<li><tt>llvm/lib/VMCore/Type.cpp</tt>:
+  add support for the derived type to:
+<div class="doc_code">
+<pre>
+std::string getTypeDescription(const Type &amp;Ty,
+ std::vector&lt;const Type*&gt; &amp;TypeStack)
+bool TypesEqual(const Type *Ty, const Type *Ty2,
+ std::map&lt;const Type*, const Type*&gt; &amp; EqTypes)
+</pre>
+</div>
+ add necessary member functions for type, and factory methods</li>
+
+<li><tt>llvm/lib/AsmParser/Lexer.l</tt>:
+  add the ability to parse the type from text assembly</li>
+
+<li><tt>llvm/lib/Bitcode/Writer/Writer.cpp</tt>:
+ modify <tt>void BitcodeWriter::outputType(const Type *T)</tt> to serialize
+ your type</li>
+
+<li><tt>llvm/lib/Bitcode/Reader/Reader.cpp</tt>:
+ modify <tt>const Type *BitcodeReader::ParseType()</tt> to read your data
+ type</li>
+
+<li><tt>llvm/lib/VMCore/AsmWriter.cpp</tt>:
+ modify
+<div class="doc_code">
+<pre>
+void calcTypeName(const Type *Ty,
+ std::vector&lt;const Type*&gt; &amp;TypeStack,
+ std::map&lt;const Type*,std::string&gt; &amp;TypeNames,
+ std::string &amp; Result)
+</pre>
+</div>
+ to output the new derived type
+</li>
+
+
+</ol>
+
+</div>
+
+</div>
+
+<!-- *********************************************************************** -->
+
+<hr>
+<address>
+ <a href="http://jigsaw.w3.org/css-validator/check/referer"><img
+ src="http://jigsaw.w3.org/css-validator/images/vcss-blue" alt="Valid CSS"></a>
+ <a href="http://validator.w3.org/check/referer"><img
+ src="http://www.w3.org/Icons/valid-html401-blue" alt="Valid HTML 4.01"></a>
+
+ <a href="http://llvm.org/">The LLVM Compiler Infrastructure</a>
+ <br>
+ Last modified: $Date$
+</address>
+
+</body>
+</html>
diff --git a/docs/FAQ.rst b/docs/FAQ.rst
new file mode 100644
index 00000000000..b0e3ca04569
--- /dev/null
+++ b/docs/FAQ.rst
@@ -0,0 +1,464 @@
+.. _faq:
+
+================================
+Frequently Asked Questions (FAQ)
+================================
+
+.. contents::
+ :local:
+
+
+License
+=======
+
+Does the University of Illinois Open Source License really qualify as an "open source" license?
+-----------------------------------------------------------------------------------------------
+Yes, the license is `certified
+<http://www.opensource.org/licenses/UoI-NCSA.php>`_ by the Open Source
+Initiative (OSI).
+
+
+Can I modify LLVM source code and redistribute the modified source?
+-------------------------------------------------------------------
+Yes. The modified source distribution must retain the copyright notice and
+follow the three bulleted conditions listed in the `LLVM license
+<http://llvm.org/svn/llvm-project/llvm/trunk/LICENSE.TXT>`_.
+
+
+Can I modify the LLVM source code and redistribute binaries or other tools based on it, without redistributing the source?
+--------------------------------------------------------------------------------------------------------------------------
+Yes. This is why we distribute LLVM under a less restrictive license than GPL,
+as explained in the first question above.
+
+
+Source Code
+===========
+
+In what language is LLVM written?
+---------------------------------
+All of the LLVM tools and libraries are written in C++ with extensive use of
+the STL.
+
+
+How portable is the LLVM source code?
+-------------------------------------
+The LLVM source code should be portable to most modern Unix-like operating
+systems. Most of the code is written in standard C++ with operating system
+services abstracted to a support library. The tools required to build and
+test LLVM have been ported to a plethora of platforms.
+
+Some porting problems may exist in the following areas:
+
+* The autoconf/makefile build system relies heavily on UNIX shell tools,
+ like the Bourne Shell and sed. Porting to systems without these tools
+ (MacOS 9, Plan 9) will require more effort.
+
+
+Build Problems
+==============
+
+When I run configure, it finds the wrong C compiler.
+----------------------------------------------------
+The ``configure`` script attempts to locate first ``gcc`` and then ``cc``,
+unless it finds compiler paths set in ``CC`` and ``CXX`` for the C and C++
+compiler, respectively.
+
+If ``configure`` finds the wrong compiler, either adjust your ``PATH``
+environment variable or set ``CC`` and ``CXX`` explicitly.
+
+
+The ``configure`` script finds the right C compiler, but it uses the LLVM tools from a previous build. What do I do?
+---------------------------------------------------------------------------------------------------------------------
+The ``configure`` script uses the ``PATH`` to find executables, so if it's
+grabbing the wrong linker/assembler/etc, there are two ways to fix it:
+
+#. Adjust your ``PATH`` environment variable so that the correct program
+ appears first in the ``PATH``. This may work, but may not be convenient
+ when you want them *first* in your path for other work.
+
+#. Run ``configure`` with an alternative ``PATH`` that is correct. In a
+ Bourne compatible shell, the syntax would be:
+
+.. code-block:: bash
+
+ % PATH=[the path without the bad program] ./configure ...
+
+This is still somewhat inconvenient, but it allows ``configure`` to do its
+work without having to adjust your ``PATH`` permanently.
+
+
+When creating a dynamic library, I get a strange GLIBC error.
+-------------------------------------------------------------
+Under some operating systems (e.g., Linux), libtool does not work correctly if
+GCC was compiled with the ``--disable-shared`` option. To work around this,
+install your own version of GCC that has shared libraries enabled by default.
+
+
+I've updated my source tree from Subversion, and now my build is trying to use a file/directory that doesn't exist.
+-------------------------------------------------------------------------------------------------------------------
+You need to re-run configure in your object directory. When new Makefiles
+are added to the source tree, they have to be copied over to the object tree
+in order to be used by the build.
+
+
+I've modified a Makefile in my source tree, but my build tree keeps using the old version. What do I do?
+---------------------------------------------------------------------------------------------------------
+If the Makefile already exists in your object tree, you can just run the
+following command in the top level directory of your object tree:
+
+.. code-block:: bash
+
+   % ./config.status <relative path to Makefile>
+
+If the Makefile is new, you will have to modify the configure script to copy
+it over.
+
+
+I've upgraded to a new version of LLVM, and I get strange build errors.
+-----------------------------------------------------------------------
+Sometimes, changes to the LLVM source code alter how the build system works.
+Changes in ``libtool``, ``autoconf``, or header file dependencies are
+especially prone to this sort of problem.
+
+The best thing to try is to remove the old files and re-build. In most cases,
+this takes care of the problem. To do this, just type ``make clean`` and then
+``make`` in the directory that fails to build.
+
+
+I've built LLVM and am testing it, but the tests freeze.
+--------------------------------------------------------
+This is most likely occurring because you built a profile or release
+(optimized) build of LLVM and have not specified the same information on the
+``gmake`` command line.
+
+For example, if you built LLVM with the command:
+
+.. code-block:: bash
+
+ % gmake ENABLE_PROFILING=1
+
+...then you must run the tests with the following commands:
+
+.. code-block:: bash
+
+ % cd llvm/test
+ % gmake ENABLE_PROFILING=1
+
+Why do test results differ when I perform different types of builds?
+--------------------------------------------------------------------
+The LLVM test suite is dependent upon several features of the LLVM tools and
+libraries.
+
+First, the debugging assertions in code are not enabled in optimized or
+profiling builds. Hence, tests that used to fail may pass.
+
+Second, some tests may rely upon debugging options or behavior that is only
+available in the debug build. These tests will fail in an optimized or
+profile build.
+
+
+Compiling LLVM with GCC 3.3.2 fails, what should I do?
+------------------------------------------------------
+This is `a bug in GCC <http://gcc.gnu.org/bugzilla/show_bug.cgi?id=13392>`_,
+and affects projects other than LLVM. Try upgrading or downgrading your GCC.
+
+
+Compiling LLVM with GCC succeeds, but the resulting tools do not work, what can be wrong?
+-----------------------------------------------------------------------------------------
+Several versions of GCC are known to miscompile the LLVM
+codebase. Please consult your compiler version (``gcc --version``) to find
+out whether it is `broken <GettingStarted.html#brokengcc>`_. If so, your only
+option is to upgrade GCC to a known good version.
+
+
+After Subversion update, rebuilding gives the error "No rule to make target".
+-----------------------------------------------------------------------------
+If the error is of the form:
+
+.. code-block:: bash
+
+ gmake[2]: *** No rule to make target `/path/to/somefile',
+ needed by `/path/to/another/file.d'.
+ Stop.
+
+This may occur anytime files are moved within the Subversion repository or
+removed entirely. In this case, the best solution is to erase all ``.d``
+files, which list dependencies for source files, and rebuild:
+
+.. code-block:: bash
+
+ % cd $LLVM_OBJ_DIR
+ % rm -f `find . -name \*\.d`
+ % gmake
+
+In other cases, it may be necessary to run ``make clean`` before rebuilding.
+
+
+Source Languages
+================
+
+What source languages are supported?
+------------------------------------
+LLVM currently has full support for C and C++ source languages. These are
+available through both `Clang <http://clang.llvm.org/>`_ and `DragonEgg
+<http://dragonegg.llvm.org/>`_.
+
+The PyPy developers are working on integrating LLVM into the PyPy backend so
+that PyPy can translate to LLVM.
+
+
+I'd like to write a self-hosting LLVM compiler. How should I interface with the LLVM middle-end optimizers and back-end code generators?
+----------------------------------------------------------------------------------------------------------------------------------------
+Your compiler front-end will communicate with LLVM by creating a module in the
+LLVM intermediate representation (IR) format. Assuming you want to write your
+language's compiler in the language itself (rather than C++), there are 3
+major ways to tackle generating LLVM IR from a front-end:
+
+1. **Call into the LLVM libraries code using your language's FFI (foreign
+ function interface).**
+
+ * *for:* best tracks changes to the LLVM IR, .ll syntax, and .bc format
+
+  * *for:* enables running LLVM optimization passes without an emit/parse
+ overhead
+
+ * *for:* adapts well to a JIT context
+
+ * *against:* lots of ugly glue code to write
+
+2. **Emit LLVM assembly from your compiler's native language.**
+
+ * *for:* very straightforward to get started
+
+ * *against:* the .ll parser is slower than the bitcode reader when
+ interfacing to the middle end
+
+ * *against:* it may be harder to track changes to the IR
+
+3. **Emit LLVM bitcode from your compiler's native language.**
+
+ * *for:* can use the more-efficient bitcode reader when interfacing to the
+ middle end
+
+ * *against:* you'll have to re-engineer the LLVM IR object model and bitcode
+ writer in your language
+
+ * *against:* it may be harder to track changes to the IR
+
+If you go with the first option, the C bindings in include/llvm-c should help
+a lot, since most languages have strong support for interfacing with C. The
+most common hurdle with calling C from managed code is interfacing with the
+garbage collector. The C interface was designed to require very little memory
+management, and so is straightforward in this regard.
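+
+For illustration, here is a minimal sketch of option 1 using the C API from
+``include/llvm-c/Core.h`` (the module and function names are invented for the
+example); it builds and prints a module containing a two-argument ``add``
+function:
+
+.. code-block:: c++
+
+   #include "llvm-c/Core.h"
+
+   int main(void) {
+     // Builds: define i32 @add(i32 %a, i32 %b)
+     LLVMModuleRef M = LLVMModuleCreateWithName("demo");
+     LLVMTypeRef Params[] = { LLVMInt32Type(), LLVMInt32Type() };
+     LLVMValueRef Fn = LLVMAddFunction(M, "add",
+                         LLVMFunctionType(LLVMInt32Type(), Params, 2, 0));
+
+     LLVMBuilderRef B = LLVMCreateBuilder();
+     LLVMPositionBuilderAtEnd(B, LLVMAppendBasicBlock(Fn, "entry"));
+     LLVMBuildRet(B, LLVMBuildAdd(B, LLVMGetParam(Fn, 0),
+                                  LLVMGetParam(Fn, 1), "sum"));
+
+     LLVMDumpModule(M);       // print the textual IR to stderr
+     LLVMDisposeBuilder(B);
+     LLVMDisposeModule(M);
+     return 0;
+   }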
+
+What support is there for higher level source language constructs for building a compiler?
+--------------------------------------------------------------------------------------------
+Currently, there isn't much. LLVM supports an intermediate representation
+which is useful for code representation but does not support the high level
+(abstract syntax tree) representation needed by most compilers. There are no
+facilities for lexical or semantic analysis.
+
+
+I don't understand the ``GetElementPtr`` instruction. Help!
+-----------------------------------------------------------
+See `The Often Misunderstood GEP Instruction <GetElementPtr.html>`_.
+
+
+Using the C and C++ Front Ends
+==============================
+
+Can I compile C or C++ code to platform-independent LLVM bitcode?
+-----------------------------------------------------------------
+No. C and C++ are inherently platform-dependent languages. The most obvious
+example of this is the preprocessor. A very common way that C code is made
+portable is by using the preprocessor to include platform-specific code. In
+practice, information about other platforms is lost after preprocessing, so
+the result is inherently dependent on the platform that the preprocessing was
+targeting.
+
+Another example is ``sizeof``. It's common for ``sizeof(long)`` to vary
+between platforms. In most C front-ends, ``sizeof`` is expanded to a
+constant immediately, thus hard-wiring a platform-specific detail.
+
+Also, since many platforms define their ABIs in terms of C, and since LLVM is
+lower-level than C, front-ends currently must emit platform-specific IR in
+order to have the result conform to the platform ABI.
+
+
+Questions about code generated by the demo page
+===============================================
+
+What is this ``llvm.global_ctors`` and ``_GLOBAL__I_a...`` stuff that happens when I ``#include <iostream>``?
+-------------------------------------------------------------------------------------------------------------
+If you ``#include`` the ``<iostream>`` header into a C++ translation unit,
+the file will probably use the ``std::cin``/``std::cout``/... global objects.
+However, C++ does not guarantee an order of initialization between static
+objects in different translation units, so if a static ctor/dtor in your .cpp
+file used ``std::cout``, for example, the object would not necessarily be
+automatically initialized before your use.
+
+To make ``std::cout`` and friends work correctly in these scenarios, the STL
+that we use declares a static object that gets created in every translation
+unit that includes ``<iostream>``. This object has a static constructor
+and destructor that initializes and destroys the global iostream objects
+before they could possibly be used in the file. The code that you see in the
+``.ll`` file corresponds to the constructor and destructor registration code.
+
+If you would like to make it easier to *understand* the LLVM code generated
+by the compiler in the demo page, consider using ``printf()`` instead of
+``iostream``\s to print values.
+
+
+Where did all of my code go??
+-----------------------------
+If you are using the LLVM demo page, you may often wonder what happened to
+all of the code that you typed in. Remember that the demo script is running
+the code through the LLVM optimizers, so if your code doesn't actually do
+anything useful, it might all be deleted.
+
+To prevent this, make sure that the code is actually needed. For example, if
+you are computing some expression, return the value from the function instead
+of leaving it in a local variable. If you really want to constrain the
+optimizer, you can read from and assign to ``volatile`` global variables.
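+
+For example, in a sketch like the following (the function name is invented),
+the loop would normally be deleted as dead code; the ``volatile`` store and
+the ``return`` are what keep it alive:
+
+.. code-block:: c++
+
+   volatile int Sink;        // reads and writes of a volatile global are kept
+
+   int keepMyCode(int N) {
+     int Sum = 0;
+     for (int i = 0; i < N; ++i)
+       Sum += i * i;
+     Sink = Sum;             // observable side effect the optimizer must keep
+     return Sum;             // returning the value also keeps the computation
+   }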
+
+
+What is this "``undef``" thing that shows up in my code?
+--------------------------------------------------------
+``undef`` is the LLVM way of representing a value that is not defined. You
+can get these if you do not initialize a variable before you use it. For
+example, the C function:
+
+.. code-block:: c
+
+ int X() { int i; return i; }
+
+Is compiled to "``ret i32 undef``" because "``i``" never has a value specified
+for it.
+
+
+Why does instcombine + simplifycfg turn a call to a function with a mismatched calling convention into "unreachable"? Why not make the verifier reject it?
+----------------------------------------------------------------------------------------------------------------------------------------------------------
+This is a common problem run into by authors of front-ends that are using
+custom calling conventions: you need to make sure to set the right calling
+convention on both the function and on each call to the function. For
+example, this code:
+
+.. code-block:: llvm
+
+ define fastcc void @foo() {
+ ret void
+ }
+ define void @bar() {
+ call void @foo()
+ ret void
+ }
+
+Is optimized to:
+
+.. code-block:: llvm
+
+ define fastcc void @foo() {
+ ret void
+ }
+ define void @bar() {
+ unreachable
+ }
+
+... with "``opt -instcombine -simplifycfg``". This often bites people because
+"all their code disappears". Setting the calling convention on the caller and
+callee is required for indirect calls to work, so people often ask why not
+make the verifier reject this sort of thing.
+
+The answer is that this code has undefined behavior, but it is not illegal.
+If we made it illegal, then every transformation that could potentially create
+this would have to ensure that it doesn't, and there is valid code that can
+create this sort of construct (in dead code). The sorts of things that can
+cause this to happen are fairly contrived, but we still need to accept them.
+Here's an example:
+
+.. code-block:: llvm
+
+ define fastcc void @foo() {
+ ret void
+ }
+ define internal void @bar(void()* %FP, i1 %cond) {
+ br i1 %cond, label %T, label %F
+ T:
+ call void %FP()
+ ret void
+ F:
+ call fastcc void %FP()
+ ret void
+ }
+ define void @test() {
+ %X = or i1 false, false
+ call void @bar(void()* @foo, i1 %X)
+ ret void
+ }
+
+In this example, "test" always passes ``@foo``/``false`` into ``bar``, which
+ensures that it is dynamically called with the right calling conv (thus, the
+code is perfectly well defined). If you run this through the inliner, you
+get this (the explicit "or" is there so that the inliner doesn't dead code
+eliminate a bunch of stuff):
+
+.. code-block:: llvm
+
+ define fastcc void @foo() {
+ ret void
+ }
+ define void @test() {
+ %X = or i1 false, false
+ br i1 %X, label %T.i, label %F.i
+ T.i:
+ call void @foo()
+ br label %bar.exit
+ F.i:
+ call fastcc void @foo()
+ br label %bar.exit
+ bar.exit:
+ ret void
+ }
+
+Here you can see that the inlining pass made an undefined call to ``@foo``
+with the wrong calling convention. We really don't want to make the inliner
+have to know about this sort of thing, so it needs to be valid code. In this
+case, dead code elimination can trivially remove the undefined code. However,
+if ``%X`` was an input argument to ``@test``, the inliner would produce this:
+
+.. code-block:: llvm
+
+ define fastcc void @foo() {
+ ret void
+ }
+
+ define void @test(i1 %X) {
+ br i1 %X, label %T.i, label %F.i
+ T.i:
+ call void @foo()
+ br label %bar.exit
+ F.i:
+ call fastcc void @foo()
+ br label %bar.exit
+ bar.exit:
+ ret void
+ }
+
+The interesting thing about this is that ``%X`` *must* be false for the
+code to be well-defined, but no amount of dead code elimination will be able
+to delete the broken call as unreachable. However, since
+``instcombine``/``simplifycfg`` turns the undefined call into unreachable, we
+end up with a branch on a condition that goes to unreachable: a branch to
+unreachable can never happen, so "``-inline -instcombine -simplifycfg``" is
+able to produce:
+
+.. code-block:: llvm
+
+ define fastcc void @foo() {
+ ret void
+ }
+ define void @test(i1 %X) {
+ F.i:
+ call fastcc void @foo()
+ ret void
+ }
diff --git a/docs/GCCFEBuildInstrs.html b/docs/GCCFEBuildInstrs.html
new file mode 100644
index 00000000000..0caf9d86186
--- /dev/null
+++ b/docs/GCCFEBuildInstrs.html
@@ -0,0 +1,279 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
+ "http://www.w3.org/TR/html4/strict.dtd">
+<html>
+<head>
+ <meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
+ <link rel="stylesheet" href="_static/llvm.css" type="text/css" media="screen">
+ <title>Building the LLVM GCC Front-End</title>
+</head>
+<body>
+
+<h1>
+ Building the LLVM GCC Front-End
+</h1>
+
+<ol>
+ <li><a href="#instructions">Building llvm-gcc from Source</a></li>
+ <li><a href="#ada">Building the Ada front-end</a></li>
+ <li><a href="#fortran">Building the Fortran front-end</a></li>
+ <li><a href="#license">License Information</a></li>
+</ol>
+
+<div class="doc_author">
+ <p>Written by the LLVM Team</p>
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="instructions">Building llvm-gcc from Source</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>This section describes how to acquire and build llvm-gcc 4.2, which is based
+on the GCC 4.2.1 front-end. Supported languages are Ada, C, C++, Fortran,
+Objective-C and Objective-C++. Note that the instructions for building these
+front-ends are completely different (and much easier!) than those for building
+llvm-gcc3 in the past.</p>
+
+<ol>
+ <li><p>Retrieve the appropriate llvm-gcc-4.2-<i>version</i>.source.tar.gz
+ archive from the <a href="http://llvm.org/releases/">LLVM web
+ site</a>.</p>
+
+ <p>It is also possible to download the sources of the llvm-gcc front end
+ from a read-only mirror using subversion. To check out the 4.2 code
+ for first time use:</p>
+
+<div class="doc_code">
+<pre>
+svn co http://llvm.org/svn/llvm-project/llvm-gcc-4.2/trunk <i>dst-directory</i>
+</pre>
+</div>
+
+  <p>After that, the code can be updated in the destination directory
+ using:</p>
+
+<div class="doc_code">
+<pre>svn update</pre>
+</div>
+
+ <p>The mirror is brought up to date every evening.</p></li>
+
+ <li>Follow the directions in the top-level <tt>README.LLVM</tt> file for
+ up-to-date instructions on how to build llvm-gcc. See below for building
+      with support for Ada or Fortran.</li>
+</ol>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="ada">Building the Ada front-end</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+<p>Building with support for Ada amounts to following the directions in the
+top-level <tt>README.LLVM</tt> file, adding ",ada" to EXTRALANGS, for example:
+<tt>EXTRALANGS=,ada</tt></p>
+
+<p>There are some complications however:</p>
+
+<ol>
+ <li><p>The only platform for which the Ada front-end is known to build is
+ 32 bit intel x86 running linux. It is unlikely to build for other
+ systems without some work.</p></li>
+ <li><p>The build requires having a compiler that supports Ada, C and C++.
+ The Ada front-end is written in Ada so an Ada compiler is needed to
+ build it. Compilers known to work with the
+ <a href="http://llvm.org/releases/download.html">LLVM 2.7 release</a>
+ are <a href="http://gcc.gnu.org/releases.html">gcc-4.2</a> and the
+ 2005, 2006 and 2007 versions of the
+ <a href="http://libre.adacore.com/">GNAT GPL Edition</a>.
+ <b>GNAT GPL 2008, gcc-4.3 and later will not work</b>.
+ The LLVM parts of llvm-gcc are written in C++ so a C++ compiler is
+ needed to build them. The rest of gcc is written in C.
+ Some linux distributions provide a version of gcc that supports all
+ three languages (the Ada part often comes as an add-on package to
+ the rest of gcc). Otherwise it is possible to combine two versions
+ of gcc, one that supports Ada and C (such as the
+ <a href="http://libre.adacore.com/">2007 GNAT GPL Edition</a>)
+ and another which supports C++, see below.</p></li>
+ <li><p>Because the Ada front-end is experimental, it is wise to build the
+ compiler with checking enabled. This causes it to run much slower, but
+ helps catch mistakes in the compiler (please report any problems using
+ <a href="http://llvm.org/bugs/">LLVM bugzilla</a>).</p></li>
+ <li><p>The Ada front-end <a href="http://llvm.org/PR2007">fails to
+ bootstrap</a>, due to lack of LLVM support for
+ <tt>setjmp</tt>/<tt>longjmp</tt> style exception handling (used
+ internally by the compiler), so you must specify
+ <tt>--disable-bootstrap</tt>.</p></li>
+</ol>
+
+<p>Supposing appropriate compilers are available, llvm-gcc with Ada support can
+ be built on an x86-32 linux box using the following recipe:</p>
+
+<ol>
+ <li><p>Download the <a href="http://llvm.org/releases/download.html">LLVM source</a>
+ and unpack it:</p>
+
+<pre class="doc_code">
+wget http://llvm.org/releases/2.7/llvm-2.7.tgz
+tar xzf llvm-2.7.tgz
+mv llvm-2.7 llvm
+</pre>
+
+ <p>or <a href="GettingStarted.html#checkout">check out the
+ latest version from subversion</a>:</p>
+
+<pre class="doc_code">svn co http://llvm.org/svn/llvm-project/llvm/trunk llvm</pre>
+
+ </li>
+
+ <li><p>Download the
+ <a href="http://llvm.org/releases/download.html">llvm-gcc-4.2 source</a>
+ and unpack it:</p>
+
+<pre class="doc_code">
+wget http://llvm.org/releases/2.7/llvm-gcc-4.2-2.7.source.tgz
+tar xzf llvm-gcc-4.2-2.7.source.tgz
+mv llvm-gcc-4.2-2.7.source llvm-gcc-4.2
+</pre>
+
+ <p>or <a href="GettingStarted.html#checkout">check out the
+ latest version from subversion</a>:</p>
+
+<pre class="doc_code">
+svn co http://llvm.org/svn/llvm-project/llvm-gcc-4.2/trunk llvm-gcc-4.2
+</pre>
+ </li>
+
+ <li><p>Make a build directory <tt>llvm-objects</tt> for llvm and make it the
+ current directory:</p>
+
+<pre class="doc_code">
+mkdir llvm-objects
+cd llvm-objects
+</pre>
+ </li>
+
+ <li><p>Configure LLVM (here it is configured to install into <tt>/usr/local</tt>):</p>
+
+<pre class="doc_code">
+../llvm/configure --prefix=<b>/usr/local</b> --enable-optimized --enable-assertions
+</pre>
+
+ <p>If you have a multi-compiler setup and the C++ compiler is not the
+ default, then you can configure like this:</p>
+
+<pre class="doc_code">
+CXX=<b>PATH_TO_C++_COMPILER</b> ../llvm/configure --prefix=<b>/usr/local</b> --enable-optimized --enable-assertions
+</pre>
+
+ <p>To compile without checking (not recommended), replace
+ <tt>--enable-assertions</tt> with <tt>--disable-assertions</tt>.</p>
+
+ </li>
+
+ <li><p>Build LLVM:</p>
+
+<pre class="doc_code">
+make
+</pre>
+ </li>
+
+ <li><p>Install LLVM (optional):</p>
+
+<pre class="doc_code">
+make install
+</pre>
+ </li>
+
+ <li><p>Make a build directory <tt>llvm-gcc-4.2-objects</tt> for llvm-gcc and make it the
+ current directory:</p>
+
+<pre class="doc_code">
+cd ..
+mkdir llvm-gcc-4.2-objects
+cd llvm-gcc-4.2-objects
+</pre>
+ </li>
+
+ <li><p>Configure llvm-gcc (here it is configured to install into <tt>/usr/local</tt>).
+ The <tt>--enable-checking</tt> flag turns on sanity checks inside the compiler.
+ To turn off these checks (not recommended), replace <tt>--enable-checking</tt>
+ with <tt>--disable-checking</tt>.
+ Additional languages can be appended to the <tt>--enable-languages</tt> switch,
+ for example <tt>--enable-languages=ada,c,c++</tt>.</p>
+
+<pre class="doc_code">
+../llvm-gcc-4.2/configure --prefix=<b>/usr/local</b> --enable-languages=ada,c \
+ --enable-checking --enable-llvm=$PWD/../llvm-objects \
+ --disable-bootstrap --disable-multilib
+</pre>
+
+ <p>If you have a multi-compiler setup, then you can configure like this:</p>
+
+<pre class="doc_code">
+export CC=<b>PATH_TO_C_AND_ADA_COMPILER</b>
+export CXX=<b>PATH_TO_C++_COMPILER</b>
+../llvm-gcc-4.2/configure --prefix=<b>/usr/local</b> --enable-languages=ada,c \
+ --enable-checking --enable-llvm=$PWD/../llvm-objects \
+ --disable-bootstrap --disable-multilib
+</pre>
+ </li>
+
+ <li><p>Build and install the compiler:</p>
+
+<pre class="doc_code">
+make
+make install
+</pre>
+ </li>
+</ol>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="fortran">Building the Fortran front-end</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+<p>To build with support for Fortran, follow the directions in the top-level
+<tt>README.LLVM</tt> file, adding ",fortran" to EXTRALANGS, for example:</p>
+
+<pre class="doc_code">
+EXTRALANGS=,fortran
+</pre>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="license">License Information</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+<p>
+The LLVM GCC frontend is licensed to you under the GNU General Public License
+and the GNU Lesser General Public License. Please see the files COPYING and
+COPYING.LIB for more details.
+</p>
+
+<p>
+More information is <a href="FAQ.html#license">available in the FAQ</a>.
+</p>
+</div>
+
+<!-- *********************************************************************** -->
+
+<hr>
+<address>
+ <a href="http://jigsaw.w3.org/css-validator/check/referer"><img
+ src="http://jigsaw.w3.org/css-validator/images/vcss-blue" alt="Valid CSS"></a>
+ <a href="http://validator.w3.org/check/referer"><img
+ src="http://www.w3.org/Icons/valid-html401-blue" alt="Valid HTML 4.01"></a>
+
+ <a href="http://llvm.org/">LLVM Compiler Infrastructure</a><br>
+ Last modified: $Date$
+</address>
+
+</body>
+</html>
diff --git a/docs/GarbageCollection.html b/docs/GarbageCollection.html
new file mode 100644
index 00000000000..20f2c96a2b5
--- /dev/null
+++ b/docs/GarbageCollection.html
@@ -0,0 +1,1389 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
+ "http://www.w3.org/TR/html4/strict.dtd">
+<html>
+<head>
+ <meta http-equiv="Content-Type" Content="text/html; charset=UTF-8" >
+ <title>Accurate Garbage Collection with LLVM</title>
+ <link rel="stylesheet" href="_static/llvm.css" type="text/css">
+ <style type="text/css">
+ .rowhead { text-align: left; background: inherit; }
+ .indent { padding-left: 1em; }
+ .optl { color: #BFBFBF; }
+ </style>
+</head>
+<body>
+
+<h1>
+ Accurate Garbage Collection with LLVM
+</h1>
+
+<ol>
+ <li><a href="#introduction">Introduction</a>
+ <ul>
+ <li><a href="#feature">Goals and non-goals</a></li>
+ </ul>
+ </li>
+
+ <li><a href="#quickstart">Getting started</a>
+ <ul>
+ <li><a href="#quickstart-compiler">In your compiler</a></li>
+ <li><a href="#quickstart-runtime">In your runtime library</a></li>
+ <li><a href="#shadow-stack">About the shadow stack</a></li>
+ </ul>
+ </li>
+
+ <li><a href="#core">Core support</a>
+ <ul>
+ <li><a href="#gcattr">Specifying GC code generation:
+ <tt>gc "..."</tt></a></li>
+ <li><a href="#gcroot">Identifying GC roots on the stack:
+ <tt>llvm.gcroot</tt></a></li>
+ <li><a href="#barriers">Reading and writing references in the heap</a>
+ <ul>
+ <li><a href="#gcwrite">Write barrier: <tt>llvm.gcwrite</tt></a></li>
+ <li><a href="#gcread">Read barrier: <tt>llvm.gcread</tt></a></li>
+ </ul>
+ </li>
+ </ul>
+ </li>
+
+ <li><a href="#plugin">Compiler plugin interface</a>
+ <ul>
+ <li><a href="#collector-algos">Overview of available features</a></li>
+ <li><a href="#stack-map">Computing stack maps</a></li>
+ <li><a href="#init-roots">Initializing roots to null:
+ <tt>InitRoots</tt></a></li>
+ <li><a href="#custom">Custom lowering of intrinsics: <tt>CustomRoots</tt>,
+ <tt>CustomReadBarriers</tt>, and <tt>CustomWriteBarriers</tt></a></li>
+ <li><a href="#safe-points">Generating safe points:
+ <tt>NeededSafePoints</tt></a></li>
+ <li><a href="#assembly">Emitting assembly code:
+ <tt>GCMetadataPrinter</tt></a></li>
+ </ul>
+ </li>
+
+ <li><a href="#runtime-impl">Implementing a collector runtime</a>
+ <ul>
+ <li><a href="#gcdescriptors">Tracing GC pointers from heap
+ objects</a></li>
+ </ul>
+ </li>
+
+ <li><a href="#references">References</a></li>
+
+</ol>
+
+<div class="doc_author">
+ <p>Written by <a href="mailto:sabre@nondot.org">Chris Lattner</a> and
+ Gordon Henriksen</p>
+</div>
+
+<!-- *********************************************************************** -->
+<h2>
+ <a name="introduction">Introduction</a>
+</h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>Garbage collection is a widely used technique that frees the programmer from
+having to know the lifetimes of heap objects, making software easier to produce
+and maintain. Many programming languages rely on garbage collection for
+automatic memory management. There are two primary forms of garbage collection:
+conservative and accurate.</p>
+
+<p>Conservative garbage collection often does not require any special support
+from either the language or the compiler: it can handle non-type-safe
+programming languages (such as C/C++) and does not require any special
+information from the compiler. The
+<a href="http://www.hpl.hp.com/personal/Hans_Boehm/gc/">Boehm collector</a> is
+an example of a state-of-the-art conservative collector.</p>
+
+<p>Accurate garbage collection requires the ability to identify all pointers in
+the program at run-time (which requires that the source-language be type-safe in
+most cases). Identifying pointers at run-time requires compiler support to
+locate all places that hold live pointer variables at run-time, including the
+<a href="#gcroot">processor stack and registers</a>.</p>
+
+<p>Conservative garbage collection is attractive because it does not require any
+special compiler support, but it does have problems. In particular, because the
+conservative garbage collector cannot <i>know</i> that a particular word in the
+machine is a pointer, it cannot move live objects in the heap (preventing the
+use of compacting and generational GC algorithms) and it can occasionally suffer
+from memory leaks due to integer values that happen to point to objects in the
+program. In addition, some aggressive compiler transformations can break
+conservative garbage collectors (though these seem rare in practice).</p>
+
+<p>Accurate garbage collectors do not suffer from any of these problems, but
+they can suffer from degraded scalar optimization of the program. In particular,
+because the runtime must be able to identify and update all pointers active in
+the program, some optimizations are less effective. In practice, however, the
+locality and performance benefits of using aggressive garbage collection
+techniques dominate any low-level losses.</p>
+
+<p>This document describes the mechanisms and interfaces provided by LLVM to
+support accurate garbage collection.</p>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="feature">Goals and non-goals</a>
+</h3>
+
+<div>
+
+<p>LLVM's intermediate representation provides <a href="#intrinsics">garbage
+collection intrinsics</a> that offer support for a broad class of
+collector models. For instance, the intrinsics permit:</p>
+
+<ul>
+ <li>semi-space collectors</li>
+ <li>mark-sweep collectors</li>
+ <li>generational collectors</li>
+ <li>reference counting</li>
+ <li>incremental collectors</li>
+ <li>concurrent collectors</li>
+ <li>cooperative collectors</li>
+</ul>
+
+<p>We hope that the primitive support built into the LLVM IR is sufficient to
+support a broad class of garbage collected languages including Scheme, ML, Java,
+C#, Perl, Python, Lua, Ruby, other scripting languages, and more.</p>
+
+<p>However, LLVM does not itself provide a garbage collector&mdash;this should
+be part of your language's runtime library. LLVM provides a framework for
+compile time <a href="#plugin">code generation plugins</a>. The role of these
+plugins is to generate code and data structures which conform to the <em>binary
+interface</em> specified by the <em>runtime library</em>. This is similar to the
+relationship between LLVM and DWARF debugging info, for example. The
+difference primarily lies in the lack of an established standard in the domain
+of garbage collection&mdash;thus the plugins.</p>
+
+<p>The aspects of the binary interface with which LLVM's GC support is
+concerned are:</p>
+
+<ul>
+ <li>Creation of GC-safe points within code where collection is allowed to
+ execute safely.</li>
+ <li>Computation of the stack map. For each safe point in the code, object
+ references within the stack frame must be identified so that the
+ collector may traverse and perhaps update them.</li>
+ <li>Write barriers when storing object references to the heap. These are
+ commonly used to optimize incremental scans in generational
+ collectors.</li>
+ <li>Emission of read barriers when loading object references. These are
+ useful for interoperating with concurrent collectors.</li>
+</ul>
+
+<p>There are additional areas that LLVM does not directly address:</p>
+
+<ul>
+ <li>Registration of global roots with the runtime.</li>
+ <li>Registration of stack map entries with the runtime.</li>
+ <li>The functions used by the program to allocate memory, trigger a
+ collection, etc.</li>
+ <li>Computation or compilation of type maps, or registration of them with
+ the runtime. These are used to crawl the heap for object
+ references.</li>
+</ul>
+
+<p>In general, LLVM's support for GC does not include features which can be
+adequately addressed with other features of the IR and does not specify a
+particular binary interface. On the plus side, this means that you should be
+able to integrate LLVM with an existing runtime. On the other hand, it leaves
+a lot of work for the developer of a novel language. However, it's easy to get
+started quickly and scale up to a more sophisticated implementation as your
+compiler matures.</p>
+
+</div>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2>
+ <a name="quickstart">Getting started</a>
+</h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>Using a GC with LLVM implies many things, for example:</p>
+
+<ul>
+ <li>Write a runtime library or find an existing one which implements a GC
+ heap.<ol>
+ <li>Implement a memory allocator.</li>
+ <li>Design a binary interface for the stack map, used to identify
+ references within a stack frame on the machine stack.*</li>
+ <li>Implement a stack crawler to discover functions on the call stack.*</li>
+ <li>Implement a registry for global roots.</li>
+ <li>Design a binary interface for type maps, used to identify references
+ within heap objects.</li>
+ <li>Implement a collection routine bringing together all of the above.</li>
+ </ol></li>
+ <li>Emit compatible code from your compiler.<ul>
+ <li>Initialization in the main function.</li>
+ <li>Use the <tt>gc "..."</tt> attribute to enable GC code generation
+ (or <tt>F.setGC("...")</tt>).</li>
+ <li>Use <tt>@llvm.gcroot</tt> to mark stack roots.</li>
+ <li>Use <tt>@llvm.gcread</tt> and/or <tt>@llvm.gcwrite</tt> to
+ manipulate GC references, if necessary.</li>
+ <li>Allocate memory using the GC allocation routine provided by the
+ runtime library.</li>
+ <li>Generate type maps according to your runtime's binary interface.</li>
+ </ul></li>
+ <li>Write a compiler plugin to interface LLVM with the runtime library.*<ul>
+ <li>Lower <tt>@llvm.gcread</tt> and <tt>@llvm.gcwrite</tt> to appropriate
+ code sequences.*</li>
+ <li>Compile LLVM's stack map to the binary form expected by the
+ runtime.</li>
+ </ul></li>
+ <li>Load the plugin into the compiler. Use <tt>llc -load</tt> or link the
+ plugin statically with your language's compiler.*</li>
+ <li>Link program executables with the runtime.</li>
+</ul>
+
+<p>To help with several of these tasks (those indicated with a *), LLVM
+includes a highly portable, built-in ShadowStack code generator. It is compiled
+into <tt>llc</tt> and works even with the interpreter and C backends.</p>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="quickstart-compiler">In your compiler</a>
+</h3>
+
+<div>
+
+<p>To turn the shadow stack on for your functions, first call:</p>
+
+<div class="doc_code"><pre
+>F.setGC("shadow-stack");</pre></div>
+
+<p>for each function your compiler emits. Since the shadow stack is built into
+LLVM, you do not need to load a plugin.</p>
+
+<p>Your compiler must also use <tt>@llvm.gcroot</tt> as documented.
+Don't forget to create a root for each intermediate value that is generated
+when evaluating an expression. In <tt>h(f(), g())</tt>, the result of
+<tt>f()</tt> could easily be collected if evaluating <tt>g()</tt> triggers a
+collection.</p>
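+
+<p>For example, a front-end built on the C++ API might emit each rooted local
+with a helper like the one below. This is only a sketch (the helper name is
+invented and the includes assume a contemporary source layout), but it shows
+the pattern: an <tt>alloca</tt> in the entry block, a call to
+<tt>@llvm.gcroot</tt>, and a null initialization.</p>
+
+<pre class="doc_code">
+#include "llvm/Constants.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/Function.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/Module.h"
+#include "llvm/Support/IRBuilder.h"
+using namespace llvm;
+
+// Create and register a GC root for one local of pointer type ObjPtrTy in F.
+static AllocaInst *createGCRoot(Function &amp;F, Type *ObjPtrTy,
+                                const Twine &amp;Name) {
+  LLVMContext &amp;Ctx = F.getContext();
+  // All calls to llvm.gcroot must be in the function's entry block.
+  IRBuilder&lt;&gt; B(&amp;F.getEntryBlock(), F.getEntryBlock().begin());
+
+  AllocaInst *Root = B.CreateAlloca(ObjPtrTy, 0, Name);
+
+  // Register the slot with the collector; we pass null metadata.
+  Value *RootAsI8PP =
+      B.CreateBitCast(Root, PointerType::getUnqual(Type::getInt8PtrTy(Ctx)));
+  B.CreateCall2(Intrinsic::getDeclaration(F.getParent(), Intrinsic::gcroot),
+                RootAsI8PP, Constant::getNullValue(Type::getInt8PtrTy(Ctx)));
+
+  // Null-initialize so the collector never sees an uninitialized root.
+  B.CreateStore(Constant::getNullValue(ObjPtrTy), Root);
+  return Root;
+}
+</pre>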
+
+<p>There's no need to use <tt>@llvm.gcread</tt> and <tt>@llvm.gcwrite</tt> over
+plain <tt>load</tt> and <tt>store</tt> for now. You will need them when
+switching to a more advanced GC.</p>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="quickstart-runtime">In your runtime</a>
+</h3>
+
+<div>
+
+<p>The shadow stack doesn't imply a memory allocation algorithm. A semispace
+collector or building atop <tt>malloc</tt> are great places to start, and can
+be implemented with very little code.</p>
+
+<p>When it comes time to collect, however, your runtime needs to traverse the
+stack roots, and for this it needs to integrate with the shadow stack. Luckily,
+doing so is very simple. (This code is heavily commented to help you
+understand the data structure, but there are only 20 lines of meaningful
+code.)</p>
+
+<pre class="doc_code">
+/// @brief The map for a single function's stack frame. One of these is
+/// compiled as constant data into the executable for each function.
+///
+/// Storage of metadata values is elided if the %metadata parameter to
+/// @llvm.gcroot is null.
+struct FrameMap {
+ int32_t NumRoots; //&lt; Number of roots in stack frame.
+ int32_t NumMeta; //&lt; Number of metadata entries. May be &lt; NumRoots.
+ const void *Meta[0]; //&lt; Metadata for each root.
+};
+
+/// @brief A link in the dynamic shadow stack. One of these is embedded in the
+/// stack frame of each function on the call stack.
+struct StackEntry {
+ StackEntry *Next; //&lt; Link to next stack entry (the caller's).
+ const FrameMap *Map; //&lt; Pointer to constant FrameMap.
+ void *Roots[0]; //&lt; Stack roots (in-place array).
+};
+
+/// @brief The head of the singly-linked list of StackEntries. Functions push
+/// and pop onto this in their prologue and epilogue.
+///
+/// Since there is only a global list, this technique is not threadsafe.
+StackEntry *llvm_gc_root_chain;
+
+/// @brief Calls Visitor(root, meta) for each GC root on the stack.
+/// root and meta are exactly the values passed to
+/// <tt>@llvm.gcroot</tt>.
+///
+/// Visitor could be a function to recursively mark live objects. Or it
+/// might copy them to another heap or generation.
+///
+/// @param Visitor A function to invoke for every GC root on the stack.
+void visitGCRoots(void (*Visitor)(void **Root, const void *Meta)) {
+ for (StackEntry *R = llvm_gc_root_chain; R; R = R->Next) {
+ unsigned i = 0;
+
+ // For roots [0, NumMeta), the metadata pointer is in the FrameMap.
+ for (unsigned e = R->Map->NumMeta; i != e; ++i)
+ Visitor(&amp;R->Roots[i], R->Map->Meta[i]);
+
+ // For roots [NumMeta, NumRoots), the metadata pointer is null.
+ for (unsigned e = R->Map->NumRoots; i != e; ++i)
+ Visitor(&amp;R->Roots[i], NULL);
+ }
+}</pre>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="shadow-stack">About the shadow stack</a>
+</h3>
+
+<div>
+
+<p>Unlike many GC algorithms which rely on a cooperative code generator to
+compile stack maps, this algorithm carefully maintains a linked list of stack
+roots [<a href="#henderson02">Henderson2002</a>]. This so-called "shadow stack"
+mirrors the machine stack. Maintaining this data structure is slower than using
+a stack map compiled into the executable as constant data, but has a significant
+portability advantage because it requires no special support from the target
+code generator, and does not require tricky platform-specific code to crawl
+the machine stack.</p>
+
+<p>The tradeoff for this simplicity and portability is:</p>
+
+<ul>
+ <li>High overhead per function call.</li>
+ <li>Not thread-safe.</li>
+</ul>
+
+<p>Still, it's an easy way to get started. After your compiler and runtime are
+up and running, writing a <a href="#plugin">plugin</a> will allow you to take
+advantage of <a href="#collector-algos">more advanced GC features</a> of LLVM
+in order to improve performance.</p>
+
+</div>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2>
+ <a name="core">IR features</a><a name="intrinsics"></a>
+</h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>This section describes the garbage collection facilities provided by the
+<a href="LangRef.html">LLVM intermediate representation</a>. The exact behavior
+of these IR features is specified by the binary interface implemented by a
+<a href="#plugin">code generation plugin</a>, not by this document.</p>
+
+<p>These facilities are limited to those strictly necessary; they are not
+intended to be a complete interface to any garbage collector. A program will
+need to interface with the GC library using the facilities provided by that
+library.</p>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="gcattr">Specifying GC code generation: <tt>gc "..."</tt></a>
+</h3>
+
+<div>
+
+<div class="doc_code"><tt>
+ define <i>ty</i> @<i>name</i>(...) <span style="text-decoration: underline">gc "<i>name</i>"</span> { ...
+</tt></div>
+
+<p>The <tt>gc</tt> function attribute is used to specify the desired GC style
+to the compiler. Its programmatic equivalent is the <tt>setGC</tt> method of
+<tt>Function</tt>.</p>
+
+<p>Setting <tt>gc "<i>name</i>"</tt> on a function triggers a search for a
+matching code generation plugin "<i>name</i>"; it is that plugin which defines
+the exact nature of the code generated to support GC. If none is found, the
+compiler will raise an error.</p>
+
+<p>Specifying the GC style on a per-function basis allows LLVM to link together
+programs that use different garbage collection algorithms (or none at all).</p>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="gcroot">Identifying GC roots on the stack: <tt>llvm.gcroot</tt></a>
+</h3>
+
+<div>
+
+<div class="doc_code"><tt>
+ void @llvm.gcroot(i8** %ptrloc, i8* %metadata)
+</tt></div>
+
+<p>The <tt>llvm.gcroot</tt> intrinsic is used to inform LLVM that a stack
+variable references an object on the heap and is to be tracked for garbage
+collection. The exact impact on generated code is specified by a <a
+href="#plugin">compiler plugin</a>. All calls to <tt>llvm.gcroot</tt> <b>must</b> reside
+ inside the first basic block.</p>
+
+<p>A compiler which uses mem2reg to raise imperative code using <tt>alloca</tt>
+into SSA form need only add a call to <tt>@llvm.gcroot</tt> for those variables
+which are pointers into the GC heap.</p>
+
+<p>It is also important to mark intermediate values with <tt>llvm.gcroot</tt>.
+For example, consider <tt>h(f(), g())</tt>. Beware leaking the result of
+<tt>f()</tt> in the case that <tt>g()</tt> triggers a collection. Note that
+stack variables must be initialized and marked with <tt>llvm.gcroot</tt> in
+the function's prologue.</p>
+
+<p>The first argument <b>must</b> be a value referring to an alloca instruction
+or a bitcast of an alloca. The second contains a pointer to metadata that
+should be associated with the pointer, and <b>must</b> be a constant or global
+value address. If your target collector uses tags, use a null pointer for
+metadata.</p>
+
+<p>The <tt>%metadata</tt> argument can be used to avoid requiring heap objects
+to have 'isa' pointers or tag bits. [<a href="#appel89">Appel89</a>, <a
+href="#goldberg91">Goldberg91</a>, <a href="#tolmach94">Tolmach94</a>] If
+specified, its value will be tracked along with the location of the pointer in
+the stack frame.</p>
+
+<p>Consider the following fragment of Java code:</p>
+
+<pre class="doc_code">
+ {
+ Object X; // A null-initialized reference to an object
+ ...
+ }
+</pre>
+
+<p>This block (which may be located in the middle of a function or in a loop
+nest), could be compiled to this LLVM code:</p>
+
+<pre class="doc_code">
+Entry:
+ ;; In the entry block for the function, allocate the
+ ;; stack space for X, which is an LLVM pointer.
+ %X = alloca %Object*
+
+ ;; Tell LLVM that the stack space is a stack root.
+ ;; Java has type-tags on objects, so we pass null as metadata.
+ %tmp = bitcast %Object** %X to i8**
+ call void @llvm.gcroot(i8** %tmp, i8* null)
+ ...
+
+ ;; "CodeBlock" is the block corresponding to the start
+ ;; of the scope above.
+CodeBlock:
+ ;; Java null-initializes pointers.
+ store %Object* null, %Object** %X
+
+ ...
+
+ ;; As the pointer goes out of scope, store a null value into
+ ;; it, to indicate that the value is no longer live.
+ store %Object* null, %Object** %X
+ ...
+</pre>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="barriers">Reading and writing references in the heap</a>
+</h3>
+
+<div>
+
+<p>Some collectors need to be informed when the mutator (the program that needs
+garbage collection) either reads a pointer from or writes a pointer to a field
+of a heap object. The code fragments inserted at these points are called
+<em>read barriers</em> and <em>write barriers</em>, respectively. The amount of
+code that needs to be executed is usually quite small and not on the critical
+path of any computation, so the overall performance impact of the barrier is
+tolerable.</p>
+
+<p>Barriers often require access to the <em>object pointer</em> rather than the
+<em>derived pointer</em> (which is a pointer to the field within the
+object). Accordingly, these intrinsics take both pointers as separate arguments
+for completeness. In this snippet, <tt>%object</tt> is the object pointer, and
+<tt>%derived</tt> is the derived pointer:</p>
+
+<blockquote><pre>
+ ;; An array type.
+ %class.Array = type { %class.Object, i32, [0 x %class.Object*] }
+ ...
+
+ ;; Load the object pointer from a gcroot.
+ %object = load %class.Array** %object_addr
+
+ ;; Compute the derived pointer.
+  %derived = getelementptr %class.Array* %object, i32 0, i32 2, i32 %n</pre></blockquote>
+
+<p>LLVM does not enforce this relationship between the object and derived
+pointer (although a <a href="#plugin">plugin</a> might). However, it would be
+an unusual collector that violated it.</p>
+
+<p>The use of these intrinsics is naturally optional if the target GC does not
+require the corresponding barrier. In that case, the GC plugin will replace the
+intrinsic calls with the corresponding <tt>load</tt> or <tt>store</tt>
+instruction if they are used.</p>
+
+<!-- ======================================================================= -->
+<h4>
+ <a name="gcwrite">Write barrier: <tt>llvm.gcwrite</tt></a>
+</h4>
+
+<div>
+
+<div class="doc_code"><tt>
+void @llvm.gcwrite(i8* %value, i8* %object, i8** %derived)
+</tt></div>
+
+<p>For write barriers, LLVM provides the <tt>llvm.gcwrite</tt> intrinsic
+function. It has exactly the same semantics as a non-volatile <tt>store</tt> to
+the derived pointer (the third argument). The exact code generated is specified
+by a <a href="#plugin">compiler plugin</a>.</p>
+
+<p>Many important algorithms require write barriers, including generational
+and concurrent collectors. Additionally, write barriers could be used to
+implement reference counting.</p>
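+
+<p>Continuing the earlier snippet, a store of a hypothetical <tt>%val</tt> into
+the array element addressed by <tt>%derived</tt> might be emitted as follows (a
+sketch; the bitcasts only adjust the pointer types to match the intrinsic's
+signature):</p>
+
+<blockquote><pre>
+  ;; Instead of: store %class.Object* %val, %class.Object** %derived
+  %v = bitcast %class.Object* %val to i8*
+  %o = bitcast %class.Array* %object to i8*
+  %d = bitcast %class.Object** %derived to i8**
+  call void @llvm.gcwrite(i8* %v, i8* %o, i8** %d)</pre></blockquote>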
+
+</div>
+
+<!-- ======================================================================= -->
+<h4>
+ <a name="gcread">Read barrier: <tt>llvm.gcread</tt></a>
+</h4>
+
+<div>
+
+<div class="doc_code"><tt>
+i8* @llvm.gcread(i8* %object, i8** %derived)<br>
+</tt></div>
+
+<p>For read barriers, LLVM provides the <tt>llvm.gcread</tt> intrinsic function.
+It has exactly the same semantics as a non-volatile <tt>load</tt> from the
+derived pointer (the second argument). The exact code generated is specified by
+a <a href="#plugin">compiler plugin</a>.</p>
+
+<p>Read barriers are needed by fewer algorithms than write barriers, and may
+have a greater performance impact since pointer reads are more frequent than
+writes.</p>
+
+</div>
+
+</div>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2>
+ <a name="plugin">Implementing a collector plugin</a>
+</h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>User code specifies which GC code generation to use with the <tt>gc</tt>
+function attribute or, equivalently, with the <tt>setGC</tt> method of
+<tt>Function</tt>.</p>
+
+<p>To implement a GC plugin, it is necessary to subclass
+<tt>llvm::GCStrategy</tt>, which can be accomplished in a few lines of
+boilerplate code. LLVM's infrastructure provides access to several important
+algorithms. For an uncontroversial collector, all that remains may be to
+compile LLVM's computed stack map to assembly code (using the binary
+representation expected by the runtime library). This can be accomplished in
+about 100 lines of code.</p>
+
+<p>This is not the appropriate place to implement a garbage collected heap or a
+garbage collector itself. That code should exist in the language's runtime
+library. The compiler plugin is responsible for generating code which
+conforms to the binary interface defined by library, most essentially the
+<a href="#stack-map">stack map</a>.</p>
+
+<p>To subclass <tt>llvm::GCStrategy</tt> and register it with the compiler:</p>
+
+<blockquote><pre>// lib/MyGC/MyGC.cpp - Example LLVM GC plugin
+
+#include "llvm/CodeGen/GCStrategy.h"
+#include "llvm/CodeGen/GCMetadata.h"
+#include "llvm/Support/Compiler.h"
+
+using namespace llvm;
+
+namespace {
+ class LLVM_LIBRARY_VISIBILITY MyGC : public GCStrategy {
+ public:
+ MyGC() {}
+ };
+
+ GCRegistry::Add&lt;MyGC&gt;
+ X("mygc", "My bespoke garbage collector.");
+}</pre></blockquote>
+
+<p>This boilerplate collector does nothing. More specifically:</p>
+
+<ul>
+ <li><tt>llvm.gcread</tt> calls are replaced with the corresponding
+ <tt>load</tt> instruction.</li>
+ <li><tt>llvm.gcwrite</tt> calls are replaced with the corresponding
+ <tt>store</tt> instruction.</li>
+ <li>No safe points are added to the code.</li>
+ <li>The stack map is not compiled into the executable.</li>
+</ul>
+
+<p>Using the LLVM makefiles (like the <a
+href="http://llvm.org/viewvc/llvm-project/llvm/trunk/projects/sample/">sample
+project</a>), this code can be compiled as a plugin using a simple
+makefile:</p>
+
+<blockquote><pre
+># lib/MyGC/Makefile
+
+LEVEL := ../..
+LIBRARYNAME = <var>MyGC</var>
+LOADABLE_MODULE = 1
+
+include $(LEVEL)/Makefile.common</pre></blockquote>
+
+<p>Once the plugin is compiled, code using it may be compiled using <tt>llc
+-load=<var>MyGC.so</var></tt> (though <var>MyGC.so</var> may have some other
+platform-specific extension):</p>
+
+<blockquote><pre
+>$ cat sample.ll
+define void @f() gc "mygc" {
+entry:
+ ret void
+}
+$ llvm-as &lt; sample.ll | llc -load=MyGC.so</pre></blockquote>
+
+<p>It is also possible to statically link the collector plugin into tools, such
+as a language-specific compiler front-end.</p>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="collector-algos">Overview of available features</a>
+</h3>
+
+<div>
+
+<p><tt>GCStrategy</tt> provides a range of features through which a plugin
+may do useful work. Some of these are callbacks, some are algorithms that can
+be enabled, disabled, or customized. This matrix summarizes the supported (and
+planned) features and correlates them with the collection techniques which
+typically require them.</p>
+
+<table>
+ <tr>
+ <th>Algorithm</th>
+ <th>Done</th>
+ <th>shadow stack</th>
+ <th>refcount</th>
+ <th>mark-sweep</th>
+ <th>copying</th>
+ <th>incremental</th>
+ <th>threaded</th>
+ <th>concurrent</th>
+ </tr>
+ <tr>
+ <th class="rowhead"><a href="#stack-map">stack map</a></th>
+ <td>&#10004;</td>
+ <td></td>
+ <td></td>
+ <td>&#10008;</td>
+ <td>&#10008;</td>
+ <td>&#10008;</td>
+ <td>&#10008;</td>
+ <td>&#10008;</td>
+ </tr>
+ <tr>
+ <th class="rowhead"><a href="#init-roots">initialize roots</a></th>
+ <td>&#10004;</td>
+ <td>&#10008;</td>
+ <td>&#10008;</td>
+ <td>&#10008;</td>
+ <td>&#10008;</td>
+ <td>&#10008;</td>
+ <td>&#10008;</td>
+ <td>&#10008;</td>
+ </tr>
+ <tr class="doc_warning">
+ <th class="rowhead">derived pointers</th>
+ <td>NO</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td>&#10008;*</td>
+ <td>&#10008;*</td>
+ </tr>
+ <tr>
+ <th class="rowhead"><em><a href="#custom">custom lowering</a></em></th>
+ <td>&#10004;</td>
+ <th></th>
+ <th></th>
+ <th></th>
+ <th></th>
+ <th></th>
+ <th></th>
+ <th></th>
+ </tr>
+ <tr>
+ <th class="rowhead indent">gcroot</th>
+ <td>&#10004;</td>
+ <td>&#10008;</td>
+ <td>&#10008;</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td></td>
+ </tr>
+ <tr>
+ <th class="rowhead indent">gcwrite</th>
+ <td>&#10004;</td>
+ <td></td>
+ <td>&#10008;</td>
+ <td></td>
+ <td></td>
+ <td>&#10008;</td>
+ <td></td>
+ <td>&#10008;</td>
+ </tr>
+ <tr>
+ <th class="rowhead indent">gcread</th>
+ <td>&#10004;</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td>&#10008;</td>
+ </tr>
+ <tr>
+ <th class="rowhead"><em><a href="#safe-points">safe points</a></em></th>
+ <td></td>
+ <th></th>
+ <th></th>
+ <th></th>
+ <th></th>
+ <th></th>
+ <th></th>
+ <th></th>
+ </tr>
+ <tr>
+ <th class="rowhead indent">in calls</th>
+ <td>&#10004;</td>
+ <td></td>
+ <td></td>
+ <td>&#10008;</td>
+ <td>&#10008;</td>
+ <td>&#10008;</td>
+ <td>&#10008;</td>
+ <td>&#10008;</td>
+ </tr>
+ <tr>
+ <th class="rowhead indent">before calls</th>
+ <td>&#10004;</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td>&#10008;</td>
+ <td>&#10008;</td>
+ </tr>
+ <tr class="doc_warning">
+ <th class="rowhead indent">for loops</th>
+ <td>NO</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td>&#10008;</td>
+ <td>&#10008;</td>
+ </tr>
+ <tr>
+ <th class="rowhead indent">before escape</th>
+ <td>&#10004;</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td>&#10008;</td>
+ <td>&#10008;</td>
+ </tr>
+ <tr class="doc_warning">
+ <th class="rowhead">emit code at safe points</th>
+ <td>NO</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td>&#10008;</td>
+ <td>&#10008;</td>
+ </tr>
+ <tr>
+ <th class="rowhead"><em>output</em></th>
+ <td></td>
+ <th></th>
+ <th></th>
+ <th></th>
+ <th></th>
+ <th></th>
+ <th></th>
+ <th></th>
+ </tr>
+ <tr>
+ <th class="rowhead indent"><a href="#assembly">assembly</a></th>
+ <td>&#10004;</td>
+ <td></td>
+ <td></td>
+ <td>&#10008;</td>
+ <td>&#10008;</td>
+ <td>&#10008;</td>
+ <td>&#10008;</td>
+ <td>&#10008;</td>
+ </tr>
+ <tr class="doc_warning">
+ <th class="rowhead indent">JIT</th>
+ <td>NO</td>
+ <td></td>
+ <td></td>
+ <td class="optl">&#10008;</td>
+ <td class="optl">&#10008;</td>
+ <td class="optl">&#10008;</td>
+ <td class="optl">&#10008;</td>
+ <td class="optl">&#10008;</td>
+ </tr>
+ <tr class="doc_warning">
+ <th class="rowhead indent">obj</th>
+ <td>NO</td>
+ <td></td>
+ <td></td>
+ <td class="optl">&#10008;</td>
+ <td class="optl">&#10008;</td>
+ <td class="optl">&#10008;</td>
+ <td class="optl">&#10008;</td>
+ <td class="optl">&#10008;</td>
+ </tr>
+ <tr class="doc_warning">
+ <th class="rowhead">live analysis</th>
+ <td>NO</td>
+ <td></td>
+ <td></td>
+ <td class="optl">&#10008;</td>
+ <td class="optl">&#10008;</td>
+ <td class="optl">&#10008;</td>
+ <td class="optl">&#10008;</td>
+ <td class="optl">&#10008;</td>
+ </tr>
+ <tr class="doc_warning">
+ <th class="rowhead">register map</th>
+ <td>NO</td>
+ <td></td>
+ <td></td>
+ <td class="optl">&#10008;</td>
+ <td class="optl">&#10008;</td>
+ <td class="optl">&#10008;</td>
+ <td class="optl">&#10008;</td>
+ <td class="optl">&#10008;</td>
+ </tr>
+ <tr>
+ <td colspan="10">
+ <div><span class="doc_warning">*</span> Derived pointers only pose a
+ hazard to copying collectors.</div>
+ <div><span class="optl">&#10008;</span> in gray denotes a feature which
+ could be utilized if available.</div>
+ </td>
+ </tr>
+</table>
+
+<p>To be clear, the collection techniques above are defined as:</p>
+
+<dl>
+ <dt>Shadow Stack</dt>
+ <dd>The mutator carefully maintains a linked list of stack roots.</dd>
+ <dt>Reference Counting</dt>
+ <dd>The mutator maintains a reference count for each object and frees an
+ object when its count falls to zero.</dd>
+ <dt>Mark-Sweep</dt>
+ <dd>When the heap is exhausted, the collector marks reachable objects starting
+ from the roots, then deallocates unreachable objects in a sweep
+ phase.</dd>
+ <dt>Copying</dt>
+ <dd>As reachability analysis proceeds, the collector copies objects from one
+ heap area to another, compacting them in the process. Copying collectors
+ enable highly efficient "bump pointer" allocation and can improve locality
+ of reference.</dd>
+ <dt>Incremental</dt>
+ <dd>(Including generational collectors.) Incremental collectors generally have
+ all the properties of a copying collector (regardless of whether the
+ mature heap is compacting), but bring the added complexity of requiring
+ write barriers.</dd>
+ <dt>Threaded</dt>
+ <dd>Denotes a multithreaded mutator; the collector must still stop the mutator
+ ("stop the world") before beginning reachability analysis. Stopping a
+ multithreaded mutator is a complicated problem. It generally requires
+ highly platform specific code in the runtime, and the production of
+ carefully designed machine code at safe points.</dd>
+ <dt>Concurrent</dt>
+ <dd>In this technique, the mutator and the collector run concurrently, with
+ the goal of eliminating pause times. In a <em>cooperative</em> collector,
+ the mutator further aids with collection should a pause occur, allowing
+ collection to take advantage of multiprocessor hosts. The "stop the world"
+ problem of threaded collectors is generally still present to a limited
+ extent. Sophisticated marking algorithms are necessary. Read barriers may
+ be necessary.</dd>
+</dl>
+
+<p>As the matrix indicates, LLVM's garbage collection infrastructure is already
+suitable for a wide variety of collectors, but does not currently extend to
+multithreaded programs. This will be added in the future as there is
+interest.</p>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="stack-map">Computing stack maps</a>
+</h3>
+
+<div>
+
+<p>LLVM automatically computes a stack map. One of the most important tasks
+of a <tt>GCStrategy</tt> is to compile this information into the executable in
+the binary representation expected by the runtime library.</p>
+
+<p>The stack map consists of the location and identity of each GC root in
+each function in the module. For each root:</p>
+
+<ul>
+ <li><tt>RootNum</tt>: The index of the root.</li>
+ <li><tt>StackOffset</tt>: The offset of the object relative to the frame
+ pointer.</li>
+ <li><tt>RootMetadata</tt>: The value passed as the <tt>%metadata</tt>
+ parameter to the <a href="#gcroot"><tt>@llvm.gcroot</tt></a> intrinsic.</li>
+</ul>
+
+<p>Also, for the function as a whole:</p>
+
+<ul>
+ <li><tt>getFrameSize()</tt>: The overall size of the function's initial
+ stack frame, not accounting for any dynamic allocation.</li>
+ <li><tt>roots_size()</tt>: The count of roots in the function.</li>
+</ul>
+
+<p>To access the stack map, use <tt>GCFunctionInfo::roots_begin()</tt> and
+<tt>roots_end()</tt> from the <tt><a
+href="#assembly">GCMetadataPrinter</a></tt>:</p>
+
+<blockquote><pre
+>for (iterator I = begin(), E = end(); I != E; ++I) {
+ GCFunctionInfo *FI = *I;
+ unsigned FrameSize = FI-&gt;getFrameSize();
+ size_t RootCount = FI-&gt;roots_size();
+
+ for (GCFunctionInfo::roots_iterator RI = FI-&gt;roots_begin(),
+ RE = FI-&gt;roots_end();
+ RI != RE; ++RI) {
+ int RootNum = RI->Num;
+ int RootStackOffset = RI->StackOffset;
+ Constant *RootMetadata = RI->Metadata;
+ }
+}</pre></blockquote>
+
+<p>If the <tt>llvm.gcroot</tt> intrinsic is eliminated before code generation by
+a custom lowering pass, LLVM will compute an empty stack map. This may be useful
+for collector plugins which implement reference counting or a shadow stack.</p>
+
+</div>
+
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="init-roots">Initializing roots to null: <tt>InitRoots</tt></a>
+</h3>
+
+<div>
+
+<blockquote><pre
+>MyGC::MyGC() {
+ InitRoots = true;
+}</pre></blockquote>
+
+<p>When set, LLVM will automatically initialize each root to <tt>null</tt> upon
+entry to the function. This prevents the GC's sweep phase from visiting
+uninitialized pointers, which will almost certainly cause it to crash. This
+initialization occurs before custom lowering, so the two may be used
+together.</p>
+
+<p>Since LLVM does not yet compute liveness information, there is no means of
+distinguishing an uninitialized stack root from an initialized one. Therefore,
+this feature should be used by all GC plugins. It is enabled by default.</p>
+
+</div>
+
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="custom">Custom lowering of intrinsics: <tt>CustomRoots</tt>,
+ <tt>CustomReadBarriers</tt>, and <tt>CustomWriteBarriers</tt></a>
+</h3>
+
+<div>
+
+<p>For GCs which use barriers or unusual treatment of stack roots, these
+flags allow the collector to perform arbitrary transformations of the LLVM
+IR:</p>
+
+<blockquote><pre
+>class MyGC : public GCStrategy {
+public:
+ MyGC() {
+ CustomRoots = true;
+ CustomReadBarriers = true;
+ CustomWriteBarriers = true;
+ }
+
+ virtual bool initializeCustomLowering(Module &amp;M);
+ virtual bool performCustomLowering(Function &amp;F);
+};</pre></blockquote>
+
+<p>If any of these flags are set, then LLVM suppresses its default lowering for
+the corresponding intrinsics and instead calls
+<tt>performCustomLowering</tt>.</p>
+
+<p>LLVM's default action for each intrinsic is as follows:</p>
+
+<ul>
+ <li><tt>llvm.gcroot</tt>: Leave it alone. The code generator must see it
+ or the stack map will not be computed.</li>
+ <li><tt>llvm.gcread</tt>: Substitute a <tt>load</tt> instruction.</li>
+ <li><tt>llvm.gcwrite</tt>: Substitute a <tt>store</tt> instruction.</li>
+</ul>
+
+<p>If <tt>CustomReadBarriers</tt> or <tt>CustomWriteBarriers</tt> are specified,
+then <tt>performCustomLowering</tt> <strong>must</strong> eliminate the
+corresponding barriers.</p>
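+
+<p>For example, a hypothetical reference-counting plugin might lower each
+<tt>llvm.gcwrite</tt> into explicit count adjustments around a plain store.
+This is only a sketch; <tt>@incref</tt> and <tt>@decref</tt> are hypothetical
+runtime functions:</p>
+
+<blockquote><pre>
+  ;; Before lowering:
+  call void @llvm.gcwrite(i8* %v, i8* %o, i8** %slot)
+
+  ;; After lowering:
+  %old = load i8** %slot
+  call void @decref(i8* %old)
+  call void @incref(i8* %v)
+  store i8* %v, i8** %slot</pre></blockquote>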
+
+<p><tt>performCustomLowering</tt> must comply with the same restrictions as <a
+href="WritingAnLLVMPass.html#runOnFunction"><tt
+>FunctionPass::runOnFunction</tt></a>.
+Likewise, <tt>initializeCustomLowering</tt> has the same semantics as <a
+href="WritingAnLLVMPass.html#doInitialization_mod"><tt
+>Pass::doInitialization(Module&amp;)</tt></a>.</p>
+
+<p>The following can be used as a template:</p>
+
+<blockquote><pre
+>#include "llvm/Module.h"
+#include "llvm/IntrinsicInst.h"
+
+bool MyGC::initializeCustomLowering(Module &amp;M) {
+ return false;
+}
+
+bool MyGC::performCustomLowering(Function &amp;F) {
+ bool MadeChange = false;
+
+ for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
+ for (BasicBlock::iterator II = BB-&gt;begin(), E = BB-&gt;end(); II != E; )
+ if (IntrinsicInst *CI = dyn_cast&lt;IntrinsicInst&gt;(II++))
+ if (Function *F = CI-&gt;getCalledFunction())
+ switch (F-&gt;getIntrinsicID()) {
+ case Intrinsic::gcwrite:
+ // Handle llvm.gcwrite.
+ CI-&gt;eraseFromParent();
+ MadeChange = true;
+ break;
+ case Intrinsic::gcread:
+ // Handle llvm.gcread.
+ CI-&gt;eraseFromParent();
+ MadeChange = true;
+ break;
+ case Intrinsic::gcroot:
+ // Handle llvm.gcroot.
+ CI-&gt;eraseFromParent();
+ MadeChange = true;
+ break;
+ }
+
+ return MadeChange;
+}</pre></blockquote>
+
+</div>
+
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="safe-points">Generating safe points: <tt>NeededSafePoints</tt></a>
+</h3>
+
+<div>
+
+<p>LLVM can compute four kinds of safe points:</p>
+
+<blockquote><pre
+>namespace GC {
+ /// PointKind - The type of a collector-safe point.
+ ///
+ enum PointKind {
+ Loop, //&lt; Instr is a loop (backwards branch).
+ Return, //&lt; Instr is a return instruction.
+ PreCall, //&lt; Instr is a call instruction.
+ PostCall //&lt; Instr is the return address of a call.
+ };
+}</pre></blockquote>
+
+<p>A collector can request any combination of the four by setting the
+<tt>NeededSafePoints</tt> mask:</p>
+
+<blockquote><pre
+>MyGC::MyGC() {
+ NeededSafePoints = 1 &lt;&lt; GC::Loop
+ | 1 &lt;&lt; GC::Return
+ | 1 &lt;&lt; GC::PreCall
+ | 1 &lt;&lt; GC::PostCall;
+}</pre></blockquote>
+
+<p>It can then use the following routines to access safe points.</p>
+
+<blockquote><pre
+>for (iterator I = begin(), E = end(); I != E; ++I) {
+ GCFunctionInfo *MD = *I;
+ size_t PointCount = MD-&gt;size();
+
+ for (GCFunctionInfo::iterator PI = MD-&gt;begin(),
+ PE = MD-&gt;end(); PI != PE; ++PI) {
+ GC::PointKind PointKind = PI-&gt;Kind;
+ unsigned PointNum = PI-&gt;Num;
+ }
+}
+</pre></blockquote>
+
+<p>Almost every collector requires <tt>PostCall</tt> safe points, since these
+correspond to the moments when the function is suspended during a call to a
+subroutine.</p>
+
+<p>Threaded programs generally require <tt>Loop</tt> safe points to guarantee
+that the application will reach a safe point within a bounded amount of time,
+even if it is executing a long-running loop which contains no function
+calls.</p>
+
+<p>Threaded collectors may also require <tt>Return</tt> and <tt>PreCall</tt>
+safe points to implement "stop the world" techniques using self-modifying code,
+where it is important that the program not exit the function without reaching a
+safe point (because only the topmost function has been patched).</p>
+
+</div>
+
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="assembly">Emitting assembly code: <tt>GCMetadataPrinter</tt></a>
+</h3>
+
+<div>
+
+<p>LLVM allows a plugin to print arbitrary assembly code before and after the
+rest of a module's assembly code. At the end of the module, the GC can compile
+the LLVM stack map into assembly code. (At the beginning, this information is not
+yet computed.)</p>
+
+<p>Since AsmWriter and CodeGen are separate components of LLVM, a separate
+abstract base class and registry are provided for printing assembly code:
+<tt>GCMetadataPrinter</tt> and <tt>GCMetadataPrinterRegistry</tt>. The AsmWriter
+will look for such a subclass if the <tt>GCStrategy</tt> sets
+<tt>UsesMetadata</tt>:</p>
+
+<blockquote><pre
+>MyGC::MyGC() {
+ UsesMetadata = true;
+}</pre></blockquote>
+
+<p>This separation allows JIT-only clients to be smaller.</p>
+
+<p>Note that LLVM does not currently have analogous APIs to support code
+generation in the JIT, nor using the object writers.</p>
+
+<blockquote><pre
+>// lib/MyGC/MyGCPrinter.cpp - Example LLVM GC printer
+
+#include "llvm/CodeGen/GCMetadataPrinter.h"
+#include "llvm/Support/Compiler.h"
+
+using namespace llvm;
+
+namespace {
+ class LLVM_LIBRARY_VISIBILITY MyGCPrinter : public GCMetadataPrinter {
+ public:
+ virtual void beginAssembly(std::ostream &amp;OS, AsmPrinter &amp;AP,
+ const TargetAsmInfo &amp;TAI);
+
+ virtual void finishAssembly(std::ostream &amp;OS, AsmPrinter &amp;AP,
+ const TargetAsmInfo &amp;TAI);
+ };
+
+ GCMetadataPrinterRegistry::Add&lt;MyGCPrinter&gt;
+ X("mygc", "My bespoke garbage collector.");
+}</pre></blockquote>
+
+<p>The collector should use <tt>AsmPrinter</tt> and <tt>TargetAsmInfo</tt> to
+print portable assembly code to the <tt>std::ostream</tt>. The collector itself
+contains the stack map for the entire module, and may access the
+<tt>GCFunctionInfo</tt> using its own <tt>begin()</tt> and <tt>end()</tt>
+methods. Here's a realistic example:</p>
+
+<blockquote><pre
+>#include "llvm/CodeGen/AsmPrinter.h"
+#include "llvm/Function.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetAsmInfo.h"
+
+void MyGCPrinter::beginAssembly(std::ostream &amp;OS, AsmPrinter &amp;AP,
+ const TargetAsmInfo &amp;TAI) {
+ // Nothing to do.
+}
+
+void MyGCPrinter::finishAssembly(std::ostream &amp;OS, AsmPrinter &amp;AP,
+ const TargetAsmInfo &amp;TAI) {
+ // Set up for emitting addresses.
+ const char *AddressDirective;
+ int AddressAlignLog;
+ if (AP.TM.getTargetData()->getPointerSize() == sizeof(int32_t)) {
+ AddressDirective = TAI.getData32bitsDirective();
+ AddressAlignLog = 2;
+ } else {
+ AddressDirective = TAI.getData64bitsDirective();
+ AddressAlignLog = 3;
+ }
+
+ // Put this in the data section.
+ AP.SwitchToDataSection(TAI.getDataSection());
+
+ // For each function...
+ for (iterator FI = begin(), FE = end(); FI != FE; ++FI) {
+ GCFunctionInfo &amp;MD = **FI;
+
+ // Emit this data structure:
+ //
+ // struct {
+ // int32_t PointCount;
+ // struct {
+ // void *SafePointAddress;
+ // int32_t LiveCount;
+ // int32_t LiveOffsets[LiveCount];
+ // } Points[PointCount];
+ // } __gcmap_&lt;FUNCTIONNAME&gt;;
+
+ // Align to address width.
+ AP.EmitAlignment(AddressAlignLog);
+
+ // Emit the symbol by which the stack map entry can be found.
+ std::string Symbol;
+ Symbol += TAI.getGlobalPrefix();
+ Symbol += "__gcmap_";
+ Symbol += MD.getFunction().getName();
+ if (const char *GlobalDirective = TAI.getGlobalDirective())
+ OS &lt;&lt; GlobalDirective &lt;&lt; Symbol &lt;&lt; "\n";
+ OS &lt;&lt; TAI.getGlobalPrefix() &lt;&lt; Symbol &lt;&lt; ":\n";
+
+ // Emit PointCount.
+ AP.EmitInt32(MD.size());
+ AP.EOL("safe point count");
+
+ // And each safe point...
+ for (GCFunctionInfo::iterator PI = MD.begin(),
+ PE = MD.end(); PI != PE; ++PI) {
+ // Align to address width.
+ AP.EmitAlignment(AddressAlignLog);
+
+ // Emit the address of the safe point.
+ OS &lt;&lt; AddressDirective
+ &lt;&lt; TAI.getPrivateGlobalPrefix() &lt;&lt; "label" &lt;&lt; PI-&gt;Num;
+ AP.EOL("safe point address");
+
+ // Emit the stack frame size.
+ AP.EmitInt32(MD.getFrameSize());
+ AP.EOL("stack frame size");
+
+ // Emit the number of live roots in the function.
+ AP.EmitInt32(MD.live_size(PI));
+ AP.EOL("live root count");
+
+ // And for each live root...
+ for (GCFunctionInfo::live_iterator LI = MD.live_begin(PI),
+ LE = MD.live_end(PI);
+ LI != LE; ++LI) {
+ // Print its offset within the stack frame.
+ AP.EmitInt32(LI-&gt;StackOffset);
+ AP.EOL("stack offset");
+ }
+ }
+ }
+}
+</pre></blockquote>
+
+</div>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2>
+ <a name="references">References</a>
+</h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p><a name="appel89">[Appel89]</a> Runtime Tags Aren't Necessary. Andrew
+W. Appel. Lisp and Symbolic Computation 19(7):703-705, July 1989.</p>
+
+<p><a name="goldberg91">[Goldberg91]</a> Tag-free garbage collection for
+strongly typed programming languages. Benjamin Goldberg. ACM SIGPLAN
+PLDI'91.</p>
+
+<p><a name="tolmach94">[Tolmach94]</a> Tag-free garbage collection using
+explicit type parameters. Andrew Tolmach. Proceedings of the 1994 ACM
+conference on LISP and functional programming.</p>
+
+<p><a name="henderson02">[Henderson2002]</a> <a
+href="http://citeseer.ist.psu.edu/henderson02accurate.html">
+Accurate Garbage Collection in an Uncooperative Environment</a>.
+Fergus Henderson. International Symposium on Memory Management 2002.</p>
+
+</div>
+
+
+<!-- *********************************************************************** -->
+
+<hr>
+<address>
+ <a href="http://jigsaw.w3.org/css-validator/check/referer"><img
+ src="http://jigsaw.w3.org/css-validator/images/vcss-blue" alt="Valid CSS"></a>
+ <a href="http://validator.w3.org/check/referer"><img
+ src="http://www.w3.org/Icons/valid-html401-blue" alt="Valid HTML 4.01"></a>
+
+ <a href="mailto:sabre@nondot.org">Chris Lattner</a><br>
+ <a href="http://llvm.org/">LLVM Compiler Infrastructure</a><br>
+ Last modified: $Date$
+</address>
+
+</body>
+</html>
diff --git a/docs/GetElementPtr.rst b/docs/GetElementPtr.rst
new file mode 100644
index 00000000000..f6f904b2e35
--- /dev/null
+++ b/docs/GetElementPtr.rst
@@ -0,0 +1,538 @@
+.. _gep:
+
+=======================================
+The Often Misunderstood GEP Instruction
+=======================================
+
+.. contents::
+ :local:
+
+Introduction
+============
+
+This document seeks to dispel the mystery and confusion surrounding LLVM's
+`GetElementPtr <LangRef.html#i_getelementptr>`_ (GEP) instruction. Questions
+about the wily GEP instruction are probably the most frequently occurring
+questions once a developer gets down to coding with LLVM. Here we lay out the
+sources of confusion and show that the GEP instruction is really quite simple.
+
+Address Computation
+===================
+
+When people are first confronted with the GEP instruction, they tend to relate
+it to known concepts from other programming paradigms, most notably C array
+indexing and field selection. GEP closely resembles C array indexing and field
+selection, however it's is a little different and this leads to the following
+questions.
+
+What is the first index of the GEP instruction?
+-----------------------------------------------
+
+Quick answer: The index stepping through the first operand.
+
+The confusion with the first index usually arises from thinking about the
+GetElementPtr instruction as if it was a C index operator. They aren't the
+same. For example, when we write, in "C":
+
+.. code-block:: c++
+
+ AType *Foo;
+ ...
+ X = &Foo->F;
+
+it is natural to think that there is only one index, the selection of the field
+``F``. However, in this example, ``Foo`` is a pointer. That pointer
+must be indexed explicitly in LLVM. C, on the other hand, indices through it
+transparently. To arrive at the same address location as the C code, you would
+provide the GEP instruction with two index operands. The first operand indexes
+through the pointer; the second operand indexes the field ``F`` of the
+structure, just as if you wrote:
+
+.. code-block:: c++
+
+ X = &Foo[0].F;
+
+Sometimes this question gets rephrased as:
+
+.. _GEP index through first pointer:
+
+ *Why is it okay to index through the first pointer, but subsequent pointers
+ won't be dereferenced?*
+
+The answer is simply because memory does not have to be accessed to perform the
+computation. The first operand to the GEP instruction must be a value of a
+pointer type. The value of the pointer is provided directly to the GEP
+instruction as an operand without any need for accessing memory. It must,
+therefore, be indexed and requires an index operand. Consider this example:
+
+.. code-block:: c++
+
+ struct munger_struct {
+ int f1;
+ int f2;
+ };
+ void munge(struct munger_struct *P) {
+ P[0].f1 = P[1].f1 + P[2].f2;
+ }
+ ...
+ munger_struct Array[3];
+ ...
+ munge(Array);
+
+In this "C" example, the front end compiler (llvm-gcc) will generate three GEP
+instructions for the three indices through "P" in the assignment statement. The
+function argument ``P`` will be the first operand of each of these GEP
+instructions. The second operand indexes through that pointer. The third
+operand will be the field offset into the ``struct munger_struct`` type, for
+either the ``f1`` or ``f2`` field. So, in LLVM assembly the ``munge`` function
+looks like:
+
+.. code-block:: llvm
+
+ void %munge(%struct.munger_struct* %P) {
+ entry:
+ %tmp = getelementptr %struct.munger_struct* %P, i32 1, i32 0
+ %tmp = load i32* %tmp
+ %tmp6 = getelementptr %struct.munger_struct* %P, i32 2, i32 1
+ %tmp7 = load i32* %tmp6
+ %tmp8 = add i32 %tmp7, %tmp
+ %tmp9 = getelementptr %struct.munger_struct* %P, i32 0, i32 0
+ store i32 %tmp8, i32* %tmp9
+ ret void
+ }
+
+In each case the first operand is the pointer through which the GEP instruction
+starts. The same is true whether the first operand is an argument, allocated
+memory, or a global variable.
+
+To make this clear, let's consider a more obtuse example:
+
+.. code-block:: llvm
+
+ %MyVar = uninitialized global i32
+ ...
+ %idx1 = getelementptr i32* %MyVar, i64 0
+ %idx2 = getelementptr i32* %MyVar, i64 1
+ %idx3 = getelementptr i32* %MyVar, i64 2
+
+These GEP instructions are simply making address computations from the base
+address of ``MyVar``. They compute, as follows (using C syntax):
+
+.. code-block:: c++
+
+ idx1 = (char*) &MyVar + 0
+ idx2 = (char*) &MyVar + 4
+ idx3 = (char*) &MyVar + 8
+
+Since the type ``i32`` is known to be four bytes long, the indices 0, 1 and 2
+translate into memory offsets of 0, 4, and 8, respectively. No memory is
+accessed to make these computations because the address of ``%MyVar`` is passed
+directly to the GEP instructions.
+
+The obtuse part of this example is in the cases of ``%idx2`` and ``%idx3``. They
+result in the computation of addresses that point to memory past the end of the
+``%MyVar`` global, which is only one ``i32`` long, not three ``i32``\s long.
+While this is legal in LLVM, it is inadvisable because any load or store with
+the pointer that results from these GEP instructions would produce undefined
+results.
+
+Why is the extra 0 index required?
+----------------------------------
+
+Quick answer: there are no superfluous indices.
+
+This question arises most often when the GEP instruction is applied to a global
+variable which is always a pointer type. For example, consider this:
+
+.. code-block:: llvm
+
+ %MyStruct = uninitialized global { float*, i32 }
+ ...
+ %idx = getelementptr { float*, i32 }* %MyStruct, i64 0, i32 1
+
+The GEP above yields an ``i32*`` by indexing the ``i32`` typed field of the
+structure ``%MyStruct``. When people first look at it, they wonder why the ``i64
+0`` index is needed. However, a closer inspection of how globals and GEPs work
+reveals the need. Becoming aware of the following facts will dispel the
+confusion:
+
+#. The type of ``%MyStruct`` is *not* ``{ float*, i32 }`` but rather ``{ float*,
+ i32 }*``. That is, ``%MyStruct`` is a pointer to a structure containing a
+ pointer to a ``float`` and an ``i32``.
+
+#. Point #1 is evidenced by noticing the type of the first operand of the GEP
+ instruction (``%MyStruct``) which is ``{ float*, i32 }*``.
+
+#. The first index, ``i64 0`` is required to step over the global variable
+ ``%MyStruct``. Since the first argument to the GEP instruction must always
+ be a value of pointer type, the first index steps through that pointer. A
+ value of 0 means 0 elements offset from that pointer.
+
+#. The second index, ``i32 1`` selects the second field of the structure (the
+ ``i32``).
+
+What is dereferenced by GEP?
+----------------------------
+
+Quick answer: nothing.
+
+The GetElementPtr instruction dereferences nothing. That is, it doesn't access
+memory in any way. That's what the Load and Store instructions are for. GEP is
+only involved in the computation of addresses. For example, consider this:
+
+.. code-block:: llvm
+
+ %MyVar = uninitialized global { [40 x i32 ]* }
+ ...
+ %idx = getelementptr { [40 x i32]* }* %MyVar, i64 0, i32 0, i64 0, i64 17
+
+In this example, we have a global variable, ``%MyVar`` that is a pointer to a
+structure containing a pointer to an array of 40 ints. The GEP instruction seems
+to be accessing the 18th integer of the structure's array of ints. However, this
+is actually an illegal GEP instruction. It won't compile. The reason is that the
+pointer in the structure *must* be dereferenced in order to index into the
+array of 40 ints. Since the GEP instruction never accesses memory, it is
+illegal.
+
+In order to access the 18th integer in the array, you would need to do the
+following:
+
+.. code-block:: llvm
+
+  %idx1 = getelementptr { [40 x i32]* }* %MyVar, i64 0, i32 0
+  %arr = load [40 x i32]** %idx1
+  %idx2 = getelementptr [40 x i32]* %arr, i64 0, i64 17
+
+In this case, we have to load the pointer in the structure with a load
+instruction before we can index into the array. If the example was changed to:
+
+.. code-block:: llvm
+
+ %MyVar = uninitialized global { [40 x i32 ] }
+ ...
+  %idx = getelementptr { [40 x i32] }* %MyVar, i64 0, i32 0, i64 17
+
+then everything works fine. In this case, the structure does not contain a
+pointer and the GEP instruction can index through the global variable, into the
+first field of the structure and access the 18th ``i32`` in the array there.
+
+Why don't GEP x,0,0,1 and GEP x,1 alias?
+----------------------------------------
+
+Quick Answer: They compute different address locations.
+
+If you look at the first indices in these GEP instructions you find that they
+are different (0 and 1), therefore the address computation diverges with that
+index. Consider this example:
+
+.. code-block:: llvm
+
+ %MyVar = global { [10 x i32 ] }
+ %idx1 = getelementptr { [10 x i32 ] }* %MyVar, i64 0, i32 0, i64 1
+ %idx2 = getelementptr { [10 x i32 ] }* %MyVar, i64 1
+
+In this example, ``idx1`` computes the address of the second integer in the
+array that is in the structure in ``%MyVar``, that is ``MyVar+4``. The type of
+``idx1`` is ``i32*``. However, ``idx2`` computes the address of *the next*
+structure after ``%MyVar``. The type of ``idx2`` is ``{ [10 x i32] }*`` and its
+value is equivalent to ``MyVar + 40`` because it indexes past the ten 4-byte
+integers in ``MyVar``. Obviously, in such a situation, the pointers don't
+alias.
+
+Why do GEP x,1,0,0 and GEP x,1 alias?
+-------------------------------------
+
+Quick Answer: They compute the same address location.
+
+These two GEP instructions will compute the same address because indexing
+through the 0th element does not change the address. However, it does change the
+type. Consider this example:
+
+.. code-block:: llvm
+
+ %MyVar = global { [10 x i32 ] }
+ %idx1 = getelementptr { [10 x i32 ] }* %MyVar, i64 1, i32 0, i64 0
+ %idx2 = getelementptr { [10 x i32 ] }* %MyVar, i64 1
+
+In this example, the value of ``%idx1`` is ``%MyVar+40`` and its type is
+``i32*``. The value of ``%idx2`` is also ``MyVar+40`` but its type is ``{ [10 x
+i32] }*``.
+
+Can GEP index into vector elements?
+-----------------------------------
+
+This hasn't always been forcefully disallowed, though it's not recommended. It
+leads to awkward special cases in the optimizers, and fundamental inconsistency
+in the IR. In the future, it will probably be outright disallowed.
+
+What effect do address spaces have on GEPs?
+-------------------------------------------
+
+None, except that the address space qualifier on the first operand pointer type
+always matches the address space qualifier on the result type.
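+
+For example, with a hypothetical pointer ``%p`` of type ``i32 addrspace(1)*``:
+
+.. code-block:: llvm
+
+  ; The result inherits the addrspace(1) qualifier from %p.
+  %q = getelementptr i32 addrspace(1)* %p, i64 4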
+
+How is GEP different from ``ptrtoint``, arithmetic, and ``inttoptr``?
+---------------------------------------------------------------------
+
+It's very similar; there are only subtle differences.
+
+With ptrtoint, you have to pick an integer type. One approach is to pick i64;
+this is safe on everything LLVM supports (LLVM internally assumes pointers are
+never wider than 64 bits in many places), and the optimizer will actually narrow
+the i64 arithmetic down to the actual pointer size on targets which don't
+support 64-bit arithmetic in most cases. However, there are some cases where it
+doesn't do this. With GEP you can avoid this problem.
+
+Also, GEP carries additional pointer aliasing rules. It's invalid to take a GEP
+from one object, address into a different separately allocated object, and
+dereference it. IR producers (front-ends) must follow this rule, and consumers
+(optimizers, specifically alias analysis) benefit from being able to rely on
+it. See the `Rules`_ section for more information.
+
+And, GEP is more concise in common cases.
+
+However, for the underlying integer computation implied, there is no
+difference.
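+
+As a sketch, both sequences below compute the address 40 bytes past a
+hypothetical ``i32*`` value ``%P``, but only the first carries GEP's pointer
+aliasing rules:
+
+.. code-block:: llvm
+
+  ; GEP form: step over ten i32 elements (40 bytes).
+  %gep = getelementptr i32* %P, i64 10
+
+  ; Explicit integer form: same address, no GEP aliasing guarantees.
+  %int1 = ptrtoint i32* %P to i64
+  %int2 = add i64 %int1, 40
+  %raw  = inttoptr i64 %int2 to i32*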
+
+
+I'm writing a backend for a target which needs custom lowering for GEP. How do I do this?
+-----------------------------------------------------------------------------------------
+
+You don't. The integer computation implied by a GEP is target-independent.
+Typically what you'll need to do is make your backend pattern-match expressions
+trees involving ADD, MUL, etc., which are what GEP is lowered into. This has the
+advantage of letting your code work correctly in more cases.
+
+GEP does use target-dependent parameters for the size and layout of data types,
+which targets can customize.
+
+If you require support for addressing units which are not 8 bits, you'll need to
+fix a lot of code in the backend, with GEP lowering being only a small piece of
+the overall picture.
+
+How does VLA addressing work with GEPs?
+---------------------------------------
+
+GEPs don't natively support VLAs. LLVM's type system is entirely static, and GEP
+address computations are guided by an LLVM type.
+
+VLA indices can be implemented as linearized indices. For example, an expression
+like ``X[a][b][c]`` must be effectively lowered into a form like
+``X[a*m+b*n+c]``, so that it appears to the GEP as a single-dimensional array
+reference.
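+
+For instance, a front end might emit something like the following for a
+hypothetical ``double`` VLA, where ``%m`` and ``%n`` hold the element strides of
+the first two dimensions (computed from the VLA bounds):
+
+.. code-block:: llvm
+
+  ; Linearize X[a][b][c] into a single index, then do one flat GEP.
+  %am  = mul i64 %a, %m
+  %bn  = mul i64 %b, %n
+  %t   = add i64 %am, %bn
+  %idx = add i64 %t, %c
+  %elt = getelementptr double* %X, i64 %idx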
+
+This means if you want to write an analysis which understands array indices and
+you want to support VLAs, your code will have to be prepared to reverse-engineer
+the linearization. One way to solve this problem is to use the ScalarEvolution
+library, which always presents VLA and non-VLA indexing in the same manner.
+
+.. _Rules:
+
+Rules
+=====
+
+What happens if an array index is out of bounds?
+------------------------------------------------
+
+There are two senses in which an array index can be out of bounds.
+
+First, there's the array type which comes from the (static) type of the first
+operand to the GEP. Indices greater than the number of elements in the
+corresponding static array type are valid. There is no problem with out of
+bounds indices in this sense. Indexing into an array only depends on the size of
+the array element, not the number of elements.
+
+A common example of how this is used is arrays where the size is not known.
+It's common to use array types with zero length to represent these. The fact
+that the static type says there are zero elements is irrelevant; it's perfectly
+valid to compute arbitrary element indices, as the computation only depends on
+the size of the array element, not the number of elements. Note that zero-sized
+arrays are not a special case here.
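+
+For example, a hypothetical variable-length buffer could be described with a
+zero-length trailing array and indexed past its static bound:
+
+.. code-block:: llvm
+
+  ; %hdr points to a header followed by a payload of unknown length.
+  %p = getelementptr { i32, [0 x i32] }* %hdr, i64 0, i32 1, i64 7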
+
+This sense is unconnected with the ``inbounds`` keyword. The ``inbounds`` keyword is
+designed to describe low-level pointer arithmetic overflow conditions, rather
+than high-level array indexing rules.
+
+Analysis passes which wish to understand array indexing should not assume that
+the static array type bounds are respected.
+
+The second sense of being out of bounds is computing an address that's beyond
+the actual underlying allocated object.
+
+With the ``inbounds`` keyword, the result value of the GEP is undefined if the
+address is outside the actual underlying allocated object and not the address
+one-past-the-end.
+
+Without the ``inbounds`` keyword, there are no restrictions on computing
+out-of-bounds addresses. Obviously, performing a load or a store requires an
+address of allocated and sufficiently aligned memory. But the GEP itself is only
+concerned with computing addresses.
+
+Can array indices be negative?
+------------------------------
+
+Yes. This is basically a special case of array indices being out of bounds.
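+
+For example, stepping backwards one element from a hypothetical ``i32*`` value:
+
+.. code-block:: llvm
+
+  %prev = getelementptr i32* %p, i64 -1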
+
+Can I compare two values computed with GEPs?
+--------------------------------------------
+
+Yes. If both addresses are within the same allocated object, or
+one-past-the-end, you'll get the comparison result you expect. If either is
+outside of it, integer arithmetic wrapping may occur, so the comparison may not
+be meaningful.
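+
+For example, comparing two addresses derived from the same hypothetical base
+pointer:
+
+.. code-block:: llvm
+
+  %a   = getelementptr i32* %base, i64 %i
+  %b   = getelementptr i32* %base, i64 %j
+  ; Well defined if %a and %b stay within (or one past) the same object.
+  %cmp = icmp ult i32* %a, %b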
+
+Can I do GEP with a different pointer type than the type of the underlying object?
+----------------------------------------------------------------------------------
+
+Yes. There are no restrictions on bitcasting a pointer value to an arbitrary
+pointer type. The types in a GEP serve only to define the parameters for the
+underlying integer computation. They need not correspond with the actual type of
+the underlying object.
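+
+For example, reinterpreting a hypothetical ``i8*`` buffer as an array of
+``i32`` purely for the address computation:
+
+.. code-block:: llvm
+
+  %words = bitcast i8* %buf to i32*
+  %third = getelementptr i32* %words, i64 2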
+
+Furthermore, loads and stores don't have to use the same types as the type of
+the underlying object. Types in this context serve only to specify memory size
+and alignment. Beyond that, they are merely a hint to the optimizer indicating
+how the value will likely be used.
+
+Can I cast an object's address to integer and add it to null?
+-------------------------------------------------------------
+
+You can compute an address that way, but if you use GEP to do the add, you can't
+use that pointer to actually access the object, unless the object is managed
+outside of LLVM.
+
+The underlying integer computation is sufficiently defined; null has a defined
+value --- zero --- and you can add whatever value you want to it.
+
+However, it's invalid to access (load from or store to) an LLVM-aware object
+with such a pointer. This includes ``GlobalVariables``, ``Allocas``, and objects
+pointed to by noalias pointers.
+
+If you really need this functionality, you can do the arithmetic with explicit
+integer instructions, and use inttoptr to convert the result to an address. Most
+of GEP's special aliasing rules do not apply to pointers computed from ptrtoint,
+arithmetic, and inttoptr sequences.
+
+Can I compute the distance between two objects, and add that value to one address to compute the other address?
+---------------------------------------------------------------------------------------------------------------
+
+As with arithmetic on null, you can use GEP to compute an address that way, but
+you can't use that pointer to actually access the object if you do, unless the
+object is managed outside of LLVM.
+
+Also as above, ptrtoint and inttoptr provide an alternative way to do this
+which does not have this restriction.
+
+Can I do type-based alias analysis on LLVM IR?
+----------------------------------------------
+
+You can't do type-based alias analysis using LLVM's built-in type system,
+because LLVM has no restrictions on mixing types in addressing, loads or stores.
+
+LLVM's type-based alias analysis pass uses metadata to describe a different type
+system (such as the C type system), and performs type-based aliasing on top of
+that. Further details are in the `language reference <LangRef.html#tbaa>`_.
+
+What happens if a GEP computation overflows?
+--------------------------------------------
+
+If the GEP lacks the ``inbounds`` keyword, the value is the result from
+evaluating the implied two's complement integer computation. However, since
+there's no guarantee of where an object will be allocated in the address space,
+such values have limited meaning.
+
+If the GEP has the ``inbounds`` keyword, the result value is undefined (a "trap
+value") if the GEP overflows (i.e. wraps around the end of the address space).
+
+As such, there are some ramifications of this for inbounds GEPs: scales implied
+by array/vector/pointer indices are always known to be "nsw" since they are
+signed values that are scaled by the element size. These values are also
+allowed to be negative (e.g. "``gep i32 *%P, i32 -1``") but the pointer itself
+is logically treated as an unsigned value. This means that GEPs have an
+asymmetric relation between the pointer base (which is treated as unsigned) and
+the offset applied to it (which is treated as signed). The result of the
+additions within the offset calculation cannot have signed overflow, but when
+applied to the base pointer, there can be signed overflow.
+
+How can I tell if my front-end is following the rules?
+------------------------------------------------------
+
+There is currently no checker for the getelementptr rules; the only way to do
+this is to manually check each place in your front-end where GetElementPtr
+operators are created.
+
+It's not possible to write a checker which could find all rule violations
+statically. It would be possible to write a checker which works by instrumenting
+the code with dynamic checks though. Alternatively, it would be possible to
+write a static checker which catches a subset of possible problems. However, no
+such checker exists today.
+
+Rationale
+=========
+
+Why is GEP designed this way?
+-----------------------------
+
+The design of GEP has the following goals, in rough unofficial order of
+priority:
+
+* Support C, C-like languages, and languages which can be conceptually lowered
+ into C (this covers a lot).
+
+* Support optimizations such as those that are common in C compilers. In
+ particular, GEP is a cornerstone of LLVM's `pointer aliasing
+ model <LangRef.html#pointeraliasing>`_.
+
+* Provide a consistent method for computing addresses so that address
+ computations don't need to be a part of load and store instructions in the IR.
+
+* Support non-C-like languages, to the extent that it doesn't interfere with
+ other goals.
+
+* Minimize target-specific information in the IR.
+
+Why do struct member indices always use ``i32``?
+------------------------------------------------
+
+The specific type ``i32`` is probably just a historical artifact; however, it's
+wide enough for all practical purposes, so there's been no need to change it. It
+doesn't necessarily imply i32 address arithmetic; it's just an identifier which
+identifies a field in a struct. Requiring that all struct indices be the same
+reduces the range of possibilities for cases where two GEPs are effectively the
+same but have distinct operand types.
+
+What's an uglygep?
+------------------
+
+Some LLVM optimizers operate on GEPs by internally lowering them into more
+primitive integer expressions, which allows them to be combined with other
+integer expressions and/or split into multiple separate integer expressions. If
+they've made non-trivial changes, translating back into LLVM IR can involve
+reverse-engineering the structure of the addressing in order to fit it into the
+static type of the original first operand. It isn't always possible to fully
+reconstruct this structure; sometimes the underlying addressing doesn't
+correspond with the static type at all. In such cases the optimizer will instead
+emit a GEP with the base pointer cast to a simple address-unit pointer, using
+the name "uglygep". This isn't pretty, but it's just as valid, and it's
+sufficient to preserve the pointer aliasing guarantees that GEP provides.
+
+Summary
+=======
+
+In summary, here are some things to always remember about the GetElementPtr
+instruction:
+
+
+#. The GEP instruction never accesses memory, it only provides pointer
+ computations.
+
+#. The first operand to the GEP instruction is always a pointer and it must be
+ indexed.
+
+#. There are no superfluous indices for the GEP instruction.
+
+#. Trailing zero indices are superfluous for pointer aliasing, but not for the
+ types of the pointers.
+
+#. Leading zero indices are not superfluous for pointer aliasing nor the types
+ of the pointers.
diff --git a/docs/GettingStarted.html b/docs/GettingStarted.html
new file mode 100644
index 00000000000..c91cb03d18c
--- /dev/null
+++ b/docs/GettingStarted.html
@@ -0,0 +1,1760 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
+ "http://www.w3.org/TR/html4/strict.dtd">
+<html>
+<head>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
+ <title>Getting Started with LLVM System</title>
+ <link rel="stylesheet" href="_static/llvm.css" type="text/css">
+</head>
+<body>
+
+<h1>
+ Getting Started with the LLVM System
+</h1>
+
+<ul>
+ <li><a href="#overview">Overview</a>
+ <li><a href="#quickstart">Getting Started Quickly (A Summary)</a>
+ <li><a href="#requirements">Requirements</a>
+ <ol>
+ <li><a href="#hardware">Hardware</a></li>
+ <li><a href="#software">Software</a></li>
+ <li><a href="#brokengcc">Broken versions of GCC and other tools</a></li>
+ </ol></li>
+
+ <li><a href="#starting">Getting Started with LLVM</a>
+ <ol>
+ <li><a href="#terminology">Terminology and Notation</a></li>
+ <li><a href="#environment">Setting Up Your Environment</a></li>
+ <li><a href="#unpack">Unpacking the LLVM Archives</a></li>
+ <li><a href="#checkout">Checkout LLVM from Subversion</a></li>
+ <li><a href="#git_mirror">LLVM GIT mirror</a></li>
+ <li><a href="#config">Local LLVM Configuration</a></li>
+ <li><a href="#compile">Compiling the LLVM Suite Source Code</a></li>
+ <li><a href="#cross-compile">Cross-Compiling LLVM</a></li>
+ <li><a href="#objfiles">The Location of LLVM Object Files</a></li>
+ <li><a href="#optionalconfig">Optional Configuration Items</a></li>
+ </ol></li>
+
+ <li><a href="#layout">Program layout</a>
+ <ol>
+ <li><a href="#examples"><tt>llvm/examples</tt></a></li>
+ <li><a href="#include"><tt>llvm/include</tt></a></li>
+ <li><a href="#lib"><tt>llvm/lib</tt></a></li>
+ <li><a href="#projects"><tt>llvm/projects</tt></a></li>
+ <li><a href="#runtime"><tt>llvm/runtime</tt></a></li>
+ <li><a href="#test"><tt>llvm/test</tt></a></li>
+ <li><a href="#test-suite"><tt>test-suite</tt></a></li>
+ <li><a href="#tools"><tt>llvm/tools</tt></a></li>
+ <li><a href="#utils"><tt>llvm/utils</tt></a></li>
+ </ol></li>
+
+ <li><a href="#tutorial">An Example Using the LLVM Tool Chain</a>
+ <ol>
+ <li><a href="#tutorial4">Example with Clang</a></li>
+ </ol>
+ <li><a href="#problems">Common Problems</a>
+ <li><a href="#links">Links</a>
+</ul>
+
+<div class="doc_author">
+ <p>Written by:
+ <a href="mailto:criswell@uiuc.edu">John Criswell</a>,
+ <a href="mailto:sabre@nondot.org">Chris Lattner</a>,
+ <a href="http://misha.brukman.net/">Misha Brukman</a>,
+ <a href="http://www.cs.uiuc.edu/~vadve">Vikram Adve</a>, and
+ <a href="mailto:gshi1@uiuc.edu">Guochun Shi</a>.
+ </p>
+</div>
+
+
+<!-- *********************************************************************** -->
+<h2>
+ <a name="overview">Overview</a>
+</h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>Welcome to LLVM! In order to get started, you first need to know some
+basic information.</p>
+
+<p>First, LLVM comes in three pieces. The first piece is the LLVM
+suite. This contains all of the tools, libraries, and header files
+needed to use LLVM. It contains an assembler, disassembler, bitcode
+analyzer and bitcode optimizer. It also contains basic regression tests that
+can be used to test the LLVM tools and the Clang front end.</p>
+
+<p>The second piece is the <a href="http://clang.llvm.org/">Clang</a> front end.
+This component compiles C, C++, Objective C, and Objective C++ code into LLVM
+bitcode. Once compiled into LLVM bitcode, a program can be manipulated with the
+LLVM tools from the LLVM suite.
+</p>
+
+<p>
+There is a third, optional piece called Test Suite. It is a suite of programs
+with a testing harness that can be used to further test LLVM's functionality
+and performance.
+</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2>
+ <a name="quickstart">Getting Started Quickly (A Summary)</a>
+</h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>The LLVM Getting Started documentation may be out of date. So, the Clang
+<a href="http://clang.llvm.org/get_started.html">Getting Started</a> page might
+also be a good place to start.</p>
+
+<p>Here's the short story for getting up and running quickly with LLVM:</p>
+
+<ol>
+ <li>Read the documentation.</li>
+ <li>Read the documentation.</li>
+ <li>Remember that you were warned twice about reading the documentation.</li>
+
+ <li>Checkout LLVM:
+ <ul>
+ <li><tt>cd <i>where-you-want-llvm-to-live</i></tt>
+ <li><tt>svn co http://llvm.org/svn/llvm-project/llvm/trunk llvm</tt></li>
+ </ul>
+ </li>
+
+ <li>Checkout Clang:
+ <ul>
+ <li><tt>cd <i>where-you-want-llvm-to-live</i></tt>
+ <li><tt>cd llvm/tools</tt>
+ <li><tt>svn co http://llvm.org/svn/llvm-project/cfe/trunk clang</tt></li>
+ </ul>
+ </li>
+
+ <li>Checkout Compiler-RT:
+ <ul>
+ <li><tt>cd <i>where-you-want-llvm-to-live</i></tt>
+ <li><tt>cd llvm/projects</tt>
+ <li><tt>svn co http://llvm.org/svn/llvm-project/compiler-rt/trunk
+ compiler-rt</tt></li>
+ </ul>
+ </li>
+
+ <li>Get the Test Suite Source Code <b>[Optional]</b>
+ <ul>
+ <li><tt>cd <i>where-you-want-llvm-to-live</i></tt>
+ <li><tt>cd llvm/projects</tt>
+ <li><tt>svn co http://llvm.org/svn/llvm-project/test-suite/trunk test-suite</tt></li>
+ </ul>
+ </li>
+
+ <li>Configure and build LLVM and Clang:
+ <ul>
+ <li><tt>cd <i>where-you-want-to-build-llvm</i></tt></li>
+ <li><tt>mkdir build</tt> (for building without polluting the source dir)</li>
+ <li><tt>cd build</tt></li>
+ <li><tt>../llvm/configure [options]</tt>
+ <br>Some common options:
+
+ <ul>
+ <li><tt>--prefix=<i>directory</i></tt> -
+ Specify for <i>directory</i> the full pathname of where you
+ want the LLVM tools and libraries to be installed (default
+ <tt>/usr/local</tt>).</li>
+ </ul>
+
+ <ul>
+ <li><tt>--enable-optimized</tt> -
+ Compile with optimizations enabled (default is NO).</li>
+ </ul>
+
+ <ul>
+ <li><tt>--enable-assertions</tt> -
+ Compile with assertion checks enabled (default is YES).</li>
+ </ul>
+ </li>
+    <li><tt>make [-j]</tt> - The -j specifies the number of jobs (commands) to
+    run simultaneously.  This builds both LLVM and Clang for Debug+Asserts mode.
+    The --enable-optimized configure option is used to specify a Release
+    build.</li>
+    <li><tt>make check-all</tt> -
+    This runs the regression tests to ensure everything is in working order.</li>
+    <li><tt>make update</tt> -
+    This command updates all the svn repositories at once, rather than
+    requiring you to <tt>cd</tt> into each individual repository and run
+    <tt>svn update</tt>.</li>
+ <li>It is also possible to use CMake instead of the makefiles. With CMake
+ it is also possible to generate project files for several IDEs: Eclipse
+ CDT4, CodeBlocks, Qt-Creator (use the CodeBlocks generator), KDevelop3.</li>
+ <li>If you get an "internal compiler error (ICE)" or test failures, see
+ <a href="#brokengcc">below</a>.</li>
+
+ </ul>
+ </li>
+
+</ol>
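+
+<p>For reference, the checkout and build steps above condense into a shell
+session like the following.  This is just a sketch: it assumes the build
+directory is created alongside the <tt>llvm</tt> checkout, and the
+<tt>--enable-optimized</tt> and <tt>-j4</tt> choices are merely examples:</p>
+
+<div class="doc_code">
+<pre>
+% cd <i>where-you-want-llvm-to-live</i>
+% svn co http://llvm.org/svn/llvm-project/llvm/trunk llvm
+% svn co http://llvm.org/svn/llvm-project/cfe/trunk llvm/tools/clang
+% svn co http://llvm.org/svn/llvm-project/compiler-rt/trunk llvm/projects/compiler-rt
+% mkdir build &amp;&amp; cd build
+% ../llvm/configure --enable-optimized --prefix=<i>directory</i>
+% make -j4
+% make check-all
+</pre>
+</div>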
+
+<p>Consult the <a href="#starting">Getting Started with LLVM</a> section for
+detailed information on configuring and compiling LLVM. See <a
+href="#environment">Setting Up Your Environment</a> for tips that simplify
+working with the Clang front end and LLVM tools. Go to <a href="#layout">Program
+Layout</a> to learn about the layout of the source code tree.</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2>
+ <a name="requirements">Requirements</a>
+</h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>Before you begin to use the LLVM system, review the requirements given below.
+This may save you some trouble by knowing ahead of time what hardware and
+software you will need.</p>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="hardware">Hardware</a>
+</h3>
+
+<div>
+
+<p>LLVM is known to work on the following platforms:</p>
+
+<table cellpadding="3" summary="Known LLVM platforms">
+<tr>
+ <th>OS</th>
+ <th>Arch</th>
+ <th>Compilers</th>
+</tr>
+<tr>
+ <td>AuroraUX</td>
+ <td>x86<sup><a href="#pf_1">1</a></sup></td>
+ <td>GCC</td>
+</tr>
+<tr>
+ <td>Linux</td>
+ <td>x86<sup><a href="#pf_1">1</a></sup></td>
+ <td>GCC</td>
+</tr>
+<tr>
+ <td>Linux</td>
+ <td>amd64</td>
+ <td>GCC</td>
+</tr>
+<tr>
+ <td>Solaris</td>
+ <td>V9 (Ultrasparc)</td>
+ <td>GCC</td>
+</tr>
+<tr>
+ <td>FreeBSD</td>
+ <td>x86<sup><a href="#pf_1">1</a></sup></td>
+ <td>GCC</td>
+</tr>
+<tr>
+ <td>FreeBSD</td>
+ <td>amd64</td>
+ <td>GCC</td>
+</tr>
+<tr>
+ <td>MacOS X<sup><a href="#pf_2">2</a></sup></td>
+ <td>PowerPC</td>
+ <td>GCC</td>
+</tr>
+<tr>
+ <td>MacOS X<sup><a href="#pf_2">2</a>,<a href="#pf_9">9</a></sup></td>
+ <td>x86</td>
+ <td>GCC</td>
+</tr>
+<tr>
+ <td>Cygwin/Win32</td>
+ <td>x86<sup><a href="#pf_1">1</a>,<a href="#pf_8">8</a>,
+ <a href="#pf_11">11</a></sup></td>
+ <td>GCC 3.4.X, binutils 2.20</td>
+</tr>
+<tr>
+ <td>MinGW/Win32</td>
+ <td>x86<sup><a href="#pf_1">1</a>,<a href="#pf_6">6</a>,
+ <a href="#pf_8">8</a>, <a href="#pf_10">10</a>,
+ <a href="#pf_11">11</a></sup></td>
+ <td>GCC 3.4.X, binutils 2.20</td>
+</tr>
+</table>
+
+<p>LLVM has partial support for the following platforms:</p>
+
+<table summary="LLVM partial platform support">
+<tr>
+ <th>OS</th>
+ <th>Arch</th>
+ <th>Compilers</th>
+</tr>
+<tr>
+ <td>Windows</td>
+ <td>x86<sup><a href="#pf_1">1</a></sup></td>
+ <td>Visual Studio 2008 or higher<sup><a href="#pf_4">4</a>,<a href="#pf_5">5</a></sup></td>
+</tr>
+<tr>
+ <td>AIX<sup><a href="#pf_3">3</a>,<a href="#pf_4">4</a></sup></td>
+ <td>PowerPC</td>
+ <td>GCC</td>
+</tr>
+<tr>
+ <td>Linux<sup><a href="#pf_3">3</a>,<a href="#pf_5">5</a></sup></td>
+ <td>PowerPC</td>
+ <td>GCC</td>
+</tr>
+
+<tr>
+ <td>Linux<sup><a href="#pf_7">7</a></sup></td>
+ <td>Alpha</td>
+ <td>GCC</td>
+</tr>
+<tr>
+ <td>Linux<sup><a href="#pf_7">7</a></sup></td>
+ <td>Itanium (IA-64)</td>
+ <td>GCC</td>
+</tr>
+<tr>
+ <td>HP-UX<sup><a href="#pf_7">7</a></sup></td>
+ <td>Itanium (IA-64)</td>
+ <td>HP aCC</td>
+</tr>
+<tr>
+ <td>Windows x64</td>
+ <td>x86-64</td>
+ <td>mingw-w64's GCC-4.5.x<sup><a href="#pf_12">12</a></sup></td>
+</tr>
+</table>
+
+<p><b>Notes:</b></p>
+
+<div class="doc_notes">
+<ol>
+<li><a name="pf_1">Code generation supported for Pentium processors and
+up</a></li>
+<li><a name="pf_2">Code generation supported for 32-bit ABI only</a></li>
+<li><a name="pf_3">No native code generation</a></li>
+<li><a name="pf_4">Build is not complete: one or more tools do not link or function</a></li>
+<li><a name="pf_5">The GCC-based C/C++ frontend does not build</a></li>
+<li><a name="pf_6">The port is done using the MSYS shell.</a></li>
+<li><a name="pf_7">Native code generation exists but is not complete.</a></li>
+<li><a name="pf_8">Binutils 2.20 or later is required to correctly assemble
+    the code generated by LLVM.</a></li>
+<li><a name="pf_9">Xcode 2.5 and gcc 4.0.1</a> (Apple Build 5370) will trip
+ internal LLVM assert messages when compiled for Release at optimization
+ levels greater than 0 (i.e., <i>"-O1"</i> and higher).
+ Add <i>OPTIMIZE_OPTION="-O0"</i> to the build command line
+ if compiling for LLVM Release or bootstrapping the LLVM toolchain.</li>
+<li><a name="pf_10">For MSYS/MinGW on Windows, be sure to install the MSYS
+ version of the perl package, and be sure it appears in your path
+ before any Windows-based versions such as Strawberry Perl and
+ ActivePerl, as these have Windows-specifics that will cause the
+ build to fail.</a></li>
+<li><a name="pf_11">To use LLVM modules on a Win32-based system,
+    you may configure LLVM with <i>&quot;--enable-shared&quot;</i>.</a></li>
+<li><a name="pf_12">To compile the SPU backend, you need to add
+    <tt>&quot;LDFLAGS=-Wl,--stack,16777216&quot;</tt> to configure.</a></li>
+</ol>
+</div>
+
+<p>Note that you will need about 1-3 GB of space for a full LLVM build in Debug
+mode, depending on the system (it is so large because of all the debugging
+information and the fact that the libraries are statically linked into multiple
+tools). If you do not need many of the tools and you are space-conscious, you
+can pass <tt>ONLY_TOOLS="tools you need"</tt> to make. The Release build
+requires considerably less space.</p>
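+
+<p>For example, a space-conscious build that only produces a handful of tools
+might be invoked like this (the particular tool names listed here are just an
+illustration; list whichever tools you actually need):</p>
+
+<div class="doc_code">
+<pre>
+% make ONLY_TOOLS="llvm-as llvm-dis llc opt"
+</pre>
+</div>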
+
+<p>The LLVM suite <i>may</i> compile on other platforms, but it is not
+guaranteed to do so. If compilation is successful, the LLVM utilities should be
+able to assemble, disassemble, analyze, and optimize LLVM bitcode. Code
+generation should work as well, although the generated native code may not work
+on your platform.</p>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="software">Software</a>
+</h3>
+<div>
+ <p>Compiling LLVM requires that you have several software packages
+ installed. The table below lists those required packages. The Package column
+ is the usual name for the software package that LLVM depends on. The Version
+ column provides "known to work" versions of the package. The Notes column
+ describes how LLVM uses the package and provides other details.</p>
+ <table summary="Packages required to compile LLVM">
+ <tr><th>Package</th><th>Version</th><th>Notes</th></tr>
+
+ <tr>
+ <td><a href="http://savannah.gnu.org/projects/make">GNU Make</a></td>
+ <td>3.79, 3.79.1</td>
+ <td>Makefile/build processor</td>
+ </tr>
+
+ <tr>
+ <td><a href="http://gcc.gnu.org/">GCC</a></td>
+ <td>3.4.2</td>
+ <td>C/C++ compiler<sup><a href="#sf1">1</a></sup></td>
+ </tr>
+
+ <tr>
+ <td><a href="http://www.gnu.org/software/texinfo/">TeXinfo</a></td>
+ <td>4.5</td>
+ <td>For building the CFE</td>
+ </tr>
+
+ <tr>
+ <td><a href="http://subversion.tigris.org/project_packages.html">SVN</a></td>
+ <td>&ge;1.3</td>
+ <td>Subversion access to LLVM<sup><a href="#sf2">2</a></sup></td>
+ </tr>
+
+ <!-- FIXME:
+ Do we support dg?
+ Are DejaGnu and expect obsolete?
+ Shall we mention Python? -->
+
+ <tr>
+ <td><a href="http://savannah.gnu.org/projects/dejagnu">DejaGnu</a></td>
+ <td>1.4.2</td>
+ <td>Automated test suite<sup><a href="#sf3">3</a></sup></td>
+ </tr>
+
+ <tr>
+ <td><a href="http://www.tcl.tk/software/tcltk/">tcl</a></td>
+ <td>8.3, 8.4</td>
+ <td>Automated test suite<sup><a href="#sf3">3</a></sup></td>
+ </tr>
+
+ <tr>
+ <td><a href="http://expect.nist.gov/">expect</a></td>
+ <td>5.38.0</td>
+ <td>Automated test suite<sup><a href="#sf3">3</a></sup></td>
+ </tr>
+
+ <tr>
+ <td><a href="http://www.perl.com/download.csp">perl</a></td>
+ <td>&ge;5.6.0</td>
+ <td>Utilities</td>
+ </tr>
+
+ <tr>
+ <td><a href="http://savannah.gnu.org/projects/m4">GNU M4</a>
+ <td>1.4</td>
+ <td>Macro processor for configuration<sup><a href="#sf4">4</a></sup></td>
+ </tr>
+
+ <tr>
+ <td><a href="http://www.gnu.org/software/autoconf/">GNU Autoconf</a></td>
+ <td>2.60</td>
+ <td>Configuration script builder<sup><a href="#sf4">4</a></sup></td>
+ </tr>
+
+ <tr>
+ <td><a href="http://www.gnu.org/software/automake/">GNU Automake</a></td>
+ <td>1.9.6</td>
+ <td>aclocal macro generator<sup><a href="#sf4">4</a></sup></td>
+ </tr>
+
+ <tr>
+ <td><a href="http://savannah.gnu.org/projects/libtool">libtool</a></td>
+ <td>1.5.22</td>
+ <td>Shared library manager<sup><a href="#sf4">4</a></sup></td>
+ </tr>
+
+ </table>
+
+ <p><b>Notes:</b></p>
+ <div class="doc_notes">
+ <ol>
+ <li><a name="sf1">Only the C and C++ languages are needed so there's no
+ need to build the other languages for LLVM's purposes.</a> See
+ <a href="#brokengcc">below</a> for specific version info.</li>
+ <li><a name="sf2">You only need Subversion if you intend to build from the
+ latest LLVM sources. If you're working from a release distribution, you
+ don't need Subversion.</a></li>
+ <li><a name="sf3">Only needed if you want to run the automated test
+ suite in the <tt>llvm/test</tt> directory.</a></li>
+ <li><a name="sf4">If you want to make changes to the configure scripts,
+ you will need GNU autoconf (2.60), and consequently, GNU M4 (version 1.4
+ or higher). You will also need automake (1.9.6). We only use aclocal
+ from that package.</a></li>
+ </ol>
+ </div>
+
+ <p>Additionally, your compilation host is expected to have the usual
+ plethora of Unix utilities. Specifically:</p>
+ <ul>
+ <li><b>ar</b> - archive library builder</li>
+ <li><b>bzip2*</b> - bzip2 command for distribution generation</li>
+ <li><b>bunzip2*</b> - bunzip2 command for distribution checking</li>
+ <li><b>chmod</b> - change permissions on a file</li>
+ <li><b>cat</b> - output concatenation utility</li>
+ <li><b>cp</b> - copy files</li>
+ <li><b>date</b> - print the current date/time </li>
+ <li><b>echo</b> - print to standard output</li>
+ <li><b>egrep</b> - extended regular expression search utility</li>
+ <li><b>find</b> - find files/dirs in a file system</li>
+ <li><b>grep</b> - regular expression search utility</li>
+ <li><b>gzip*</b> - gzip command for distribution generation</li>
+ <li><b>gunzip*</b> - gunzip command for distribution checking</li>
+ <li><b>install</b> - install directories/files </li>
+ <li><b>mkdir</b> - create a directory</li>
+ <li><b>mv</b> - move (rename) files</li>
+ <li><b>ranlib</b> - symbol table builder for archive libraries</li>
+ <li><b>rm</b> - remove (delete) files and directories</li>
+ <li><b>sed</b> - stream editor for transforming output</li>
+ <li><b>sh</b> - Bourne shell for make build scripts</li>
+ <li><b>tar</b> - tape archive for distribution generation</li>
+ <li><b>test</b> - test things in file system</li>
+ <li><b>unzip*</b> - unzip command for distribution checking</li>
+ <li><b>zip*</b> - zip command for distribution generation</li>
+ </ul>
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="brokengcc">Broken versions of GCC and other tools</a>
+</h3>
+
+<div>
+
+<p>LLVM is very demanding of the host C++ compiler, and as such tends to expose
+bugs in the compiler. In particular, several versions of GCC crash when trying
+to compile LLVM. We routinely use GCC 4.2 (and higher) or Clang.
+Other versions of GCC will probably work as well. GCC versions listed
+here are known to not work. If you are using one of these versions, please try
+to upgrade your GCC to something more recent. If you run into a problem with a
+version of GCC not listed here, please <a href="mailto:llvmdev@cs.uiuc.edu">let
+us know</a>. Please use the "<tt>gcc -v</tt>" command to find out which version
+of GCC you are using.
+</p>
+
+<p><b>GCC versions prior to 3.0</b>: GCC 2.96.x and before had several
+problems in the STL that effectively prevent it from compiling LLVM.
+</p>
+
+<p><b>GCC 3.2.2 and 3.2.3</b>: These versions of GCC fail to compile LLVM with
+a bogus template error.  This was fixed in later GCCs.</p>
+
+<p><b>GCC 3.3.2</b>: This version of GCC suffered from a <a
+href="http://gcc.gnu.org/PR13392">serious bug</a> which causes it to crash in
+the "<tt>convert_from_eh_region_ranges_1</tt>" GCC function.</p>
+
+<p><b>Cygwin GCC 3.3.3</b>: The version of GCC 3.3.3 commonly shipped with
+ Cygwin does not work.</p>
+<p><b>SuSE GCC 3.3.3</b>: The version of GCC 3.3.3 shipped with SuSE 9.1 (and
+ possibly others) does not compile LLVM correctly (it appears that exception
+ handling is broken in some cases). Please download the FSF 3.3.3 or upgrade
+ to a newer version of GCC.</p>
+<p><b>GCC 3.4.0 on linux/x86 (32-bit)</b>: GCC miscompiles portions of the
+ code generator, causing an infinite loop in the llvm-gcc build when built
+ with optimizations enabled (i.e. a release build).</p>
+<p><b>GCC 3.4.2 on linux/x86 (32-bit)</b>: GCC miscompiles portions of the
+ code generator at -O3, as with 3.4.0. However gcc 3.4.2 (unlike 3.4.0)
+  correctly compiles LLVM at -O2.  A workaround is to build release LLVM
+  builds with "make ENABLE_OPTIMIZED=1 OPTIMIZE_OPTION=-O2 ..."</p>
+<p><b>GCC 3.4.x on X86-64/amd64</b>: GCC <a href="http://llvm.org/PR1056">
+ miscompiles portions of LLVM</a>.</p>
+<p><b>GCC 3.4.4 (CodeSourcery ARM 2005q3-2)</b>: this compiler miscompiles LLVM
+  when building with optimizations enabled.  It appears to work with
+  "<tt>make ENABLE_OPTIMIZED=1 OPTIMIZE_OPTION=-O1</tt>" or when doing a debug
+  build.</p>
+<p><b>IA-64 GCC 4.0.0</b>: The IA-64 version of GCC 4.0.0 is known to
+ miscompile LLVM.</p>
+<p><b>Apple Xcode 2.3</b>: GCC crashes when compiling LLVM at -O3 (which is the
+  default with ENABLE_OPTIMIZED=1).  To work around this, build with
+  "ENABLE_OPTIMIZED=1 OPTIMIZE_OPTION=-O2".</p>
+<p><b>GCC 4.1.1</b>: GCC fails to build LLVM with template concept check errors
+ compiling some files. At the time of this writing, GCC mainline (4.2)
+ did not share the problem.</p>
+<p><b>GCC 4.1.1 on X86-64/amd64</b>: GCC <a href="http://llvm.org/PR1063">
+ miscompiles portions of LLVM</a> when compiling llvm itself into 64-bit
+ code. LLVM will appear to mostly work but will be buggy, e.g. failing
+ portions of its testsuite.</p>
+<p><b>GCC 4.1.2 on OpenSUSE</b>: Seg faults during the libstdc++ build, and on
+x86_64 platforms compiling md5.c produces a mangled constant.</p>
+<p><b>GCC 4.1.2 (20061115 (prerelease) (Debian 4.1.1-21)) on Debian</b>: Appears
+to miscompile parts of LLVM 2.4. One symptom is ValueSymbolTable complaining
+about symbols remaining in the table on destruction.</p>
+<p><b>GCC 4.1.2 20071124 (Red Hat 4.1.2-42)</b>: Suffers from the same symptoms
+as the previous one. It appears to work with ENABLE_OPTIMIZED=0 (the default).</p>
+<p><b>Cygwin GCC 4.3.2 20080827 (beta) 2</b>:
+  Users <a href="http://llvm.org/PR4145">reported</a> various problems related
+  to link errors when using this GCC version.</p>
+<p><b>Debian GCC 4.3.2 on X86</b>: Crashes building some files in LLVM 2.6.</p>
+<p><b>GCC 4.3.3 (Debian 4.3.3-10) on ARM</b>: Miscompiles parts of LLVM 2.6
+when optimizations are turned on. The symptom is an infinite loop in
+FoldingSetImpl::RemoveNode while running the code generator.</p>
+<p><b>SUSE 11 GCC 4.3.4</b>: Miscompiles LLVM, causing crashes in ValueHandle logic.</p>
+<p><b>GCC 4.3.5 and GCC 4.4.5 on ARM</b>: These can miscompile <tt>value &gt;&gt;
+1</tt> even at -O0. A test failure in <tt>test/Assembler/alignstack.ll</tt> is
+one symptom of the problem.</p>
+<p><b>GNU ld 2.16.X</b>. Some 2.16.X versions of the ld linker will produce very
+long warning messages complaining that some ".gnu.linkonce.t.*" symbol was
+defined in a discarded section. You can safely ignore these messages as they are
+erroneous and the linkage is correct. These messages disappear using ld
+2.17.</p>
+
+<p><b>GNU binutils 2.17</b>: Binutils 2.17 contains <a
+href="http://sourceware.org/bugzilla/show_bug.cgi?id=3111">a bug</a> which
+causes huge link times (minutes instead of seconds) when building LLVM. We
+recommend upgrading to a newer version (2.17.50.0.4 or later).</p>
+
+<p><b>GNU Binutils 2.19.1 Gold</b>: This version of Gold contained
+<a href="http://sourceware.org/bugzilla/show_bug.cgi?id=9836">a bug</a>
+which causes intermittent failures when building LLVM with position independent
+code. The symptom is an error about cyclic dependencies. We recommend
+upgrading to a newer version of Gold.</p>
+
+</div>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2>
+ <a name="starting">Getting Started with LLVM</a>
+</h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>The remainder of this guide is meant to get you up and running with
+LLVM and to give you some basic information about the LLVM environment.</p>
+
+<p>The later sections of this guide describe the <a
+href="#layout">general layout</a> of the LLVM source tree, a <a
+href="#tutorial">simple example</a> using the LLVM tool chain, and <a
+href="#links">links</a> to find more information about LLVM or to get
+help via e-mail.</p>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="terminology">Terminology and Notation</a>
+</h3>
+
+<div>
+
+<p>Throughout this manual, the following names are used to denote paths
+specific to the local system and working environment. <i>These are not
+environment variables you need to set but just strings used in the rest
+of this document below</i>. In any of the examples below, simply replace
+each of these names with the appropriate pathname on your local system.
+All these paths are absolute:</p>
+
+<dl>
+ <dt>SRC_ROOT
+ <dd>
+ This is the top level directory of the LLVM source tree.
+ <br><br>
+
+ <dt>OBJ_ROOT
+ <dd>
+ This is the top level directory of the LLVM object tree (i.e. the
+ tree where object files and compiled programs will be placed. It
+ can be the same as SRC_ROOT).
+ <br><br>
+
+</dl>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="environment">Setting Up Your Environment</a>
+</h3>
+
+<div>
+
+<p>
+In order to compile and use LLVM, you may need to set some environment
+variables.</p>
+
+<dl>
+ <dt><tt>LLVM_LIB_SEARCH_PATH</tt>=<tt>/path/to/your/bitcode/libs</tt></dt>
+ <dd>[Optional] This environment variable helps LLVM linking tools find the
+ locations of your bitcode libraries. It is provided only as a
+ convenience since you can specify the paths using the -L options of the
+ tools and the C/C++ front-end will automatically use the bitcode files
+ installed in its
+ <tt>lib</tt> directory.</dd>
+</dl>
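+
+<p>For example, in a Bourne-compatible shell you might set it like this (the
+path shown is just a placeholder):</p>
+
+<div class="doc_code">
+<pre>
+% export LLVM_LIB_SEARCH_PATH=/path/to/your/bitcode/libs
+</pre>
+</div>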
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="unpack">Unpacking the LLVM Archives</a>
+</h3>
+
+<div>
+
+<p>
+If you have the LLVM distribution, you will need to unpack it before you
+can begin to compile it.  LLVM is distributed as a set of files: the LLVM
+suite and the LLVM GCC front end compiled for your platform.  There is an
+additional, optional test suite.  Each file is a TAR archive that is
+compressed with the gzip program.
+</p>
+
+<p>The files are as follows, with <em>x.y</em> marking the version number:</p>
+<dl>
+ <dt><tt>llvm-x.y.tar.gz</tt></dt>
+ <dd>Source release for the LLVM libraries and tools.<br></dd>
+
+ <dt><tt>llvm-test-x.y.tar.gz</tt></dt>
+ <dd>Source release for the LLVM test-suite.</dd>
+
+ <dt><tt>llvm-gcc-4.2-x.y.source.tar.gz</tt></dt>
+ <dd>Source release of the llvm-gcc-4.2 front end. See README.LLVM in the root
+ directory for build instructions.<br></dd>
+
+ <dt><tt>llvm-gcc-4.2-x.y-platform.tar.gz</tt></dt>
+ <dd>Binary release of the llvm-gcc-4.2 front end for a specific platform.<br></dd>
+
+</dl>
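+
+<p>For example, to unpack the LLVM suite and a binary llvm-gcc release on a
+typical Unix system (substitute the real version and platform names for
+<em>x.y</em> and <em>platform</em>), you could run:</p>
+
+<div class="doc_code">
+<pre>
+% gunzip --stdout llvm-x.y.tar.gz | tar -xvf -
+% gunzip --stdout llvm-gcc-4.2-x.y-platform.tar.gz | tar -xvf -
+</pre>
+</div>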
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="checkout">Checkout LLVM from Subversion</a>
+</h3>
+
+<div>
+
+<p>If you have access to our Subversion repository, you can get a fresh copy of
+the entire source code. All you need to do is check it out from Subversion as
+follows:</p>
+
+<ul>
+ <li><tt>cd <i>where-you-want-llvm-to-live</i></tt></li>
+ <li>Read-Only: <tt>svn co http://llvm.org/svn/llvm-project/llvm/trunk llvm</tt></li>
+ <li>Read-Write:<tt>svn co https://user@llvm.org/svn/llvm-project/llvm/trunk
+ llvm</tt></li>
+</ul>
+
+
+<p>This will create an '<tt>llvm</tt>' directory in the current
+directory and fully populate it with the LLVM source code, Makefiles,
+test directories, and local copies of documentation files.</p>
+
+<p>If you want to get a specific release (as opposed to the most recent
+revision), you can check it out from the '<tt>tags</tt>' directory (instead of
+'<tt>trunk</tt>'). The following releases are located in the following
+subdirectories of the '<tt>tags</tt>' directory:</p>
+
+<ul>
+<li>Release 3.1: <b>RELEASE_31/final</b></li>
+<li>Release 3.0: <b>RELEASE_30/final</b></li>
+<li>Release 2.9: <b>RELEASE_29/final</b></li>
+<li>Release 2.8: <b>RELEASE_28</b></li>
+<li>Release 2.7: <b>RELEASE_27</b></li>
+<li>Release 2.6: <b>RELEASE_26</b></li>
+<li>Release 2.5: <b>RELEASE_25</b></li>
+<li>Release 2.4: <b>RELEASE_24</b></li>
+<li>Release 2.3: <b>RELEASE_23</b></li>
+<li>Release 2.2: <b>RELEASE_22</b></li>
+<li>Release 2.1: <b>RELEASE_21</b></li>
+<li>Release 2.0: <b>RELEASE_20</b></li>
+<li>Release 1.9: <b>RELEASE_19</b></li>
+<li>Release 1.8: <b>RELEASE_18</b></li>
+<li>Release 1.7: <b>RELEASE_17</b></li>
+<li>Release 1.6: <b>RELEASE_16</b></li>
+<li>Release 1.5: <b>RELEASE_15</b></li>
+<li>Release 1.4: <b>RELEASE_14</b></li>
+<li>Release 1.3: <b>RELEASE_13</b></li>
+<li>Release 1.2: <b>RELEASE_12</b></li>
+<li>Release 1.1: <b>RELEASE_11</b></li>
+<li>Release 1.0: <b>RELEASE_1</b></li>
+</ul>
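+
+<p>For example, to check out the 3.1 release into a directory named
+<tt>llvm-3.1</tt> (the destination directory name is just an illustration),
+you could run:</p>
+
+<div class="doc_code">
+<pre>
+% svn co http://llvm.org/svn/llvm-project/llvm/tags/RELEASE_31/final llvm-3.1
+</pre>
+</div>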
+
+<p>If you would like to get the LLVM test suite (a separate package as of 1.4),
+you get it from the Subversion repository:</p>
+
+<div class="doc_code">
+<pre>
+% cd llvm/projects
+% svn co http://llvm.org/svn/llvm-project/test-suite/trunk test-suite
+</pre>
+</div>
+
+<p>By placing it in the <tt>llvm/projects</tt> directory, it will be automatically
+configured by the LLVM configure script as well as automatically updated when
+you run <tt>svn update</tt>.</p>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="git_mirror">GIT mirror</a>
+</h3>
+
+<div>
+
+<p>GIT mirrors are available for a number of LLVM subprojects. These mirrors
+ sync automatically with each Subversion commit and contain all necessary
+ git-svn marks (so, you can recreate git-svn metadata locally). Note that right
+ now mirrors reflect only <tt>trunk</tt> for each project. You can do the
+ read-only GIT clone of LLVM via:</p>
+
+<pre class="doc_code">
+git clone http://llvm.org/git/llvm.git
+</pre>
+
+<p>If you want to check out clang too, run:</p>
+
+<pre class="doc_code">
+git clone http://llvm.org/git/llvm.git
+cd llvm/tools
+git clone http://llvm.org/git/clang.git
+</pre>
+
+<p>
+Since the upstream repository is in Subversion, you should use
+<tt>&quot;git pull --rebase&quot;</tt>
+instead of <tt>&quot;git pull&quot;</tt> to avoid generating a non-linear
+history in your clone.
+To configure <tt>&quot;git pull&quot;</tt> to pass <tt>--rebase</tt> by default
+on the master branch, run the following command:
+</p>
+
+<pre class="doc_code">
+git config branch.master.rebase true
+</pre>
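+
+<p>With that configuration in place, updating an existing clone (and a nested
+clang checkout, if you have one) is just a matter of running
+<tt>git pull</tt> in each tree, for example:</p>
+
+<pre class="doc_code">
+cd llvm
+git pull --rebase
+cd tools/clang
+git pull --rebase
+</pre>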
+
+<h4>Sending patches with Git</h4>
+<div>
+<p>
+Please read <a href="DeveloperPolicy.html#patches">Developer Policy</a>, too.
+</p>
+
+<p>
+Assume <tt>master</tt> points to the upstream branch and <tt>mybranch</tt>
+points to your working branch, and that <tt>mybranch</tt> is rebased onto
+<tt>master</tt>.  First, check for whitespace errors:
+</p>
+
+<pre class="doc_code">
+git diff --check master..mybranch
+</pre>
+
+<p>
+The easiest way to generate a patch is as follows:
+</p>
+
+<pre class="doc_code">
+git diff master..mybranch &gt; /path/to/mybranch.diff
+</pre>
+
+<p>
+The output is slightly different from an svn-generated diff: git-generated
+diffs carry <tt>a/</tt> and <tt>b/</tt> path prefixes.  Don't worry; such a
+patch can still be applied with <tt>patch -p1 -N</tt>, as shown below.
+</p>
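+
+<p>
+For instance, someone applying your patch from the top of their LLVM source
+tree could run something like the following (the patch location is just the
+example path from above):
+</p>
+
+<pre class="doc_code">
+cd llvm
+patch -p1 -N &lt; /path/to/mybranch.diff
+</pre>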
+
+<p>
+Alternatively, you can generate a patch set with git-format-patch, which
+produces one patch file per commit.  To generate patch files to attach to your
+email:
+</p>
+
+<pre class="doc_code">
+git format-patch --no-attach master..mybranch -o /path/to/your/patchset
+</pre>
+
+<p>
+If you would like to send patches directly, you may use git-send-email or
+git-imap-send.  Here is an example that places the patch set in Gmail's
+[Drafts] folder:
+</p>
+
+<pre class="doc_code">
+git format-patch --attach master..mybranch --stdout | git imap-send
+</pre>
+
+<p>
+For this to work, your .git/config should have an [imap] section like the
+following:
+</p>
+
+<pre class="doc_code">
+[imap]
+ host = imaps://imap.gmail.com
+ user = <em>your.gmail.account</em>@gmail.com
+ pass = <em>himitsu!</em>
+ port = 993
+ sslverify = false
+; in English
+ folder = "[Gmail]/Drafts"
+; example for Japanese, "Modified UTF-7" encoded.
+ folder = "[Gmail]/&amp;Tgtm+DBN-"
+; example for Traditional Chinese
+ folder = "[Gmail]/&amp;g0l6Pw-"
+</pre>
+
+</div>
+
+<h4>For developers to work with git-svn</h4>
+<div>
+
+<p>To set up a clone from which you can submit code using
+   <tt>git-svn</tt>, run:</p>
+
+<pre class="doc_code">
+git clone http://llvm.org/git/llvm.git
+cd llvm
+git svn init https://llvm.org/svn/llvm-project/llvm/trunk --username=&lt;username>
+git config svn-remote.svn.fetch :refs/remotes/origin/master
+git svn rebase -l # -l avoids fetching ahead of the git mirror.
+
+# If you have clang too:
+cd tools
+git clone http://llvm.org/git/clang.git
+cd clang
+git svn init https://llvm.org/svn/llvm-project/cfe/trunk --username=&lt;username>
+git config svn-remote.svn.fetch :refs/remotes/origin/master
+git svn rebase -l
+</pre>
+
+<p>To update this clone without generating git-svn tags that conflict
+with the upstream git repo, run:</p>
+
+<pre class="doc_code">
+git fetch && (cd tools/clang && git fetch) # Get matching revisions of both trees.
+git checkout master
+git svn rebase -l
+(cd tools/clang &&
+ git checkout master &&
+ git svn rebase -l)
+</pre>
+
+<p>This leaves your working directories on their master branches, so
+you'll need to <tt>checkout</tt> each working branch individually and
+<tt>rebase</tt> it on top of its parent branch. (Note: This script is
+intended for relative newbies to git. If you have more experience,
+you can likely improve on it.)</p>
+
+<p>The git-svn metadata can get out of sync after you mess around with
+branches and <code>dcommit</code>. When that happens, <code>git svn
+dcommit</code> stops working, complaining about files with uncommitted
+changes. The fix is to rebuild the metadata:</p>
+
+<pre class="doc_code">
+rm -rf .git/svn
+git svn rebase -l
+</pre>
+
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="config">Local LLVM Configuration</a>
+</h3>
+
+<div>
+
+ <p>Once checked out from the Subversion repository, the LLVM suite source
+ code must be
+configured via the <tt>configure</tt> script. This script sets variables in the
+various <tt>*.in</tt> files, most notably <tt>llvm/Makefile.config</tt> and
+<tt>llvm/include/Config/config.h</tt>. It also populates <i>OBJ_ROOT</i> with
+the Makefiles needed to begin building LLVM.</p>
+
+<p>The following environment variables are used by the <tt>configure</tt>
+script to configure the build system:</p>
+
+<table summary="LLVM configure script environment variables">
+ <tr><th>Variable</th><th>Purpose</th></tr>
+ <tr>
+ <td>CC</td>
+ <td>Tells <tt>configure</tt> which C compiler to use. By default,
+ <tt>configure</tt> will look for the first GCC C compiler in
+ <tt>PATH</tt>. Use this variable to override
+ <tt>configure</tt>'s default behavior.</td>
+ </tr>
+ <tr>
+ <td>CXX</td>
+ <td>Tells <tt>configure</tt> which C++ compiler to use. By default,
+ <tt>configure</tt> will look for the first GCC C++ compiler in
+ <tt>PATH</tt>. Use this variable to override
+ <tt>configure</tt>'s default behavior.</td>
+ </tr>
+</table>
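+
+<p>For example, to build LLVM with a specific compiler you can set
+<tt>CC</tt> and <tt>CXX</tt> when invoking <tt>configure</tt> (the particular
+compiler names shown here are just an illustration):</p>
+
+<div class="doc_code">
+<pre>
+% CC=gcc-4.6 CXX=g++-4.6 <i>SRC_ROOT</i>/configure [options]
+</pre>
+</div>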
+
+<p>The following options can be used to set or enable LLVM specific options:</p>
+
+<dl>
+ <dt><i>--enable-optimized</i></dt>
+ <dd>
+ Enables optimized compilation (debugging symbols are removed
+ and GCC optimization flags are enabled). Note that this is the default
+ setting if you are using the LLVM distribution. The default behavior
+    of a Subversion checkout is to use an unoptimized build (also known as a
+ debug build).
+ <br><br>
+ </dd>
+ <dt><i>--enable-debug-runtime</i></dt>
+ <dd>
+ Enables debug symbols in the runtime libraries. The default is to strip
+ debug symbols from the runtime libraries.
+ </dd>
+ <dt><i>--enable-jit</i></dt>
+ <dd>
+ Compile the Just In Time (JIT) compiler functionality. This is not
+ available
+ on all platforms. The default is dependent on platform, so it is best
+ to explicitly enable it if you want it.
+ <br><br>
+ </dd>
+ <dt><i>--enable-targets=</i><tt>target-option</tt></dt>
+ <dd>Controls which targets will be built and linked into llc. The default
+  value for <tt>target-option</tt> is "all" which builds and links all
+ available targets. The value "host-only" can be specified to build only a
+ native compiler (no cross-compiler targets available). The "native" target is
+ selected as the target of the build host. You can also specify a comma
+ separated list of target names that you want available in llc. The target
+ names use all lower case. The current set of targets is: <br>
+ <tt>arm, cpp, hexagon, mblaze, mips, mipsel, msp430, powerpc, ptx, sparc, spu, x86, x86_64, xcore</tt>.
+ <br><br></dd>
+ <dt><i>--enable-doxygen</i></dt>
+ <dd>Look for the doxygen program and enable construction of doxygen based
+ documentation from the source code. This is disabled by default because
+  generating the documentation can take a long time and produces hundreds of
+ megabytes of output.</dd>
+ <dt><i>--with-udis86</i></dt>
+  <dd>LLVM can use an external disassembler library for various purposes
+  (currently it is used only for examining code produced by the JIT). This
+  option enables the use of the <a href="http://udis86.sourceforge.net/">udis86</a>
+  x86 (both 32 and 64 bit) disassembler library.</dd>
+</dl>
+
+<p>To configure LLVM, follow these steps:</p>
+
+<ol>
+ <li><p>Change directory into the object root directory:</p>
+
+ <div class="doc_code"><pre>% cd <i>OBJ_ROOT</i></pre></div></li>
+
+ <li><p>Run the <tt>configure</tt> script located in the LLVM source
+ tree:</p>
+
+ <div class="doc_code">
+ <pre>% <i>SRC_ROOT</i>/configure --prefix=/install/path [other options]</pre>
+ </div></li>
+</ol>
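+
+<p>As a concrete illustration, an optimized build restricted to the X86 and
+ARM backends and installed under a user-local prefix might be configured like
+this (the prefix and target list are just an example):</p>
+
+<div class="doc_code">
+<pre>
+% cd <i>OBJ_ROOT</i>
+% <i>SRC_ROOT</i>/configure --prefix=$HOME/llvm --enable-optimized --enable-targets=x86,arm
+</pre>
+</div>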
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="compile">Compiling the LLVM Suite Source Code</a>
+</h3>
+
+<div>
+
+<p>Once you have configured LLVM, you can build it. There are three types of
+builds:</p>
+
+<dl>
+ <dt>Debug Builds
+ <dd>
+    These builds are the default when one is using a Subversion checkout and
+ types <tt>gmake</tt> (unless the <tt>--enable-optimized</tt> option was
+ used during configuration). The build system will compile the tools and
+ libraries with debugging information. To get a Debug Build using the
+ LLVM distribution the <tt>--disable-optimized</tt> option must be passed
+ to <tt>configure</tt>.
+ <br><br>
+
+ <dt>Release (Optimized) Builds
+ <dd>
+ These builds are enabled with the <tt>--enable-optimized</tt> option to
+ <tt>configure</tt> or by specifying <tt>ENABLE_OPTIMIZED=1</tt> on the
+ <tt>gmake</tt> command line. For these builds, the build system will
+ compile the tools and libraries with GCC optimizations enabled and strip
+ debugging information from the libraries and executables it generates.
+ Note that Release Builds are default when using an LLVM distribution.
+ <br><br>
+
+ <dt>Profile Builds
+ <dd>
+ These builds are for use with profiling. They compile profiling
+ information into the code for use with programs like <tt>gprof</tt>.
+ Profile builds must be started by specifying <tt>ENABLE_PROFILING=1</tt>
+ on the <tt>gmake</tt> command line.
+</dl>
+
+<p>Once you have LLVM configured, you can build it by entering the
+<i>OBJ_ROOT</i> directory and issuing the following command:</p>
+
+<div class="doc_code"><pre>% gmake</pre></div>
+
+<p>If the build fails, please <a href="#brokengcc">check here</a> to see if you
+are using a version of GCC that is known not to compile LLVM.</p>
+
+<p>
+If you have multiple processors in your machine, you may wish to use some of
+the parallel build options provided by GNU Make. For example, you could use the
+command:</p>
+
+<div class="doc_code"><pre>% gmake -j2</pre></div>
+
+<p>There are several special targets which are useful when working with the LLVM
+source code:</p>
+
+<dl>
+ <dt><tt>gmake clean</tt>
+ <dd>
+ Removes all files generated by the build. This includes object files,
+ generated C/C++ files, libraries, and executables.
+ <br><br>
+
+ <dt><tt>gmake dist-clean</tt>
+ <dd>
+ Removes everything that <tt>gmake clean</tt> does, but also removes files
+ generated by <tt>configure</tt>. It attempts to return the source tree to the
+ original state in which it was shipped.
+ <br><br>
+
+ <dt><tt>gmake install</tt>
+ <dd>
+ Installs LLVM header files, libraries, tools, and documentation in a
+ hierarchy
+ under $PREFIX, specified with <tt>./configure --prefix=[dir]</tt>, which
+ defaults to <tt>/usr/local</tt>.
+ <br><br>
+
+ <dt><tt>gmake -C runtime install-bytecode</tt>
+ <dd>
+ Assuming you built LLVM into $OBJDIR, when this command is run, it will
+ install bitcode libraries into the GCC front end's bitcode library
+ directory. If you need to update your bitcode libraries,
+ this is the target to use once you've built them.
+ <br><br>
+</dl>
+
+<p>Please see the <a href="MakefileGuide.html">Makefile Guide</a> for further
+details on these <tt>make</tt> targets and descriptions of other targets
+available.</p>
+
+<p>It is also possible to override default values from <tt>configure</tt> by
+declaring variables on the command line. The following are some examples:</p>
+
+<dl>
+ <dt><tt>gmake ENABLE_OPTIMIZED=1</tt>
+ <dd>
+ Perform a Release (Optimized) build.
+ <br><br>
+
+ <dt><tt>gmake ENABLE_OPTIMIZED=1 DISABLE_ASSERTIONS=1</tt>
+ <dd>
+ Perform a Release (Optimized) build without assertions enabled.
+ <br><br>
+
+ <dt><tt>gmake ENABLE_OPTIMIZED=0</tt>
+ <dd>
+ Perform a Debug build.
+ <br><br>
+
+ <dt><tt>gmake ENABLE_PROFILING=1</tt>
+ <dd>
+ Perform a Profiling build.
+ <br><br>
+
+ <dt><tt>gmake VERBOSE=1</tt>
+ <dd>
+ Print what <tt>gmake</tt> is doing on standard output.
+ <br><br>
+
+ <dt><tt>gmake TOOL_VERBOSE=1</tt></dt>
+ <dd>Ask each tool invoked by the makefiles to print out what it is doing on
+ the standard output. This also implies <tt>VERBOSE=1</tt>.
+ <br><br></dd>
+</dl>
+
+<p>Every directory in the LLVM object tree includes a <tt>Makefile</tt> to build
+it and any subdirectories that it contains. Entering any directory inside the
+LLVM object tree and typing <tt>gmake</tt> should rebuild anything in or below
+that directory that is out of date.</p>
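+
+<p>For example, after modifying a file under <tt>llvm/lib/Support</tt>, you
+could rebuild just that library (and anything below it) by entering the
+corresponding object directory:</p>
+
+<div class="doc_code">
+<pre>
+% cd <i>OBJ_ROOT</i>/lib/Support
+% gmake
+</pre>
+</div>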
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="cross-compile">Cross-Compiling LLVM</a>
+</h3>
+
+<div>
+ <p>It is possible to cross-compile LLVM itself. That is, you can create LLVM
+ executables and libraries to be hosted on a platform different from the
+  platform where they are built (a Canadian Cross build). To configure a
+ cross-compile, supply the configure script with <tt>--build</tt> and
+ <tt>--host</tt> options that are different. The values of these options must
+ be legal target triples that your GCC compiler supports.</p>
+
+  <p>The result of such a build is executables that are not runnable on
+  the build host (--build option) but can be executed on the compile host
+ (--host option).</p>
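+
+  <p>A minimal sketch of such a <tt>configure</tt> invocation, assuming (purely
+  for illustration) a build machine triple of <tt>x86_64-unknown-linux-gnu</tt>
+  and a cross toolchain targeting <tt>arm-linux-gnueabi</tt>, might be:</p>
+
+  <div class="doc_code">
+  <pre>
+% <i>SRC_ROOT</i>/configure --build=x86_64-unknown-linux-gnu --host=arm-linux-gnueabi
+  </pre>
+  </div>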
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="objfiles">The Location of LLVM Object Files</a>
+</h3>
+
+<div>
+
+<p>The LLVM build system is capable of sharing a single LLVM source tree among
+several LLVM builds. Hence, it is possible to build LLVM for several different
+platforms or configurations using the same source tree.</p>
+
+<p>This is accomplished in the typical autoconf manner:</p>
+
+<ul>
+ <li><p>Change directory to where the LLVM object files should live:</p>
+
+ <div class="doc_code"><pre>% cd <i>OBJ_ROOT</i></pre></div></li>
+
+ <li><p>Run the <tt>configure</tt> script found in the LLVM source
+ directory:</p>
+
+ <div class="doc_code"><pre>% <i>SRC_ROOT</i>/configure</pre></div></li>
+</ul>
+
+<p>The LLVM build will place files underneath <i>OBJ_ROOT</i> in directories
+named after the build type:</p>
+
+<dl>
+ <dt>Debug Builds with assertions enabled (the default)
+ <dd>
+ <dl>
+ <dt>Tools
+ <dd><tt><i>OBJ_ROOT</i>/Debug+Asserts/bin</tt>
+ <dt>Libraries
+ <dd><tt><i>OBJ_ROOT</i>/Debug+Asserts/lib</tt>
+ </dl>
+ <br><br>
+
+ <dt>Release Builds
+ <dd>
+ <dl>
+ <dt>Tools
+ <dd><tt><i>OBJ_ROOT</i>/Release/bin</tt>
+ <dt>Libraries
+ <dd><tt><i>OBJ_ROOT</i>/Release/lib</tt>
+ </dl>
+ <br><br>
+
+ <dt>Profile Builds
+ <dd>
+ <dl>
+ <dt>Tools
+ <dd><tt><i>OBJ_ROOT</i>/Profile/bin</tt>
+ <dt>Libraries
+ <dd><tt><i>OBJ_ROOT</i>/Profile/lib</tt>
+ </dl>
+</dl>
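+
+<p>For example, after a default (Debug+Asserts) build you would run the newly
+built tools straight out of that directory tree:</p>
+
+<div class="doc_code">
+<pre>
+% <i>OBJ_ROOT</i>/Debug+Asserts/bin/llvm-as -version
+% <i>OBJ_ROOT</i>/Debug+Asserts/bin/opt -help
+</pre>
+</div>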
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="optionalconfig">Optional Configuration Items</a>
+</h3>
+
+<div>
+
+<p>
+If you're running on a Linux system that supports the "<a
+href="http://www.tat.physik.uni-tuebingen.de/~rguenth/linux/binfmt_misc.html">binfmt_misc</a>"
+module, and you have root access on the system, you can set your system up to
+execute LLVM bitcode files directly. To do this, use commands like this (the
+first command may not be required if you are already using the module):</p>
+
+<div class="doc_code">
+<pre>
+$ mount -t binfmt_misc none /proc/sys/fs/binfmt_misc
+$ echo ':llvm:M::BC::/path/to/lli:' &gt; /proc/sys/fs/binfmt_misc/register
+$ chmod u+x hello.bc (if needed)
+$ ./hello.bc
+</pre>
+</div>
+
+<p>
+This allows you to execute LLVM bitcode files directly. On Debian, you
+can also use this command instead of the 'echo' command above:
+</p>
+
+<div class="doc_code">
+<pre>
+$ sudo update-binfmts --install llvm /path/to/lli --magic 'BC'
+</pre>
+</div>
+
+</div>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2>
+ <a name="layout">Program Layout</a>
+</h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>One useful source of information about the LLVM source base is the LLVM <a
+href="http://www.doxygen.org/">doxygen</a> documentation available at <tt><a
+href="http://llvm.org/doxygen/">http://llvm.org/doxygen/</a></tt>.
+The following is a brief introduction to code layout:</p>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="examples"><tt>llvm/examples</tt></a>
+</h3>
+
+<div>
+ <p>This directory contains some simple examples of how to use the LLVM IR and
+ JIT.</p>
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="include"><tt>llvm/include</tt></a>
+</h3>
+
+<div>
+
+<p>This directory contains public header files exported from the LLVM
+library. The three main subdirectories of this directory are:</p>
+
+<dl>
+ <dt><tt><b>llvm/include/llvm</b></tt></dt>
+ <dd>This directory contains all of the LLVM specific header files. This
+ directory also has subdirectories for different portions of LLVM:
+ <tt>Analysis</tt>, <tt>CodeGen</tt>, <tt>Target</tt>, <tt>Transforms</tt>,
+ etc...</dd>
+
+ <dt><tt><b>llvm/include/llvm/Support</b></tt></dt>
+ <dd>This directory contains generic support libraries that are provided with
+ LLVM but not necessarily specific to LLVM. For example, some C++ STL utilities
+ and a Command Line option processing library store their header files here.
+ </dd>
+
+ <dt><tt><b>llvm/include/llvm/Config</b></tt></dt>
+ <dd>This directory contains header files configured by the <tt>configure</tt>
+ script. They wrap "standard" UNIX and C header files. Source code can
+ include these header files which automatically take care of the conditional
+ #includes that the <tt>configure</tt> script generates.</dd>
+</dl>
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="lib"><tt>llvm/lib</tt></a>
+</h3>
+
+<div>
+
+<p>This directory contains most of the source files of the LLVM system. In LLVM,
+almost all code exists in libraries, making it very easy to share code among the
+different <a href="#tools">tools</a>.</p>
+
+<dl>
+ <dt><tt><b>llvm/lib/VMCore/</b></tt></dt>
+ <dd> This directory holds the core LLVM source files that implement core
+ classes like Instruction and BasicBlock.</dd>
+
+ <dt><tt><b>llvm/lib/AsmParser/</b></tt></dt>
+ <dd>This directory holds the source code for the LLVM assembly language parser
+ library.</dd>
+
+ <dt><tt><b>llvm/lib/BitCode/</b></tt></dt>
+  <dd>This directory holds code for reading and writing LLVM bitcode.</dd>
+
+ <dt><tt><b>llvm/lib/Analysis/</b></tt><dd>This directory contains a variety of
+ different program analyses, such as Dominator Information, Call Graphs,
+ Induction Variables, Interval Identification, Natural Loop Identification,
+ etc.</dd>
+
+ <dt><tt><b>llvm/lib/Transforms/</b></tt></dt>
+ <dd> This directory contains the source code for the LLVM to LLVM program
+ transformations, such as Aggressive Dead Code Elimination, Sparse Conditional
+ Constant Propagation, Inlining, Loop Invariant Code Motion, Dead Global
+ Elimination, and many others.</dd>
+
+ <dt><tt><b>llvm/lib/Target/</b></tt></dt>
+ <dd> This directory contains files that describe various target architectures
+ for code generation. For example, the <tt>llvm/lib/Target/X86</tt>
+ directory holds the X86 machine description while
+ <tt>llvm/lib/Target/ARM</tt> implements the ARM backend.</dd>
+
+ <dt><tt><b>llvm/lib/CodeGen/</b></tt></dt>
+ <dd> This directory contains the major parts of the code generator: Instruction
+ Selector, Instruction Scheduling, and Register Allocation.</dd>
+
+ <dt><tt><b>llvm/lib/MC/</b></tt></dt>
+ <dd>(FIXME: T.B.D.)</dd>
+
+ <!--FIXME: obsoleted -->
+ <dt><tt><b>llvm/lib/Debugger/</b></tt></dt>
+ <dd> This directory contains the source level debugger library that makes
+ it possible to instrument LLVM programs so that a debugger could identify
+ source code locations at which the program is executing.</dd>
+
+ <dt><tt><b>llvm/lib/ExecutionEngine/</b></tt></dt>
+ <dd> This directory contains libraries for executing LLVM bitcode directly
+ at runtime in both interpreted and JIT compiled fashions.</dd>
+
+ <dt><tt><b>llvm/lib/Support/</b></tt></dt>
+ <dd> This directory contains the source code that corresponds to the header
+  files located in <tt>llvm/include/llvm/ADT/</tt>
+  and <tt>llvm/include/llvm/Support/</tt>.</dd>
+</dl>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="projects"><tt>llvm/projects</tt></a>
+</h3>
+
+<div>
+ <p>This directory contains projects that are not strictly part of LLVM but are
+ shipped with LLVM. This is also the directory where you should create your own
+ LLVM-based projects. See <tt>llvm/projects/sample</tt> for an example of how
+ to set up your own project.</p>
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="runtime"><tt>llvm/runtime</tt></a>
+</h3>
+
+<div>
+
+<p>This directory contains libraries which are compiled into LLVM bitcode and
+used when linking programs with the Clang front end. Most of these libraries are
+skeleton versions of real libraries; for example, libc is a stripped down
+version of glibc.</p>
+
+<p>Unlike the rest of the LLVM suite, this directory needs the LLVM GCC front
+end to compile.</p>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="test"><tt>llvm/test</tt></a>
+</h3>
+
+<div>
+ <p>This directory contains feature and regression tests and other basic sanity
+ checks on the LLVM infrastructure. These are intended to run quickly and cover
+ a lot of territory without being exhaustive.</p>
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="test-suite"><tt>test-suite</tt></a>
+</h3>
+
+<div>
+ <p>This is not a directory in the normal llvm module; it is a separate
+ Subversion
+ module that must be checked out (usually to <tt>projects/test-suite</tt>).
+ This
+ module contains a comprehensive correctness, performance, and benchmarking
+ test
+ suite for LLVM. It is a separate Subversion module because not every LLVM
+ user is
+ interested in downloading or building such a comprehensive test suite. For
+ further details on this test suite, please see the
+ <a href="TestingGuide.html">Testing Guide</a> document.</p>
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="tools"><tt>llvm/tools</tt></a>
+</h3>
+
+<div>
+
+<p>The <b>tools</b> directory contains the executables built out of the
+libraries above, which form the main part of the user interface. You can
+always get help for a tool by typing <tt>tool_name -help</tt>. The
+following is a brief introduction to the most important tools. More detailed
+information is in the <a href="CommandGuide/index.html">Command Guide</a>.</p>
+
+<dl>
+
+ <dt><tt><b>bugpoint</b></tt></dt>
+ <dd><tt>bugpoint</tt> is used to debug
+ optimization passes or code generation backends by narrowing down the
+ given test case to the minimum number of passes and/or instructions that
+ still cause a problem, whether it is a crash or miscompilation. See <a
+ href="HowToSubmitABug.html">HowToSubmitABug.html</a> for more information
+ on using <tt>bugpoint</tt>.</dd>
+
+ <dt><tt><b>llvm-ar</b></tt></dt>
+ <dd>The archiver produces an archive containing
+ the given LLVM bitcode files, optionally with an index for faster
+ lookup.</dd>
+
+ <dt><tt><b>llvm-as</b></tt></dt>
+ <dd>The assembler transforms the human readable LLVM assembly to LLVM
+ bitcode.</dd>
+
+ <dt><tt><b>llvm-dis</b></tt></dt>
+ <dd>The disassembler transforms the LLVM bitcode to human readable
+ LLVM assembly.</dd>
+
+ <dt><tt><b>llvm-link</b></tt></dt>
+ <dd><tt>llvm-link</tt>, not surprisingly, links multiple LLVM modules into
+ a single program.</dd>
+
+ <dt><tt><b>lli</b></tt></dt>
+ <dd><tt>lli</tt> is the LLVM interpreter, which
+ can directly execute LLVM bitcode (although very slowly...). For architectures
+ that support it (currently x86, Sparc, and PowerPC), by default, <tt>lli</tt>
+ will function as a Just-In-Time compiler (if the functionality was compiled
+ in), and will execute the code <i>much</i> faster than the interpreter.</dd>
+
+ <dt><tt><b>llc</b></tt></dt>
+ <dd> <tt>llc</tt> is the LLVM backend compiler, which
+ translates LLVM bitcode to a native code assembly file or to C code (with
+ the -march=c option).</dd>
+
+ <dt><tt><b>llvm-gcc</b></tt></dt>
+ <dd><tt>llvm-gcc</tt> is a GCC-based C frontend that has been retargeted to
+ use LLVM as its backend instead of GCC's RTL backend. It can also emit LLVM
+ bitcode or assembly (with the <tt>-emit-llvm</tt> option) instead of the
+ usual machine code output. It works just like any other GCC compiler,
+ taking the typical <tt>-c, -S, -E, -o</tt> options that are typically used.
+ Additionally, the source code for <tt>llvm-gcc</tt> is available as a
+ separate Subversion module.</dd>
+
+ <dt><tt><b>opt</b></tt></dt>
+ <dd><tt>opt</tt> reads LLVM bitcode, applies a series of LLVM to LLVM
+ transformations (which are specified on the command line), and then outputs
+ the resultant bitcode. The '<tt>opt -help</tt>' command is a good way to
+ get a list of the program transformations available in LLVM.<br>
+  <tt>opt</tt> can also be used to run a specific analysis on an input
+ LLVM bitcode file and print out the results. It is primarily useful for
+ debugging analyses, or familiarizing yourself with what an analysis does.</dd>
+</dl>
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="utils"><tt>llvm/utils</tt></a>
+</h3>
+
+<div>
+
+<p>This directory contains utilities for working with LLVM source code, and some
+of the utilities are actually required as part of the build process because they
+are code generators for parts of LLVM infrastructure.</p>
+
+<dl>
+ <dt><tt><b>codegen-diff</b></tt> <dd><tt>codegen-diff</tt> is a script
+ that finds differences between code that LLC generates and code that LLI
+ generates. This is a useful tool if you are debugging one of them,
+ assuming that the other generates correct output. For the full user
+ manual, run <tt>`perldoc codegen-diff'</tt>.<br><br>
+
+ <dt><tt><b>emacs/</b></tt> <dd>The <tt>emacs</tt> directory contains
+ syntax-highlighting files which will work with Emacs and XEmacs editors,
+ providing syntax highlighting support for LLVM assembly files and TableGen
+ description files. For information on how to use the syntax files, consult
+ the <tt>README</tt> file in that directory.<br><br>
+
+ <dt><tt><b>getsrcs.sh</b></tt> <dd>The <tt>getsrcs.sh</tt> script finds
+ and outputs all non-generated source files, which is useful if one wishes
+ to do a lot of development across directories and does not want to
+ individually find each file. One way to use it is to run, for example:
+  <tt>xemacs `utils/getsrcs.sh`</tt> from the top of your LLVM source
+ tree.<br><br>
+
+ <dt><tt><b>llvmgrep</b></tt></dt>
+ <dd>This little tool performs an "egrep -H -n" on each source file in LLVM and
+ passes to it a regular expression provided on <tt>llvmgrep</tt>'s command
+ line. This is a very efficient way of searching the source base for a
+ particular regular expression.</dd>
+
+ <dt><tt><b>makellvm</b></tt> <dd>The <tt>makellvm</tt> script compiles all
+ files in the current directory and then compiles and links the tool that
+ is the first argument. For example, assuming you are in the directory
+ <tt>llvm/lib/Target/Sparc</tt>, if <tt>makellvm</tt> is in your path,
+ simply running <tt>makellvm llc</tt> will make a build of the current
+ directory, switch to directory <tt>llvm/tools/llc</tt> and build it,
+ causing a re-linking of LLC.<br><br>
+
+ <dt><tt><b>TableGen/</b></tt> <dd>The <tt>TableGen</tt> directory contains
+ the tool used to generate register descriptions, instruction set
+ descriptions, and even assemblers from common TableGen description
+ files.<br><br>
+
+ <dt><tt><b>vim/</b></tt> <dd>The <tt>vim</tt> directory contains
+ syntax-highlighting files which will work with the VIM editor, providing
+ syntax highlighting support for LLVM assembly files and TableGen
+ description files. For information on how to use the syntax files, consult
+ the <tt>README</tt> file in that directory.<br><br>
+
+</dl>
+
+</div>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2>
+ <a name="tutorial">An Example Using the LLVM Tool Chain</a>
+</h2>
+<!-- *********************************************************************** -->
+
+<div>
+<p>This section gives an example of using LLVM with the Clang front end.</p>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="tutorial4">Example with clang</a>
+</h3>
+
+<div>
+
+<ol>
+ <li><p>First, create a simple C file, name it 'hello.c':</p>
+
+<div class="doc_code">
+<pre>
+#include &lt;stdio.h&gt;
+
+int main() {
+ printf("hello world\n");
+ return 0;
+}
+</pre></div></li>
+
+ <li><p>Next, compile the C file into a native executable:</p>
+
+ <div class="doc_code"><pre>% clang hello.c -o hello</pre></div>
+
+ <p>Note that clang works just like GCC by default. The standard -S and
+ -c arguments work as usual (producing a native .s or .o file,
+ respectively).</p></li>
+
+ <li><p>Next, compile the C file into a LLVM bitcode file:</p>
+
+ <div class="doc_code">
+ <pre>% clang -O3 -emit-llvm hello.c -c -o hello.bc</pre></div>
+
+ <p>The -emit-llvm option can be used with the -S or -c options to emit an
+ LLVM ".ll" or ".bc" file (respectively) for the code. This allows you
+ to use the <a href="CommandGuide/index.html">standard LLVM tools</a> on
+ the bitcode file.</p></li>
+
+ <li><p>Run the program in both forms. To run the program, use:</p>
+
+ <div class="doc_code"><pre>% ./hello</pre></div>
+
+ <p>and</p>
+
+ <div class="doc_code"><pre>% lli hello.bc</pre></div>
+
+    <p>The second example shows how to invoke the LLVM JIT, <a
+ href="CommandGuide/html/lli.html">lli</a>.</p></li>
+
+ <li><p>Use the <tt>llvm-dis</tt> utility to take a look at the LLVM assembly
+ code:</p>
+
+<div class="doc_code">
+<pre>llvm-dis &lt; hello.bc | less</pre>
+</div></li>
+
+ <li><p>Compile the program to native assembly using the LLC code
+ generator:</p>
+
+ <div class="doc_code"><pre>% llc hello.bc -o hello.s</pre></div></li>
+
+ <li><p>Assemble the native assembly language file into a program:</p>
+
+<div class="doc_code">
+<pre>
+<b>Solaris:</b> % /opt/SUNWspro/bin/cc -xarch=v9 hello.s -o hello.native
+
+<b>Others:</b> % gcc hello.s -o hello.native
+</pre>
+</div></li>
+
+ <li><p>Execute the native code program:</p>
+
+ <div class="doc_code"><pre>% ./hello.native</pre></div>
+
+ <p>Note that using clang to compile directly to native code (i.e. when
+ the -emit-llvm option is not present) does steps 6/7/8 for you.</p>
+ </li>
+
+</ol>
+
+</div>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2>
+ <a name="problems">Common Problems</a>
+</h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>If you are having problems building or using LLVM, or if you have any other
+general questions about LLVM, please consult the <a href="FAQ.html">Frequently
+Asked Questions</a> page.</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2>
+ <a name="links">Links</a>
+</h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>This document is just an <b>introduction</b> on how to use LLVM to do
+some simple things... there are many more interesting and complicated things
+that you can do that aren't documented here (but we'll gladly accept a patch
+if you want to write something up!). For more information about LLVM, check
+out:</p>
+
+<ul>
+ <li><a href="http://llvm.org/">LLVM homepage</a></li>
+ <li><a href="http://llvm.org/doxygen/">LLVM doxygen tree</a></li>
+ <li><a href="http://llvm.org/docs/Projects.html">Starting a Project
+ that Uses LLVM</a></li>
+</ul>
+
+</div>
+
+<!-- *********************************************************************** -->
+
+<hr>
+<address>
+ <a href="http://jigsaw.w3.org/css-validator/check/referer"><img
+ src="http://jigsaw.w3.org/css-validator/images/vcss-blue" alt="Valid CSS"></a>
+ <a href="http://validator.w3.org/check/referer"><img
+ src="http://www.w3.org/Icons/valid-html401-blue" alt="Valid HTML 4.01"></a>
+
+ <a href="mailto:sabre@nondot.org">Chris Lattner</a><br>
+ <a href="http://llvm.x10sys.com/rspencer/">Reid Spencer</a><br>
+ <a href="http://llvm.org/">The LLVM Compiler Infrastructure</a><br>
+ Last modified: $Date$
+</address>
+</body>
+</html>
diff --git a/docs/GettingStartedVS.rst b/docs/GettingStartedVS.rst
new file mode 100644
index 00000000000..35f97f04b9d
--- /dev/null
+++ b/docs/GettingStartedVS.rst
@@ -0,0 +1,234 @@
+.. _winvs:
+
+==================================================================
+Getting Started with the LLVM System using Microsoft Visual Studio
+==================================================================
+
+.. contents::
+ :local:
+
+
+Overview
+========
+Welcome to LLVM on Windows! This document only covers LLVM on Windows using
+Visual Studio, not MinGW or Cygwin. In order to get started, you first need to
+know some basic information.
+
+There are many different projects that compose LLVM. The first is the LLVM
+suite. This contains all of the tools, libraries, and header files needed to
+use LLVM. It contains an assembler, disassembler,
+bitcode analyzer and bitcode optimizer. It also contains a test suite that can
+be used to test the LLVM tools.
+
+Another useful project on Windows is `Clang <http://clang.llvm.org/>`_.
+Clang is a C family ([Objective]C/C++) compiler. Clang mostly works on
+Windows, but does not currently understand all of the Microsoft extensions
+to C and C++. Because of this, clang cannot parse the C++ standard library
+included with Visual Studio, nor parts of the Windows Platform SDK. However,
+most standard C programs do compile. Clang can be used to emit bitcode, to
+emit object files directly, or even to produce linked executables using
+Visual Studio's ``link.exe``.
+
+The large LLVM test suite cannot be run on the Visual Studio port at this
+time.
+
+Most of the tools build and work. ``bugpoint`` does build, but does
+not work.
+
+Additional information about the LLVM directory structure and tool chain
+can be found on the main `Getting Started <GettingStarted.html>`_ page.
+
+
+Requirements
+============
+Before you begin to use the LLVM system, review the requirements given
+below. This may save you some trouble by knowing ahead of time what hardware
+and software you will need.
+
+Hardware
+--------
+Any system that can adequately run Visual Studio 2008 is fine. The LLVM
+source tree and object files, libraries and executables will consume
+approximately 3GB.
+
+Software
+--------
+You will need Visual Studio 2008 or higher. Earlier versions of Visual
+Studio have bugs, are not completely compatible, or do not support the C++
+standard well enough.
+
+You will also need the `CMake <http://www.cmake.org/>`_ build system since it
+generates the project files you will use to build with.
+
+If you would like to run the LLVM tests you will need `Python
+<http://www.python.org/>`_. Versions 2.4-2.7 are known to work. You will need
+`GnuWin32 <http://gnuwin32.sourceforge.net/>`_ tools, too.
+
+Do not install the LLVM directory tree into a path containing spaces (e.g.
+``C:\Documents and Settings\...``) as the configure step will fail.
+
+
+Getting Started
+===============
+Here's the short story for getting up and running quickly with LLVM:
+
+1. Read the documentation.
+2. Seriously, read the documentation.
+3. Remember that you were warned twice about reading the documentation.
+4. Get the Source Code
+
+ * With the distributed files:
+
+ 1. ``cd <where-you-want-llvm-to-live>``
+ 2. ``gunzip --stdout llvm-VERSION.tar.gz | tar -xvf -``
+ (*or use WinZip*)
+ 3. ``cd llvm``
+
+ * With anonymous Subversion access:
+
+ 1. ``cd <where-you-want-llvm-to-live>``
+ 2. ``svn co http://llvm.org/svn/llvm-project/llvm/trunk llvm``
+ 3. ``cd llvm``
+
+5. Use `CMake <http://www.cmake.org/>`_ to generate up-to-date project files:
+
+   * Once CMake is installed, the simplest way to proceed is to start the
+     CMake GUI, select the directory where you extracted LLVM, and accept
+     the default options, which should all be fine. One option you may want
+     to change, regardless of anything else, is the ``CMAKE_INSTALL_PREFIX``
+     setting, which selects the directory to install to once compiling is
+     complete, although installation is not mandatory for using LLVM.
+     Another important option is ``LLVM_TARGETS_TO_BUILD``, which controls
+     which LLVM target architectures are included in the build.
+ * See the `LLVM CMake guide <CMake.html>`_ for detailed information about
+ how to configure the LLVM build.
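+
+   If you prefer the command line over the GUI, a roughly equivalent
+   invocation might look like the sketch below; the generator name, build
+   directory, source path, and install prefix are assumptions that you
+   should adapt to your own setup:
+
+   .. code-block:: bat
+
+      REM the generator, paths, and prefix are placeholders
+      C:\..\build> cmake -G "Visual Studio 9 2008" ^
+        -DCMAKE_INSTALL_PREFIX=C:\llvm-install ^
+        -DLLVM_TARGETS_TO_BUILD=X86 ..\llvm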
+
+6. Start Visual Studio
+
+   * The directory in which you created the project files will contain an
+     ``llvm.sln`` file; just double-click on it to open Visual Studio.
+
+7. Build the LLVM Suite:
+
+   * The projects may still be built individually, but to build them all, do
+     not simply select every project in a batch build (some are meant only
+     as configuration projects). Instead, select and build just the
+     ``ALL_BUILD`` project to build everything, or the ``INSTALL`` project,
+     which first builds ``ALL_BUILD`` and then installs the LLVM headers,
+     libraries, and other useful files to the directory set by the
+     ``CMAKE_INSTALL_PREFIX`` setting when you first configured CMake.
+   * The Fibonacci project is a sample program that uses the JIT. Modify the
+     project's debugging properties to provide a numeric command line
+     argument, or run it from the command line. The program will print the
+     corresponding Fibonacci value.
+
+8. Test LLVM on Visual Studio:
+
+   * If ``%PATH%`` does not contain GnuWin32, you may specify
+     ``LLVM_LIT_TOOLS_DIR`` in CMake to point to the GnuWin32 tools (see the
+     sketch at the end of this item).
+ * You can run LLVM tests by merely building the project "check". The test
+ results will be shown in the VS output window.
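+
+   As a hypothetical example of the ``LLVM_LIT_TOOLS_DIR`` setting mentioned
+   above (the GnuWin32 install path is an assumption), re-running CMake in
+   your build directory might look like:
+
+   .. code-block:: bat
+
+      REM the GnuWin32 path is a placeholder
+      C:\..\build> cmake -DLLVM_LIT_TOOLS_DIR="C:\GnuWin32\bin" ..\llvm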
+
+.. FIXME: Is it up-to-date?
+
+9. Test LLVM:
+
+ * The LLVM tests can be run by changing directory to the llvm source
+ directory and running:
+
+ .. code-block:: bat
+
+ C:\..\llvm> llvm-lit test
+
+   Note that quite a few of these tests will fail.
+
+ A specific test or test directory can be run with:
+
+ .. code-block:: bat
+
+ C:\..\llvm> llvm-lit test/path/to/test
+
+
+An Example Using the LLVM Tool Chain
+====================================
+
+1. First, create a simple C file, name it '``hello.c``':
+
+ .. code-block:: c
+
+ #include <stdio.h>
+ int main() {
+ printf("hello world\n");
+ return 0;
+ }
+
+2. Next, compile the C file into an LLVM bitcode file:
+
+ .. code-block:: bat
+
+ C:\..> clang -c hello.c -emit-llvm -o hello.bc
+
+   This will create the result file ``hello.bc``, which is the LLVM bitcode
+   that corresponds to the compiled program and the library facilities that
+   it required. You can execute this file directly using the ``lli`` tool,
+   compile it to native assembly with ``llc``, optimize or analyze it
+   further with the ``opt`` tool, and so on.
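+
+   For instance, a minimal sketch of running the optimizer over the bitcode
+   (the ``-O2`` level and the output file name are purely illustrative)
+   could be:
+
+   .. code-block:: bat
+
+      REM the optimization level and output name are only examples
+      C:\..> opt -O2 hello.bc -o hello.opt.bc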
+
+ Alternatively you can directly output an executable with clang with:
+
+ .. code-block:: bat
+
+ C:\..> clang hello.c -o hello.exe
+
+ The ``-o hello.exe`` is required because clang currently outputs ``a.out``
+   when neither ``-o`` nor ``-c`` is given.
+
+3. Run the program using the just-in-time compiler:
+
+ .. code-block:: bat
+
+ C:\..> lli hello.bc
+
+4. Use the ``llvm-dis`` utility to take a look at the LLVM assembly code:
+
+ .. code-block:: bat
+
+ C:\..> llvm-dis < hello.bc | more
+
+5. Compile the program to object code using the LLC code generator:
+
+ .. code-block:: bat
+
+ C:\..> llc -filetype=obj hello.bc
+
+6. Link to binary using Microsoft link:
+
+ .. code-block:: bat
+
+ C:\..> link hello.obj -defaultlib:libcmt
+
+7. Execute the native code program:
+
+ .. code-block:: bat
+
+ C:\..> hello.exe
+
+
+Common Problems
+===============
+If you are having problems building or using LLVM, or if you have any other
+general questions about LLVM, please consult the `Frequently Asked Questions
+<FAQ.html>`_ page.
+
+
+Links
+=====
+This document is just an **introduction** to how to use LLVM to do some simple
+things... there are many more interesting and complicated things that you can
+do that aren't documented here (but we'll gladly accept a patch if you want to
+write something up!). For more information about LLVM, check out:
+
+* `LLVM homepage <http://llvm.org/>`_
+* `LLVM doxygen tree <http://llvm.org/doxygen/>`_
+
diff --git a/docs/GoldPlugin.html b/docs/GoldPlugin.html
new file mode 100644
index 00000000000..1e99a5a3d6a
--- /dev/null
+++ b/docs/GoldPlugin.html
@@ -0,0 +1,227 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
+ "http://www.w3.org/TR/html4/strict.dtd">
+<html>
+<head>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
+ <title>LLVM gold plugin</title>
+ <link rel="stylesheet" href="_static/llvm.css" type="text/css">
+</head>
+<body>
+
+<h1>LLVM gold plugin</h1>
+<ol>
+ <li><a href="#introduction">Introduction</a></li>
+ <li><a href="#build">How to build it</a></li>
+ <li><a href="#usage">Usage</a>
+ <ul>
+ <li><a href="#example1">Example of link time optimization</a></li>
+ <li><a href="#lto_autotools">Quickstart for using LTO with autotooled projects</a></li>
+ </ul></li>
+ <li><a href="#licensing">Licensing</a></li>
+</ol>
+<div class="doc_author">Written by Nick Lewycky</div>
+
+<!--=========================================================================-->
+<h2><a name="introduction">Introduction</a></h2>
+<!--=========================================================================-->
+<div>
+ <p>Building with link time optimization requires cooperation from the
+system linker. LTO support on Linux systems requires that you use
+the <a href="http://sourceware.org/binutils">gold linker</a> which supports
+LTO via plugins. This is the same mechanism used by the
+<a href="http://gcc.gnu.org/wiki/LinkTimeOptimization">GCC LTO</a>
+project.</p>
+ <p>The LLVM gold plugin implements the
+<a href="http://gcc.gnu.org/wiki/whopr/driver">gold plugin interface</a>
+on top of
+<a href="LinkTimeOptimization.html#lto">libLTO</a>.
+The same plugin can also be used by other tools such as <tt>ar</tt> and
+<tt>nm</tt>.</p>
+</div>
+<!--=========================================================================-->
+<h2><a name="build">How to build it</a></h2>
+<!--=========================================================================-->
+<div>
+ <p>You need to have gold with plugin support and build the LLVMgold
+plugin. Check whether you have gold by running <tt>/usr/bin/ld -v</tt>: it
+will report &#8220;GNU gold&#8221; if you do, or &#8220;GNU ld&#8221; if not. If you
+have gold, check for plugin support by running <tt>/usr/bin/ld -plugin</tt>.
+If it complains about a &#8220;missing argument&#8221; then you have plugin
+support; if it instead reports an &#8220;unknown option&#8221; error, you will
+either need to build gold or install a version with plugin support.</p>
+<ul>
+ <li>To build gold with plugin support:
+ <pre class="doc_code">
+mkdir binutils
+cd binutils
+cvs -z 9 -d :pserver:anoncvs@sourceware.org:/cvs/src login
+<em>{enter "anoncvs" as the password}</em>
+cvs -z 9 -d :pserver:anoncvs@sourceware.org:/cvs/src co binutils
+mkdir build
+cd build
+../src/configure --enable-gold --enable-plugins
+make all-gold
+</pre>
+  That should leave you with <tt>binutils/build/gold/ld-new</tt>, which
+supports the <tt>-plugin</tt> option. The build will also have produced
+<tt>binutils/build/binutils/ar</tt> and <tt>nm-new</tt>, which support plugins
+but don't have a visible <tt>-plugin</tt> option; instead they rely on the gold
+plugin being present in <tt>../lib/bfd-plugins</tt> relative to where the
+binaries are placed.
+ <li>Build the LLVMgold plugin: Configure LLVM with
+ <tt>--with-binutils-include=/path/to/binutils/src/include</tt> and run
+ <tt>make</tt>.
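+    For example, a minimal sketch of that configure-and-build step (the
+    paths below are placeholders for wherever your LLVM build directory and
+    the binutils checkout live) might be:
+<pre class="doc_code">
+% cd /path/to/llvm-build                 # your LLVM object directory (assumption)
+% /path/to/llvm/configure --with-binutils-include=/path/to/binutils/src/include
+% make
+</pre>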
+</ul>
+</div>
+<!--=========================================================================-->
+<h2><a name="usage">Usage</a></h2>
+<!--=========================================================================-->
+<div>
+
+ <p>The linker takes a <tt>-plugin</tt> option that points to the path of
+ the plugin <tt>.so</tt> file. To find out what link command <tt>gcc</tt>
+ would run in a given situation, run <tt>gcc -v <em>[...]</em></tt> and look
+ for the line where it runs <tt>collect2</tt>. Replace that with
+ <tt>ld-new -plugin /path/to/LLVMgold.so</tt> to test it out. Once you're
+ ready to switch to using gold, backup your existing <tt>/usr/bin/ld</tt>
+ then replace it with <tt>ld-new</tt>.</p>
+
+ <p>You can produce bitcode files from <tt>clang</tt> using
+ <tt>-emit-llvm</tt> or <tt>-flto</tt>, or the <tt>-O4</tt> flag which is
+ synonymous with <tt>-O3 -flto</tt>.</p>
+
+ <p>Any of these flags will also cause <tt>clang</tt> to look for the
+ gold plugin in the <tt>lib</tt> directory under its prefix and pass the
+ <tt>-plugin</tt> option to <tt>ld</tt>. It will not look for an alternate
+ linker, which is why you need gold to be the installed system linker in
+ your path.</p>
+
+ <p>If you want <tt>ar</tt> and <tt>nm</tt> to work seamlessly as well, install
+ <tt>LLVMgold.so</tt> to <tt>/usr/lib/bfd-plugins</tt>. If you built your
+ own gold, be sure to install the <tt>ar</tt> and <tt>nm-new</tt> you built to
+  <tt>/usr/bin</tt>.</p>
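+
+  <p>A minimal sketch of that installation step (the source path is an
+  assumption, and copying into system directories normally requires root
+  privileges):</p>
+
+<pre class="doc_code">
+$ mkdir -p /usr/lib/bfd-plugins
+$ cp /path/to/LLVMgold.so /usr/lib/bfd-plugins/   # path to your built plugin (assumption)
+</pre>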
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="example1">Example of link time optimization</a>
+</h3>
+
+<div>
+  <p>The following is a worked example of using the gold plugin to mix
+  LLVM bitcode and native code.</p>
+<pre class="doc_code">
+--- a.c ---
+#include &lt;stdio.h&gt;
+
+extern void foo1(void);
+extern void foo4(void);
+
+void foo2(void) {
+ printf("Foo2\n");
+}
+
+void foo3(void) {
+ foo4();
+}
+
+int main(void) {
+ foo1();
+}
+
+--- b.c ---
+#include &lt;stdio.h&gt;
+
+extern void foo2(void);
+
+void foo1(void) {
+ foo2();
+}
+
+void foo4(void) {
+ printf("Foo4");
+}
+
+--- command lines ---
+$ clang -flto a.c -c -o a.o # &lt;-- a.o is LLVM bitcode file
+$ ar q a.a a.o # &lt;-- a.a is an archive with LLVM bitcode
+$ clang b.c -c -o b.o # &lt;-- b.o is native object file
+$ clang -flto a.a b.o -o main # &lt;-- link with LLVMgold plugin
+</pre>
+
+ <p>Gold informs the plugin that foo3 is never referenced outside the IR,
+ leading LLVM to delete that function. However, unlike in the
+ <a href="LinkTimeOptimization.html#example1">libLTO
+  example</a>, gold does not currently eliminate foo4.</p>
+</div>
+
+</div>
+
+<!--=========================================================================-->
+<h2>
+ <a name="lto_autotools">
+ Quickstart for using LTO with autotooled projects
+ </a>
+</h2>
+<!--=========================================================================-->
+<div>
+ <p>Once your system <tt>ld</tt>, <tt>ar</tt>, and <tt>nm</tt> all support LLVM
+ bitcode, everything is in place for an easy to use LTO build of autotooled
+ projects:</p>
+
+ <ul>
+ <li>Follow the instructions <a href="#build">on how to build LLVMgold.so</a>.</li>
+ <li>Install the newly built binutils to <tt>$PREFIX</tt></li>
+ <li>Copy <tt>Release/lib/LLVMgold.so</tt> to
+ <tt>$PREFIX/lib/bfd-plugins/</tt></li>
+ <li>Set environment variables (<tt>$PREFIX</tt> is where you installed clang and
+ binutils):
+<pre class="doc_code">
+export CC="$PREFIX/bin/clang -flto"
+export CXX="$PREFIX/bin/clang++ -flto"
+export AR="$PREFIX/bin/ar"
+export NM="$PREFIX/bin/nm"
+export RANLIB=/bin/true #ranlib is not needed, and doesn't support .bc files in .a
+export CFLAGS="-O4"
+</pre>
+ </li>
+ <li>Or you can just set your path:
+<pre class="doc_code">
+export PATH="$PREFIX/bin:$PATH"
+export CC="clang -flto"
+export CXX="clang++ -flto"
+export RANLIB=/bin/true
+export CFLAGS="-O4"
+</pre></li>
+ <li>Configure &amp; build the project as usual:
+<pre class="doc_code">
+% ./configure &amp;&amp; make &amp;&amp; make check
+</pre></li>
+ </ul>
+
+ <p>The environment variable settings may work for non-autotooled projects
+ too, but you may need to set the <tt>LD</tt> environment variable as
+ well.</p>
+</div>
+
+<!--=========================================================================-->
+<h2><a name="licensing">Licensing</a></h2>
+<!--=========================================================================-->
+<div>
+ <p>Gold is licensed under the GPLv3. LLVMgold uses the interface file
+<tt>plugin-api.h</tt> from gold which means that the resulting LLVMgold.so
+binary is also GPLv3. This can still be used to link non-GPLv3 programs just
+as much as gold could without the plugin.</p>
+</div>
+
+<!-- *********************************************************************** -->
+<hr>
+<address>
+ <a href="http://jigsaw.w3.org/css-validator/check/referer"><img
+ src="http://jigsaw.w3.org/css-validator/images/vcss-blue" alt="Valid CSS"></a>
+ <a href="http://validator.w3.org/check/referer"><img
+ src="http://www.w3.org/Icons/valid-html401-blue" alt="Valid HTML 4.01"></a>
+ <a href="mailto:nicholas@metrix.on.ca">Nick Lewycky</a><br>
+ <a href="http://llvm.org/">The LLVM Compiler Infrastructure</a><br>
+ Last modified: $Date: 2010-04-16 23:58:21 -0800 (Fri, 16 Apr 2010) $
+</address>
+</body>
+</html>
diff --git a/docs/HistoricalNotes/2000-11-18-EarlyDesignIdeas.txt b/docs/HistoricalNotes/2000-11-18-EarlyDesignIdeas.txt
new file mode 100644
index 00000000000..f0861811920
--- /dev/null
+++ b/docs/HistoricalNotes/2000-11-18-EarlyDesignIdeas.txt
@@ -0,0 +1,74 @@
+Date: Sat, 18 Nov 2000 09:19:35 -0600 (CST)
+From: Vikram Adve <vadve@cs.uiuc.edu>
+To: Chris Lattner <lattner@cs.uiuc.edu>
+Subject: a few thoughts
+
+I've been mulling over the virtual machine problem and I had some
+thoughts about some things for us to think about discuss:
+
+1. We need to be clear on our goals for the VM. Do we want to emphasize
+ portability and safety like the Java VM? Or shall we focus on the
+ architecture interface first (i.e., consider the code generation and
+ processor issues), since the architecture interface question is also
+ important for portable Java-type VMs?
+
+ This is important because the audiences for these two goals are very
+ different. Architects and many compiler people care much more about
+ the second question. The Java compiler and OS community care much more
+ about the first one.
+
+ Also, while the architecture interface question is important for
+ Java-type VMs, the design constraints are very different.
+
+
+2. Design issues to consider (an initial list that we should continue
+ to modify). Note that I'm not trying to suggest actual solutions here,
+ but just various directions we can pursue:
+
+ a. A single-assignment VM, which we've both already been thinking about.
+
+ b. A strongly-typed VM. One question is do we need the types to be
+ explicitly declared or should they be inferred by the dynamic compiler?
+
+ c. How do we get more high-level information into the VM while keeping
+ to a low-level VM design?
+
+ o Explicit array references as operands? An alternative is
+ to have just an array type, and let the index computations be
+ separate 3-operand instructions.
+
+ o Explicit instructions to handle aliasing, e.g.s:
+ -- an instruction to say "I speculate that these two values are not
+ aliased, but check at runtime", like speculative execution in
+ EPIC?
+ -- or an instruction to check whether two values are aliased and
+ execute different code depending on the answer, somewhat like
+ predicated code in EPIC
+
+ o (This one is a difficult but powerful idea.)
+ A "thread-id" field on every instruction that allows the static
+ compiler to generate a set of parallel threads, and then have
+ the runtime compiler and hardware do what they please with it.
+ This has very powerful uses, but thread-id on every instruction
+ is expensive in terms of instruction size and code size.
+ We would need to compactly encode it somehow.
+
+ Also, this will require some reading on at least two other
+ projects:
+ -- Multiscalar architecture from Wisconsin
+ -- Simultaneous multithreading architecture from Washington
+
+ o Or forget all this and stick to a traditional instruction set?
+
+
+BTW, on an unrelated note, after the meeting yesterday, I did remember
+that you had suggested doing instruction scheduling on SSA form instead
+of a dependence DAG earlier in the semester. When we talked about
+it yesterday, I didn't remember where the idea had come from but I
+remembered later. Just giving credit where its due...
+
+Perhaps you can save the above as a file under RCS so you and I can
+continue to expand on this.
+
+--Vikram
+
diff --git a/docs/HistoricalNotes/2000-11-18-EarlyDesignIdeasResp.txt b/docs/HistoricalNotes/2000-11-18-EarlyDesignIdeasResp.txt
new file mode 100644
index 00000000000..81ca53919d4
--- /dev/null
+++ b/docs/HistoricalNotes/2000-11-18-EarlyDesignIdeasResp.txt
@@ -0,0 +1,199 @@
+Date: Sun, 19 Nov 2000 16:23:57 -0600 (CST)
+From: Chris Lattner <sabre@nondot.org>
+To: Vikram Adve <vadve@cs.uiuc.edu>
+Subject: Re: a few thoughts
+
+Okay... here are a few of my thoughts on this (it's good to know that we
+think so alike!):
+
+> 1. We need to be clear on our goals for the VM. Do we want to emphasize
+> portability and safety like the Java VM? Or shall we focus on the
+> architecture interface first (i.e., consider the code generation and
+> processor issues), since the architecture interface question is also
+> important for portable Java-type VMs?
+
+I foresee the architecture looking kinda like this: (which is completely
+subject to change)
+
+1. The VM code is NOT guaranteed safe in a java sense. Doing so makes it
+ basically impossible to support C like languages. Besides that,
+ certifying a register based language as safe at run time would be a
+ pretty expensive operation to have to do. Additionally, we would like
+ to be able to statically eliminate many bounds checks in Java
+ programs... for example.
+
+ 2. Instead, we can do the following (eventually):
+ * Java bytecode is used as our "safe" representation (to avoid
+ reinventing something that we don't add much value to). When the
+ user chooses to execute Java bytecodes directly (ie, not
+ precompiled) the runtime compiler can do some very simple
+ transformations (JIT style) to convert it into valid input for our
+ VM. Performance is not wonderful, but it works right.
+ * The file is scheduled to be compiled (rigorously) at a later
+ time. This could be done by some background process or by a second
+ processor in the system during idle time or something...
+ * To keep things "safe" ie to enforce a sandbox on Java/foreign code,
+ we could sign the generated VM code with a host specific private
+ key. Then before the code is executed/loaded, we can check to see if
+ the trusted compiler generated the code. This would be much quicker
+ than having to validate consistency (especially if bounds checks have
+ been removed, for example)
+
+> This is important because the audiences for these two goals are very
+> different. Architects and many compiler people care much more about
+> the second question. The Java compiler and OS community care much more
+> about the first one.
+
+3. By focusing on a more low level virtual machine, we have much more room
+ for value add. The nice safe "sandbox" VM can be provided as a layer
+ on top of it. It also lets us focus on the more interesting compilers
+ related projects.
+
+> 2. Design issues to consider (an initial list that we should continue
+> to modify). Note that I'm not trying to suggest actual solutions here,
+> but just various directions we can pursue:
+
+Understood. :)
+
+> a. A single-assignment VM, which we've both already been thinking
+> about.
+
+Yup, I think that this makes a lot of sense. I am still intrigued,
+however, by the prospect of a minimally allocated VM representation... I
+think that it could have definite advantages for certain applications
+(think very small machines, like PDAs). I don't, however, think that our
+initial implementations should focus on this. :)
+
+Here are some other auxiliary goals that I think we should consider:
+
+1. Primary goal: Support a high performance dynamic compilation
+ system. This means that we have an "ideal" division of labor between
+ the runtime and static compilers. Of course, the other goals of the
+ system somewhat reduce the importance of this point (f.e. portability
+ reduces performance, but hopefully not much)
+2. Portability to different processors. Since we are most familiar with
+ x86 and solaris, I think that these two are excellent candidates when
+ we get that far...
+3. Support for all languages & styles of programming (general purpose
+ VM). This is the point that disallows java style bytecodes, where all
+ array refs are checked for bounds, etc...
+4. Support linking between different language families. For example, call
+ C functions directly from Java without using the nasty/slow/gross JNI
+ layer. This involves several subpoints:
+ A. Support for languages that require garbage collectors and integration
+ with languages that don't. As a base point, we could insist on
+ always using a conservative GC, but implement free as a noop, f.e.
+
+> b. A strongly-typed VM. One question is do we need the types to be
+> explicitly declared or should they be inferred by the dynamic
+> compiler?
+
+ B. This is kind of similar to another idea that I have: make OOP
+ constructs (virtual function tables, class heirarchies, etc) explicit
+ in the VM representation. I believe that the number of additional
+ constructs would be fairly low, but would give us lots of important
+ information... something else that would/could be important is to
+ have exceptions as first class types so that they would be handled in
+ a uniform way for the entire VM... so that C functions can call Java
+ functions for example...
+
+> c. How do we get more high-level information into the VM while keeping
+> to a low-level VM design?
+> o Explicit array references as operands? An alternative is
+> to have just an array type, and let the index computations be
+> separate 3-operand instructions.
+
+ C. In the model I was thinking of (subject to change of course), we
+ would just have an array type (distinct from the pointer
+ types). This would allow us to have arbitrarily complex index
+ expressions, while still distinguishing "load" from "Array load",
+ for example. Perhaps also, switch jump tables would be first class
+ types as well? This would allow better reasoning about the program.
+
+5. Support dynamic loading of code from various sources. Already
+ mentioned above was the example of loading java bytecodes, but we want
+ to support dynamic loading of VM code as well. This makes the job of
+ the runtime compiler much more interesting: it can do interprocedural
+ optimizations that the static compiler can't do, because it doesn't
+ have all of the required information (for example, inlining from
+ shared libraries, etc...)
+
+6. Define a set of generally useful annotations to add to the VM
+ representation. For example, a function can be analysed to see if it
+ has any sideeffects when run... also, the MOD/REF sets could be
+ calculated, etc... we would have to determine what is reasonable. This
+ would generally be used to make IP optimizations cheaper for the
+ runtime compiler...
+
+> o Explicit instructions to handle aliasing, e.g.s:
+> -- an instruction to say "I speculate that these two values are not
+> aliased, but check at runtime", like speculative execution in
+> EPIC?
+> -- or an instruction to check whether two values are aliased and
+> execute different code depending on the answer, somewhat like
+> predicated code in EPIC
+
+These are also very good points... if this can be determined at compile
+time. I think that an epic style of representation (not the instruction
+packing, just the information presented) could be a very interesting model
+to use... more later...
+
+> o (This one is a difficult but powerful idea.)
+> A "thread-id" field on every instruction that allows the static
+> compiler to generate a set of parallel threads, and then have
+> the runtime compiler and hardware do what they please with it.
+> This has very powerful uses, but thread-id on every instruction
+> is expensive in terms of instruction size and code size.
+> We would need to compactly encode it somehow.
+
+Yes yes yes! :) I think it would be *VERY* useful to include this kind
+of information (which EPIC architectures *implicitly* encode. The trend
+that we are seeing supports this greatly:
+
+1. Commodity processors are getting massive SIMD support:
+ * Intel/Amd MMX/MMX2
+ * AMD's 3Dnow!
+ * Intel's SSE/SSE2
+ * Sun's VIS
+2. SMP is becoming much more common, especially in the server space.
+3. Multiple processors on a die are right around the corner.
+
+If nothing else, not designing this in would severely limit our future
+expansion of the project...
+
+> Also, this will require some reading on at least two other
+> projects:
+> -- Multiscalar architecture from Wisconsin
+> -- Simultaneous multithreading architecture from Washington
+>
+> o Or forget all this and stick to a traditional instruction set?
+
+Heh... :) Well, from a pure research point of view, it is almost more
+attactive to go with the most extreme/different ISA possible. On one axis
+you get safety and conservatism, and on the other you get degree of
+influence that the results have. Of course the problem with pure research
+is that often times there is no concrete product of the research... :)
+
+> BTW, on an unrelated note, after the meeting yesterday, I did remember
+> that you had suggested doing instruction scheduling on SSA form instead
+> of a dependence DAG earlier in the semester. When we talked about
+> it yesterday, I didn't remember where the idea had come from but I
+> remembered later. Just giving credit where its due...
+
+:) Thanks.
+
+> Perhaps you can save the above as a file under RCS so you and I can
+> continue to expand on this.
+
+I think it makes sense to do so when we get our ideas more formalized and
+bounce it back and forth a couple of times... then I'll do a more formal
+writeup of our goals and ideas. Obviously our first implementation will
+not want to do all of the stuff that I pointed out above... but we will
+want to design the project so that we do not artificially limit ourselves
+at sometime in the future...
+
+Anyways, let me know what you think about these ideas... and if they sound
+reasonable...
+
+-Chris
+
diff --git a/docs/HistoricalNotes/2000-12-06-EncodingIdea.txt b/docs/HistoricalNotes/2000-12-06-EncodingIdea.txt
new file mode 100644
index 00000000000..8c452924dd1
--- /dev/null
+++ b/docs/HistoricalNotes/2000-12-06-EncodingIdea.txt
@@ -0,0 +1,30 @@
+From: Chris Lattner [mailto:sabre@nondot.org]
+Sent: Wednesday, December 06, 2000 6:41 PM
+To: Vikram S. Adve
+Subject: Additional idea with respect to encoding
+
+Here's another idea with respect to keeping the common case instruction
+size down (less than 32 bits ideally):
+
+Instead of encoding an instruction to operate on two register numbers,
+have it operate on two negative offsets based on the current register
+number. Therefore, instead of using:
+
+r57 = add r55, r56 (r57 is the implicit dest register, of course)
+
+We could use:
+
+r57 = add -2, -1
+
+My guess is that most SSA references are to recent values (especially if
+they correspond to expressions like (x+y*z+p*q/ ...), so the negative
+numbers would tend to stay small, even at the end of the procedure (where
+the implicit register destination number could be quite large). Of course
+the negative sign is redundant, so you would be storing small integers
+almost all of the time, and 5-6 bits worth of register number would be
+plenty for most cases...
+
+What do you think?
+
+-Chris
+
diff --git a/docs/HistoricalNotes/2000-12-06-MeetingSummary.txt b/docs/HistoricalNotes/2000-12-06-MeetingSummary.txt
new file mode 100644
index 00000000000..01b644b3517
--- /dev/null
+++ b/docs/HistoricalNotes/2000-12-06-MeetingSummary.txt
@@ -0,0 +1,83 @@
+SUMMARY
+-------
+
+We met to discuss the LLVM instruction format and bytecode representation:
+
+ISSUES RESOLVED
+---------------
+
+1. We decided that we shall use a flat namespace to represent our
+ variables in SSA form, as opposed to having a two dimensional namespace
+ of the original variable and the SSA instance subscript.
+
+ARGUMENT AGAINST:
+ * A two dimensional namespace would be valuable when doing alias
+ analysis because the extra information can help limit the scope of
+ analysis.
+
+ARGUMENT FOR:
+ * Including this information would require that all users of the LLVM
+ bytecode would have to parse and handle it. This would slow down the
+ common case and inflate the instruction representation with another
+ infinite variable space.
+
+REASONING:
+ * It was decided that because original variable sources could be
+ reconstructed from SSA form in linear time, that it would be an
+ unjustified expense for the common case to include the extra
+ information for one optimization. Alias analysis itself is typically
+    greater than linear in asymptotic complexity, so this extra analysis
+ would not affect the runtime of the optimization in a significant
+ way. Additionally, this would be an unlikely optimization to do at
+ runtime.
+
+
+IDEAS TO CONSIDER
+-----------------
+
+1. Including dominator information in the LLVM bytecode
+ representation. This is one example of an analysis result that may be
+ packaged with the bytecodes themselves. As a conceptual implementation
+ idea, we could include an immediate dominator number for each basic block
+ in the LLVM bytecode program. Basic blocks could be numbered according
+ to the order of occurrence in the bytecode representation.
+
+2. Including loop header and body information. This would facilitate
+ detection of intervals and natural loops.
+
+UNRESOLVED ISSUES
+-----------------
+
+1. Will oSUIF provide enough of an infrastructure to support the research
+ that we will be doing? We know that it has less than stellar
+ performance, but hope that this will be of little importance for our
+ static compiler. This could affect us if we decided to do some IP
+ research. Also we do not yet understand the level of exception support
+ currently implemented.
+
+2. Should we consider the requirements of a direct hardware implementation
+ of the LLVM when we design it? If so, several design issues should
+ have their priorities shifted. The other option is to focus on a
+ software layer interpreting the LLVM in all cases.
+
+3. Should we use some form of packetized format to improve forward
+ compatibility? For example, we could design the system to encode a
+ packet type and length field before analysis information, to allow a
+ runtime to skip information that it didn't understand in a bytecode
+ stream. The obvious benefit would be for compatibility, the drawback
+ is that it would tend to splinter that 'standard' LLVM definition.
+
+4. Should we use fixed length instructions or variable length
+ instructions? Fetching variable length instructions is expensive (for
+ either hardware or software based LLVM runtimes), but we have several
+ 'infinite' spaces that instructions operate in (SSA register numbers,
+ type spaces, or packet length [if packets were implemented]). Several
+ options were mentioned including:
+ A. Using 16 or 32 bit numbers, which would be 'big enough'
+ B. A scheme similar to how UTF-8 works, to encode infinite numbers
+ while keeping small number small.
+ C. Use something similar to Huffman encoding, so that the most common
+ numbers are the smallest.
+
+-Chris
+
diff --git a/docs/HistoricalNotes/2001-01-31-UniversalIRIdea.txt b/docs/HistoricalNotes/2001-01-31-UniversalIRIdea.txt
new file mode 100644
index 00000000000..111706a3447
--- /dev/null
+++ b/docs/HistoricalNotes/2001-01-31-UniversalIRIdea.txt
@@ -0,0 +1,39 @@
+Date: Wed, 31 Jan 2001 12:04:33 -0600
+From: Vikram S. Adve <vadve@cs.uiuc.edu>
+To: Chris Lattner <lattner@cs.uiuc.edu>
+Subject: another thought
+
+I have a budding idea about making LLVM a little more ambitious: a
+customizable runtime system that can be used to implement language-specific
+virtual machines for many different languages. E.g., a C vm, a C++ vm, a
+Java vm, a Lisp vm, ..
+
+The idea would be that LLVM would provide a standard set of runtime features
+(some low-level like standard assembly instructions with code generation and
+static and runtime optimization; some higher-level like type-safety and
+perhaps a garbage collection library). Each language vm would select the
+runtime features needed for that language, extending or customizing them as
+needed. Most of the machine-dependent code-generation and optimization
+features as well as low-level machine-independent optimizations (like PRE)
+could be provided by LLVM and should be sufficient for any language,
+simplifying the language compiler. (This would also help interoperability
+between languages.) Also, some or most of the higher-level
+machine-independent features like type-safety and access safety should be
+reusable by different languages, with minor extensions. The language
+compiler could then focus on language-specific analyses and optimizations.
+
+The risk is that this sounds like a universal IR -- something that the
+compiler community has tried and failed to develop for decades, and is
+universally skeptical about. No matter what we say, we won't be able to
+convince anyone that we have a universal IR that will work. We need to
+think about whether LLVM is different or if has something novel that might
+convince people. E.g., the idea of providing a package of separable
+features that different languages select from. Also, using SSA with or
+without type-safety as the intermediate representation.
+
+One interesting starting point would be to discuss how a JVM would be
+implemented on top of LLVM a bit more. That might give us clues on how to
+structure LLVM to support one or more language VMs.
+
+--Vikram
+
diff --git a/docs/HistoricalNotes/2001-02-06-TypeNotationDebate.txt b/docs/HistoricalNotes/2001-02-06-TypeNotationDebate.txt
new file mode 100644
index 00000000000..c09cf1f03cc
--- /dev/null
+++ b/docs/HistoricalNotes/2001-02-06-TypeNotationDebate.txt
@@ -0,0 +1,67 @@
+Date: Tue, 6 Feb 2001 20:27:37 -0600 (CST)
+From: Chris Lattner <sabre@nondot.org>
+To: Vikram S. Adve <vadve@cs.uiuc.edu>
+Subject: Type notation debate...
+
+This is the way that I am currently planning on implementing types:
+
+Primitive Types:
+type ::= void|bool|sbyte|ubyte|short|ushort|int|uint|long|ulong
+
+Method:
+typelist ::= typelisth | /*empty*/
+typelisth ::= type | typelisth ',' type
+type ::= type (typelist)
+
+Arrays (without and with size):
+type ::= '[' type ']' | '[' INT ',' type ']'
+
+Pointer:
+type ::= type '*'
+
+Structure:
+type ::= '{' typelist '}'
+
+Packed:
+type ::= '<' INT ',' type '>'
+
+Simple examples:
+
+[[ %4, int ]] - array of (array of 4 (int))
+[ { int, int } ] - Array of structure
+[ < %4, int > ] - Array of 128 bit SIMD packets
+int (int, [[int, %4]]) - Method taking a 2d array and int, returning int
+
+
+Okay before you comment, please look at:
+
+http://www.research.att.com/~bs/devXinterview.html
+
+Search for "In another interview, you defined the C declarator syntax as
+an experiment that failed. However, this syntactic construct has been
+around for 27 years and perhaps more; why do you consider it problematic
+(except for its cumbersome syntax)?" and read that response for me. :)
+
+Now with this syntax, his example would be represented as:
+
+[ %10, bool (int, int) * ] *
+
+vs
+
+bool (*(*)[10])(int, int)
+
+in C.
+
+Basically, my argument for this type construction system is that it is
+VERY simple to use and understand (although it IS different than C, it is
+very simple and straightforward, which C is NOT). In fact, I would assert
+that most programmers TODAY do not understand pointers to member
+functions, and have to look up an example when they have to write them.
+
+In my opinion, it is critically important to have clear and concise type
+specifications, because types are going to be all over the programs.
+
+Let me know your thoughts on this. :)
+
+-Chris
+
diff --git a/docs/HistoricalNotes/2001-02-06-TypeNotationDebateResp1.txt b/docs/HistoricalNotes/2001-02-06-TypeNotationDebateResp1.txt
new file mode 100644
index 00000000000..8bfefbf69f6
--- /dev/null
+++ b/docs/HistoricalNotes/2001-02-06-TypeNotationDebateResp1.txt
@@ -0,0 +1,75 @@
+Date: Thu, 8 Feb 2001 08:42:04 -0600
+From: Vikram S. Adve <vadve@cs.uiuc.edu>
+To: Chris Lattner <sabre@nondot.org>
+Subject: RE: Type notation debate...
+
+Chris,
+
+> Okay before you comment, please look at:
+>
+> http://www.research.att.com/~bs/devXinterview.html
+
+I read this argument. Even before that, I was already in agreement with you
+and him that the C declarator syntax is difficult and confusing.
+
+But in fact, if you read the entire answer carefully, he came to the same
+conclusion I do: that you have to go with familiar syntax over logical
+syntax because familiarity is such a strong force:
+
+ "However, familiarity is a strong force. To compare, in English, we
+live
+more or less happily with the absurd rules for "to be" (am, are, is, been,
+was, were, ...) and all attempts to simplify are treated with contempt or
+(preferably) humor. It be a curious world and it always beed."
+
+> Basically, my argument for this type construction system is that it is
+> VERY simple to use and understand (although it IS different than C, it is
+> very simple and straightforward, which C is NOT). In fact, I would assert
+> that most programmers TODAY do not understand pointers to member
+> functions, and have to look up an example when they have to write them.
+
+Again, I don't disagree with this at all. But to some extent this
+particular problem is inherently difficult. Your syntax for the above
+example may be easier for you to read because this is the way you have been
+thinking about it. Honestly, I don't find it much easier than the C syntax.
+In either case, I would have to look up an example to write pointers to
+member functions.
+
+But pointers to member functions are nowhere near as common as arrays. And
+the old array syntax:
+ type [ int, int, ...]
+is just much more familiar and clear to people than anything new you
+introduce, no matter how logical it is. Introducing a new syntax that may
+make function pointers easier but makes arrays much more difficult seems
+very risky to me.
+
+> In my opinion, it is critically important to have clear and concise type
+> specifications, because types are going to be all over the programs.
+
+I absolutely agree. But the question is, what is more clear and concise?
+The syntax programmers are used to out of years of experience or a new
+syntax that they have never seen that has a more logical structure. I think
+the answer is the former. Sometimes, you have to give up a better idea
+because you can't overcome sociological barriers to it. Qwerty keyboards
+and Windows are two classic examples of bad technology that are difficult to
+root out.
+
+P.S. Also, while I agree that most your syntax is more logical, there is
+one part that isn't:
+
+Arrays (without and with size):
+type ::= '[' type ']' | '[' INT ',' type ']'.
+
+The arrays with size lists the dimensions and the type in a single list.
+That is just too confusing:
+ [10, 40, int]
+This seems to be a 3-D array where the third dimension is something strange.
+It is too confusing to have a list of 3 things, some of which are dimensions
+and one is a type. Either of the following would be better:
+
+ array [10, 40] of int
+or
+ int [10, 40]
+
+--Vikram
+
diff --git a/docs/HistoricalNotes/2001-02-06-TypeNotationDebateResp2.txt b/docs/HistoricalNotes/2001-02-06-TypeNotationDebateResp2.txt
new file mode 100644
index 00000000000..6e9784158a3
--- /dev/null
+++ b/docs/HistoricalNotes/2001-02-06-TypeNotationDebateResp2.txt
@@ -0,0 +1,53 @@
+Date: Thu, 8 Feb 2001 14:31:05 -0600 (CST)
+From: Chris Lattner <sabre@nondot.org>
+To: Vikram S. Adve <vadve@cs.uiuc.edu>
+Subject: RE: Type notation debate...
+
+> Arrays (without and with size):
+> type ::= '[' type ']' | '[' INT ',' type ']'.
+>
+> The arrays with size lists the dimensions and the type in a single list.
+> That is just too confusing:
+
+> [10, 40, int]
+> This seems to be a 3-D array where the third dimension is something strange.
+> It is too confusing to have a list of 3 things, some of which are dimensions
+> and one is a type.
+
+The above grammar indicates that there is only one integer parameter, ie
+the upper bound. The lower bound is always implied to be zero, for
+several reasons:
+
+* As a low level VM, we want to expose addressing computations
+ explicitly. Since the lower bound must always be known in a high level
+ language statically, the language front end can do the translation
+ automatically.
+* This fits more closely with what Java needs, ie what we need in the
+ short term. Java arrays are always zero based.
+
+If a two element list is too confusing, I would recommend an alternate
+syntax of:
+
+type ::= '[' type ']' | '[' INT 'x' type ']'.
+
+For example:
+ [12 x int]
+ [12x int]
+ [ 12 x [ 4x int ]]
+
+Which is syntactically nicer, and more explicit.
+
+> Either of the following would be better:
+> array [10, 40] of int
+
+I considered this approach for arrays in general (ie array of int/ array
+of 12 int), but found that it made declarations WAY too long. Remember
+that because of the nature of llvm, you get a lot of types strewn all over
+the program, and using the 'typedef' like facility is not a wonderful
+option, because then types aren't explicit anymore.
+
+I find this email interesting, because you contradict the previous email
+you sent, where you recommend that we stick to C syntax....
+
+-Chris
+
diff --git a/docs/HistoricalNotes/2001-02-06-TypeNotationDebateResp4.txt b/docs/HistoricalNotes/2001-02-06-TypeNotationDebateResp4.txt
new file mode 100644
index 00000000000..839732444f9
--- /dev/null
+++ b/docs/HistoricalNotes/2001-02-06-TypeNotationDebateResp4.txt
@@ -0,0 +1,89 @@
+> But in fact, if you read the entire answer carefully, he came to the same
+> conclusion I do: that you have to go with familiar syntax over logical
+> syntax because familiarity is such a strong force:
+> "However, familiarity is a strong force. To compare, in English, we
+live
+> more or less happily with the absurd rules for "to be" (am, are, is, been,
+> was, were, ...) and all attempts to simplify are treated with contempt or
+> (preferably) humor. It be a curious world and it always beed."
+
+Although you have to remember that his situation was considerably
+different than ours. He was in a position where he was designing a high
+level language that had to be COMPATIBLE with C. Our language is such
+that a new person would have to learn the new, different, syntax
+anyways. Making them learn about the type system does not seem like much
+of a stretch from learning the opcodes and how SSA form works, and how
+everything ties together...
+
+> > Basically, my argument for this type construction system is that it is
+> > VERY simple to use and understand (although it IS different than C, it is
+> > very simple and straightforward, which C is NOT). In fact, I would assert
+> > that most programmers TODAY do not understand pointers to member
+> > functions, and have to look up an example when they have to write them.
+
+> Again, I don't disagree with this at all. But to some extent this
+> particular problem is inherently difficult. Your syntax for the above
+> example may be easier for you to read because this is the way you have been
+> thinking about it. Honestly, I don't find it much easier than the C syntax.
+> In either case, I would have to look up an example to write pointers to
+> member functions.
+
+I would argue that because the lexical structure of the language is self
+consistent, any person who spent a significant amount of time programming
+in LLVM directly would understand how to do it without looking it up in a
+manual. The reason this does not work for C is because you rarely have to
+declare these pointers, and the syntax is inconsistent with the method
+declaration and calling syntax.
+
+> But pointers to member functions are nowhere near as common as arrays.
+
+Very true. If you're implementing an object oriented language, however,
+remember that you have to do all the pointer to member function stuff
+yourself.... so every time you invoke a virtual method one is involved
+(instead of having C++ hide it for you behind "syntactic sugar").
+
+> And the old array syntax:
+> type [ int, int, ...]
+> is just much more familiar and clear to people than anything new you
+> introduce, no matter how logical it is.
+
+Erm... excuse me but how is this the "old array syntax"? If you are
+arguing for consistency with C, you should be asking for 'type int []',
+which is significantly different than the above (beside the above
+introduces a new operator and duplicates information
+needlessly). Basically what I am suggesting is exactly the above without
+the fluff. So instead of:
+
+ type [ int, int, ...]
+
+you use:
+
+ type [ int ]
+
+> Introducing a new syntax that may
+> make function pointers easier but makes arrays much more difficult seems
+> very risky to me.
+
+This is not about function pointers. This is about consistency in the
+type system, and consistency with the rest of the language. The point
+above does not make arrays any more difficult to use, and makes the
+structure of types much more obvious than the "c way".
+
+> > In my opinion, it is critically important to have clear and concise type
+> > specifications, because types are going to be all over the programs.
+>
+> I absolutely agree. But the question is, what is more clear and concise?
+> The syntax programmers are used to out of years of experience or a new
+> syntax that they have never seen that has a more logical structure. I think
+> the answer is the former. Sometimes, you have to give up a better idea
+> because you can't overcome sociological barriers to it. Qwerty keyboards
+> and Windows are two classic examples of bad technology that are difficult to
+> root out.
+
+Very true, but you seem to be advocating a completely different Type
+system than C has, in addition to it not offering the advantages of clear
+structure that the system I recommended does... so you seem to not have a
+problem with changing this, just with what I change it to. :)
+
+-Chris
+
diff --git a/docs/HistoricalNotes/2001-02-09-AdveComments.txt b/docs/HistoricalNotes/2001-02-09-AdveComments.txt
new file mode 100644
index 00000000000..5503233c1ed
--- /dev/null
+++ b/docs/HistoricalNotes/2001-02-09-AdveComments.txt
@@ -0,0 +1,120 @@
+Ok, here are my comments and suggestions about the LLVM instruction set.
+We should discuss some now, but can discuss many of them later, when we
+revisit synchronization, type inference, and other issues.
+(We have discussed some of the comments already.)
+
+
+o We should consider eliminating the type annotation in cases where it is
+ essentially obvious from the instruction type, e.g., in br, it is obvious
+ that the first arg. should be a bool and the other args should be labels:
+
+ br bool <cond>, label <iftrue>, label <iffalse>
+
+ I think your point was that making all types explicit improves clarity
+ and readability. I agree to some extent, but it also comes at the cost
+ of verbosity. And when the types are obvious from people's experience
+ (e.g., in the br instruction), it doesn't seem to help as much.
+
+
+o On reflection, I really like your idea of having the two different switch
+ types (even though they encode implementation techniques rather than
+ semantics). It should simplify building the CFG and my guess is it could
+ enable some significant optimizations, though we should think about which.
+
+
+o In the lookup-indirect form of the switch, is there a reason not to make
+ the val-type uint? Most HLL switch statements (including Java and C++)
+ require that anyway. And it would also make the val-type uniform
+ in the two forms of the switch.
+
+ I did see the switch-on-bool examples and, while cute, we can just use
+ the branch instructions in that particular case.
+
+
+o I agree with your comment that we don't need 'neg'.
+
+
+o There's a trade-off with the cast instruction:
+ + it avoids having to define all the upcasts and downcasts that are
+ valid for the operands of each instruction (you probably have thought
+ of other benefits also)
+ - it could make the bytecode significantly larger because there could
+ be a lot of cast operations
+
+
+o Making the second arg. to 'shl' a ubyte seems good enough to me.
+ 255 positions seems adequate for several generations of machines
+ and is more compact than uint.
+
+
+o I still have some major concerns about including malloc and free in the
+ language (either as builtin functions or instructions). LLVM must be
+ able to represent code from many different languages. Languages such as
+ C, C++ Java and Fortran 90 would not be able to use our malloc anyway
+ because each of them will want to provide a library implementation of it.
+
+ This gets even worse when code from different languages is linked
+ into a single executable (which is fairly common in large apps).
+ Having a single malloc would just not suffice, and instead would simply
+ complicate the picture further because it adds an extra variant in
+ addition to the one each language provides.
+
+ Instead, providing a default library version of malloc and free
+ (and perhaps a malloc_gc with garbage collection instead of free)
+ would make a good implementation available to anyone who wants it.
+
+ I don't recall all your arguments in favor so let's discuss this again,
+ and soon.
+
+
+o 'alloca' on the other hand sounds like a good idea, and the
+ implementation seems fairly language-independent so it doesn't have the
+ problems with malloc listed above.
+
+
+o About indirect call:
+ Your option #2 sounded good to me. I'm not sure I understand your
+ concern about an explicit 'icall' instruction?
+
+
+o A pair of important synchronization instr'ns to think about:
+ load-linked
+ store-conditional
+
+
+o Other classes of instructions that are valuable for pipeline performance:
+ conditional-move
+ predicated instructions
+
+
+o I believe tail calls are relatively easy to identify; do you know why
+ .NET has a tailcall instruction?
+
+
+o I agree that we need a static data space. Otherwise, emulating global
+ data gets unnecessarily complex.
+
+
+o About explicit parallelism:
+
+ We once talked about adding a symbolic thread-id field to each
+ instruction. (It could be optional so single-threaded codes are
+ not penalized.) This could map well to multi-threaded architectures
+ while providing easy ILP for single-threaded onces. But it is probably
+ too radical an idea to include in a base version of LLVM. Instead, it
+ could a great topic for a separate study.
+
+ What is the semantics of the IA64 stop bit?
+
+
+
+
+o And finally, another thought about the syntax for arrays :-)
+
+ Although this syntax:
+ array <dimension-list> of <type>
+ is verbose, it will be used only in the human-readable assembly code so
+ size should not matter. I think we should consider it because I find it
+ to be the clearest syntax. It could even make arrays of function
+ pointers somewhat readable.
+
diff --git a/docs/HistoricalNotes/2001-02-09-AdveCommentsResponse.txt b/docs/HistoricalNotes/2001-02-09-AdveCommentsResponse.txt
new file mode 100644
index 00000000000..da502636653
--- /dev/null
+++ b/docs/HistoricalNotes/2001-02-09-AdveCommentsResponse.txt
@@ -0,0 +1,245 @@
+From: Chris Lattner <sabre@nondot.org>
+To: "Vikram S. Adve" <vadve@cs.uiuc.edu>
+Subject: Re: LLVM Feedback
+
+I've included your feedback in the /home/vadve/lattner/llvm/docs directory
+so that it will live in CVS eventually with the rest of LLVM. I've
+significantly updated the documentation to reflect the changes you
+suggested, as specified below:
+
+> We should consider eliminating the type annotation in cases where it is
+> essentially obvious from the instruction type:
+> br bool <cond>, label <iftrue>, label <iffalse>
+> I think your point was that making all types explicit improves clarity
+> and readability. I agree to some extent, but it also comes at the
+> cost of verbosity. And when the types are obvious from people's
+> experience (e.g., in the br instruction), it doesn't seem to help as
+> much.
+
+Very true. We should discuss this more, but my reasoning is more of a
+consistency argument. There are VERY few instructions that can have all
+of the types eliminated, and doing so when available unnecessarily makes
+the language more difficult to handle. Especially when you see 'int
+%this' and 'bool %that' all over the place, I think it would be
+disorienting to see:
+
+ br %predicate, %iftrue, %iffalse
+
+for branches. Even just typing that once gives me the creeps. ;) Like I
+said, we should probably discuss this further in person...
+
+> On reflection, I really like your idea of having the two different
+> switch types (even though they encode implementation techniques rather
+> than semantics). It should simplify building the CFG and my guess is it
+> could enable some significant optimizations, though we should think
+> about which.
+
+Great. I added a note to the switch section commenting on how the VM
+should just use the instruction type as a hint, and that the
+implementation may choose alternate representations (such as predicated
+branches).
+
+> In the lookup-indirect form of the switch, is there a reason not to
+> make the val-type uint?
+
+No. This was something I was debating for a while, and didn't really feel
+strongly about either way. It is common to switch on other types in HLL's
+(for example signed int's are particularly common), but in this case, all
+that will be added is an additional 'cast' instruction. I removed that
+from the spec.
+
+> I agree with your comment that we don't need 'neg'
+
+Removed.
+
+> There's a trade-off with the cast instruction:
+> + it avoids having to define all the upcasts and downcasts that are
+> valid for the operands of each instruction (you probably have
+> thought of other benefits also)
+> - it could make the bytecode significantly larger because there could
+> be a lot of cast operations
+
+ + You NEED casts to represent things like:
+ void foo(float);
+ ...
+ int x;
+ ...
+ foo(x);
+ in a language like C. Even in a Java like language, you need upcasts
+ and some way to implement dynamic downcasts.
+ + Not all forms of instructions take every type (for example you can't
+ shift by a floating point number of bits), thus SOME programs will need
+ implicit casts.
+
+To be efficient and to avoid your '-' point above, we just have to be
+careful to specify that the instructions shall operate on all common
+types, therefore casting should be relatively uncommon. For example all
+of the arithmetic operations work on almost all data types.
+
+> Making the second arg. to 'shl' a ubyte seems good enough to me.
+> 255 positions seems adequate for several generations of machines
+
+Okay, that comment is removed.
+
+> and is more compact than uint.
+
+No, it isn't. Remember that the bytecode encoding saves value slots into
+the bytecode instructions themselves, not constant values. This is
+another case where we may introduce more cast instructions (but we will
+also reduce the number of opcode variants that must be supported by a
+virtual machine). Because most shifts are by constant values, I don't
+think that we'll have to cast many shifts. :)
+
+> I still have some major concerns about including malloc and free in the
+> language (either as builtin functions or instructions).
+
+Agreed. How about this proposal:
+
+malloc/free are either built in functions or actual opcodes. They provide
+all of the type safety that the document would indicate, blah blah
+blah. :)
+
+Now, because of all of the excellent points that you raised, an
+implementation may want to override the default malloc/free behavior of
+the program. To do this, it simply implements a "malloc" and
+"free" function. The virtual machine will then be defined to use the
+user-defined malloc/free functions (which take/return void*'s, not typed
+pointers like the builtin functions would) if they are available, and
+otherwise fall back on a system malloc/free.
+
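+As a minimal illustrative sketch (this is not part of the proposal itself,
+and the names vm_malloc/vm_free/user_malloc/user_free are hypothetical),
+the dispatch the VM would perform might look roughly like this in C++:
+
+  #include <cstddef>
+  #include <cstdlib>
+
+  // Set by the runtime if the program provides its own "malloc"/"free".
+  void *(*user_malloc)(std::size_t) = nullptr;
+  void  (*user_free)(void *)        = nullptr;
+
+  // The VM's allocation entry points: prefer the user-defined functions
+  // (which take/return untyped void*'s), otherwise fall back on the
+  // system allocator.
+  void *vm_malloc(std::size_t Size) {
+    return user_malloc ? user_malloc(Size) : std::malloc(Size);
+  }
+
+  void vm_free(void *Ptr) {
+    if (user_free)
+      user_free(Ptr);
+    else
+      std::free(Ptr);
+  }
+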
+Does this sound like a good compromise? It would give us all of the
+typesafety/elegance in the language while still allowing the user to do
+all the cool stuff they want to...
+
+> 'alloca' on the other hand sounds like a good idea, and the
+> implementation seems fairly language-independent so it doesn't have the
+> problems with malloc listed above.
+
+Okay, once we get the above stuff figured out, I'll put it all in the
+spec.
+
+> About indirect call:
+> Your option #2 sounded good to me. I'm not sure I understand your
+> concern about an explicit 'icall' instruction?
+
+I worry too much. :) The other alternative has been removed. 'icall' is
+now up in the instruction list next to 'call'.
+
+> I believe tail calls are relatively easy to identify; do you know why
+> .NET has a tailcall instruction?
+
+Although I am just guessing, I believe it probably has to do with the fact
+that they want languages like Haskell and lisp to be efficiently runnable
+on their VM. Of course this means that the VM MUST implement tail calls
+'correctly', or else life will suck. :) I would put this into a future
+feature bin, because it could be pretty handy...
+
+> A pair of important synchronization instr'ns to think about:
+> load-linked
+> store-conditional
+
+What is 'load-linked'? I think that (at least for now) I should add these
+to the 'possible extensions' section, because they are not immediately
+needed...
+
+> Other classes of instructions that are valuable for pipeline
+> performance:
+> conditional-move
+> predicated instructions
+
+Conditional move is effectively a special case of a predicated
+instruction... and I think that all predicated instructions can possibly
+be implemented later in LLVM. It would significantly change things, and
+it doesn't seem to be very necessary right now. It would seem to
+complicate flow control analysis a LOT in the virtual machine. I would
+tend to prefer that a predicated architecture like IA64 convert from a
+"basic block" representation to a predicated rep as part of its dynamic
+compilation phase. Also, if a basic block contains ONLY a move, then
+that can be trivially translated into a conditional move...
+
+> I agree that we need a static data space. Otherwise, emulating global
+> data gets unnecessarily complex.
+
+Definitely. Also a later item though. :)
+
+> We once talked about adding a symbolic thread-id field to each
+> ..
+> Instead, it could be a great topic for a separate study.
+
+Agreed. :)
+
+> What is the semantics of the IA64 stop bit?
+
+Basically, the IA64 writes instructions like this:
+mov ...
+add ...
+sub ...
+op xxx
+op xxx
+;;
+mov ...
+add ...
+sub ...
+op xxx
+op xxx
+;;
+
+Where the ;; delimits a group of instructions with no dependencies between
+them, which can all be executed concurrently (to the limits of the
+available functional units). The ;; gets translated into a bit set in one
+of the opcodes.
+
+The advantage of this representation is that you don't have to do some
+kind of 'thread id scheduling' pass by having to specify ahead of time how
+many threads to use, and the representation doesn't have a per instruction
+overhead...
+
+> And finally, another thought about the syntax for arrays :-)
+> Although this syntax:
+> array <dimension-list> of <type>
+> is verbose, it will be used only in the human-readable assembly code so
+> size should not matter. I think we should consider it because I find it
+> to be the clearest syntax. It could even make arrays of function
+> pointers somewhat readable.
+
+My only comment will be to give you an example of why this is a bad
+idea. :)
+
+Here is an example of using the switch statement (with my recommended
+syntax):
+
+switch uint %val, label %otherwise,
+ [%3 x {uint, label}] [ { uint %57, label %l1 },
+ { uint %20, label %l2 },
+ { uint %14, label %l3 } ]
+
+Here it is with the syntax you are proposing:
+
+switch uint %val, label %otherwise,
+ array %3 of {uint, label}
+ array of {uint, label}
+ { uint %57, label %l1 },
+ { uint %20, label %l2 },
+ { uint %14, label %l3 }
+
+Which is ambiguous and very verbose. It would be possible to specify
+constants with [] brackets as in my syntax, which would look like this:
+
+switch uint %val, label %otherwise,
+ array %3 of {uint, label} [ { uint %57, label %l1 },
+ { uint %20, label %l2 },
+ { uint %14, label %l3 } ]
+
+But then the syntax is inconsistent between type definition and constant
+definition (why do []'s enclose the constants but not the types??).
+
+Anyways, I'm sure that there is much debate still to be had over
+this... :)
+
+-Chris
+
+http://www.nondot.org/~sabre/os/
+http://www.nondot.org/MagicStats/
+http://korbit.sourceforge.net/
+
+
diff --git a/docs/HistoricalNotes/2001-02-13-Reference-Memory.txt b/docs/HistoricalNotes/2001-02-13-Reference-Memory.txt
new file mode 100644
index 00000000000..2c7534d9da1
--- /dev/null
+++ b/docs/HistoricalNotes/2001-02-13-Reference-Memory.txt
@@ -0,0 +1,39 @@
+Date: Tue, 13 Feb 2001 13:29:52 -0600 (CST)
+From: Chris Lattner <sabre@nondot.org>
+To: Vikram S. Adve <vadve@cs.uiuc.edu>
+Subject: LLVM Concerns...
+
+
+I've updated the documentation to include load, store, and allocation
+instructions (please take a look and let me know if I'm on the right
+track):
+
+file:/home/vadve/lattner/llvm/docs/LangRef.html#memoryops
+
+I have a couple of concerns I would like to bring up:
+
+1. Reference types
+ Right now, I've spec'd out the language to have a pointer type, which
+ works fine for lots of stuff... except that Java really has
+ references: constrained pointers that cannot be manipulated: added and
+ subtracted, moved, etc... Do we want to have a type like this? It
+ could be very nice for analysis (pointer always points to the start of
+ an object, etc...) and more closely matches Java semantics. The
+ pointer type would be kept for C++ like semantics. Through analysis,
+ C++ pointers could be promoted to references in the LLVM
+ representation.
+
+2. Our "implicit" memory references in assembly language:
+ After thinking about it, this model has two problems:
+ A. If you do pointer analysis and realize that two stores are
+ independent and can share the same memory source object, there is
+ no way to represent this in either the bytecode or assembly.
+ B. When parsing assembly/bytecode, we effectively have to do a full
+ SSA generation/PHI node insertion pass to build the dependencies
+ when we don't want the "pinned" representation. This is not
+ cool.
+ I'm tempted to make memory references explicit in both the assembly and
+ bytecode to get around this... what do you think?
+
+-Chris
+
diff --git a/docs/HistoricalNotes/2001-02-13-Reference-MemoryResponse.txt b/docs/HistoricalNotes/2001-02-13-Reference-MemoryResponse.txt
new file mode 100644
index 00000000000..505343378df
--- /dev/null
+++ b/docs/HistoricalNotes/2001-02-13-Reference-MemoryResponse.txt
@@ -0,0 +1,47 @@
+Date: Tue, 13 Feb 2001 18:25:42 -0600
+From: Vikram S. Adve <vadve@cs.uiuc.edu>
+To: Chris Lattner <sabre@nondot.org>
+Subject: RE: LLVM Concerns...
+
+> 1. Reference types
+> Right now, I've spec'd out the language to have a pointer type, which
+> works fine for lots of stuff... except that Java really has
+> references: constrained pointers that cannot be manipulated: added and
+> subtracted, moved, etc... Do we want to have a type like this? It
+> could be very nice for analysis (pointer always points to the start of
+> an object, etc...) and more closely matches Java semantics. The
+> pointer type would be kept for C++ like semantics. Through analysis,
+> C++ pointers could be promoted to references in the LLVM
+> representation.
+
+
+You're right, having references would be useful. Even for C++ the *static*
+compiler could generate references instead of pointers with fairly
+straightforward analysis. Let's include a reference type for now. But I'm
+also really concerned that LLVM is becoming big and complex and (perhaps)
+too high-level. After we get some initial performance results, we may have
+a clearer idea of what our goals should be and we should revisit this
+question then.
+
+> 2. Our "implicit" memory references in assembly language:
+> After thinking about it, this model has two problems:
+> A. If you do pointer analysis and realize that two stores are
+> independent and can share the same memory source object,
+
+not sure what you meant by "share the same memory source object"
+
+> there is
+> no way to represent this in either the bytecode or assembly.
+> B. When parsing assembly/bytecode, we effectively have to do a full
+> SSA generation/PHI node insertion pass to build the dependencies
+> when we don't want the "pinned" representation. This is not
+> cool.
+
+I understand the concern. But again, let's focus on the performance first
+and then look at the language design issues. E.g., it would be good to know
+how big the bytecode files are before expanding them further. I am pretty
+keen to explore the implications of LLVM for mobile devices. Both bytecode
+size and power consumption are important to consider there.
+
+--Vikram
+
diff --git a/docs/HistoricalNotes/2001-04-16-DynamicCompilation.txt b/docs/HistoricalNotes/2001-04-16-DynamicCompilation.txt
new file mode 100644
index 00000000000..5f7843ab563
--- /dev/null
+++ b/docs/HistoricalNotes/2001-04-16-DynamicCompilation.txt
@@ -0,0 +1,49 @@
+By Chris:
+
+LLVM has been designed with two primary goals in mind. First we strive to
+enable the best possible division of labor between static and dynamic
+compilers, and second, we need a flexible and powerful interface
+between these two complementary stages of compilation. We feel that
+providing a solution to these two goals will yield an excellent solution
+to the performance problem faced by modern architectures and programming
+languages.
+
+A key insight into current compiler and runtime systems is that a
+compiler may fall anywhere in a "continuum of compilation" to do its
+job. On one side, scripting languages statically compile nothing and
+dynamically compile (or equivalently, interpret) everything. On the far
+other side, traditional static compilers process everything statically and
+nothing dynamically. These approaches have typically been seen as a
+tradeoff between performance and portability. On a deeper level, however,
+there are two reasons that optimal system performance may be obtained by a
+system somewhere in between these two extremes: Dynamic application
+behavior and social constraints.
+
+From a technical perspective, pure static compilation cannot ever give
+optimal performance in all cases, because applications have varying dynamic
+behavior that the static compiler cannot take into consideration. Even
+compilers that support profile-guided optimization generate poor code in
+the real world, because using such optimization tunes the application
+to one particular usage pattern, whereas real programs (as opposed to
+benchmarks) often have several different usage patterns.
+
+On a social level, static compilation is a very shortsighted solution to
+the performance problem. Instruction set architectures (ISAs) continuously
+evolve, and each implementation of an ISA (a processor) must choose a set
+of tradeoffs that make sense in the market context that it is designed for.
+With every new processor introduced, the vendor faces two fundamental
+problems: First, there is a lag time between when a processor is introduced
+and when compilers generate quality code for the architecture. Secondly,
+even when compilers catch up to the new architecture there is often a large
+body of legacy code that was compiled for previous generations and will
+not or can not be upgraded. Thus a large percentage of code running on a
+processor may be compiled quite sub-optimally for the current
+characteristics of the dynamic execution environment.
+
+For these reasons, LLVM has been designed from the beginning as a long-term
+solution to these problems. Its design allows the large body of platform
+independent, static, program optimizations currently in compilers to be
+reused unchanged in their current form. It also provides important static
+type information to enable powerful dynamic and link time optimizations
+to be performed quickly and efficiently. This combination enables an
+increase in effective system performance for real world environments.
diff --git a/docs/HistoricalNotes/2001-05-18-ExceptionHandling.txt b/docs/HistoricalNotes/2001-05-18-ExceptionHandling.txt
new file mode 100644
index 00000000000..b546301d35a
--- /dev/null
+++ b/docs/HistoricalNotes/2001-05-18-ExceptionHandling.txt
@@ -0,0 +1,202 @@
+Meeting notes: Implementation idea: Exception Handling in C++/Java
+
+The 5/18/01 meeting discussed ideas for implementing exceptions in LLVM.
+We decided that the best solution requires a set of library calls provided by
+the VM, as well as an extension to the LLVM function invocation syntax.
+
+The LLVM function invocation instruction previously looked like this (ignoring
+types):
+
+ call func(arg1, arg2, arg3)
+
+The extension discussed today adds an optional "with" clause that
+associates a label with the call site. The new syntax looks like this:
+
+ call func(arg1, arg2, arg3) with funcCleanup
+
+This cleanup label always stays tightly associated with the call site (being
+encoded directly into the call opcode itself), and should be used whenever
+there is cleanup work that needs to be done for the current function if
+an exception is thrown by func (or if we are in a try block).
+
+To support this, the VM/Runtime provide the following simple library
+functions (all syntax in this document is very abstract):
+
+typedef struct { something } %frame;
+ The VM must export a "frame type", that is an opaque structure used to
+ implement different types of stack walking that may be used by various
+ language runtime libraries. We imagine that it would be typical to
+ represent a frame with a PC and frame pointer pair, although that is not
+ required.
+
+%frame getStackCurrentFrame();
+ Get a frame object for the current function. Note that if the current
+ function was inlined into its caller, the "current" frame will belong to
+ the "caller".
+
+bool isFirstFrame(%frame f);
+ Returns true if the specified frame is the top level (first activated) frame
+ for this thread. For the main thread, this corresponds to the main()
+ function; for a spawned thread, it corresponds to the thread function.
+
+%frame getNextFrame(%frame f);
+ Return the previous frame on the stack. This function is undefined if f
+ satisfies the predicate isFirstFrame(f).
+
+Label *getFrameLabel(%frame f);
+ If a label was associated with f (as discussed below), this function returns
+ it. Otherwise, it returns a null pointer.
+
+doNonLocalBranch(Label *L);
+ At this point, it is not clear whether this should be a function or
+ intrinsic. It should probably be an intrinsic in LLVM, but we'll deal with
+ this issue later.
+
+
+Here is a motivating example that illustrates how these facilities could be
+used to implement the C++ exception model:
+
+void TestFunction(...) {
+ A a; B b;
+ foo(); // Any function call may throw
+ bar();
+ C c;
+
+ try {
+ D d;
+ baz();
+ } catch (int) {
+ ...int Stuff...
+ // execution continues after the try block: the exception is consumed
+ } catch (double) {
+ ...double stuff...
+ throw; // Exception is propagated
+ }
+}
+
+This function would compile to approximately the following code (heavy
+pseudo code follows):
+
+Func:
+ %a = alloca A
+ A::A(%a) // These ctors & dtors could throw, but we ignore this
+ %b = alloca B // minor detail for this example
+ B::B(%b)
+
+ call foo() with fooCleanup // An exception in foo is propagated to fooCleanup
+ call bar() with barCleanup // An exception in bar is propagated to barCleanup
+
+ %c = alloca C
+ C::C(c)
+ %d = alloca D
+ D::D(d)
+ call baz() with bazCleanup // An exception in baz is propagated to bazCleanup
+ d->~D();
+EndTry: // This label corresponds to the end of the try block
+ c->~C() // These could also throw, these are also ignored
+ b->~B()
+ a->~A()
+ return
+
+Note that this is a very straightforward and literal translation: exactly
+what we want for zero cost (when unused) exception handling. Especially on
+platforms with many registers (i.e., the IA64) setjmp/longjmp style exception
+handling is *very* impractical. Also, the "with" clauses describe the
+control flow paths explicitly so that analysis is not adversely affected.
+
+The foo/barCleanup labels are implemented as:
+
+TryCleanup: // Executed if an exception escapes the try block
+ c->~C()
+barCleanup: // Executed if an exception escapes from bar()
+ // fall through
+fooCleanup: // Executed if an exception escapes from foo()
+ b->~B()
+ a->~A()
+ Exception *E = getThreadLocalException()
+ call throw(E) // Implemented by the C++ runtime, described below
+
+Which does the work one would expect. getThreadLocalException is a function
+implemented by the C++ support library. It returns the current exception
+object for the current thread. Note that we do not attempt to recycle the
+shutdown code from before, because performance of the mainline code is
+critically important. Also, obviously fooCleanup and barCleanup may be
+merged and one of them eliminated. This just shows how the code generator
+would most likely emit code.
+
+The bazCleanup label is more interesting. Because the exception may be caught
+by the try block, we must dispatch to its handler... but it does not exist
+on the call stack (it does not have a VM Call->Label mapping installed), so
+we must dispatch statically with a goto. The bazHandler thus appears as:
+
+bazHandler:
+ d->~D(); // destruct D as it goes out of scope when entering catch clauses
+ goto TryHandler
+
+In general, TryHandler is not the same as bazHandler, because multiple
+function calls could be made from the try block. In this case, trivial
+optimization could merge the two basic blocks. TryHandler is the code
+that actually determines the type of exception, based on the Exception object
+itself. For this discussion, assume that the exception object contains *at
+least*:
+
+1. A pointer to the RTTI info for the contained object
+2. A pointer to the dtor for the contained object
+3. The contained object itself
+
+Note that it is necessary to maintain #1 & #2 in the exception object itself
+because objects without virtual function tables may be thrown (as in this
+example). Assuming this, TryHandler would look something like this:
+
+TryHandler:
+ Exception *E = getThreadLocalException();
+ switch (E->RTTIType) {
+ case IntRTTIInfo:
+ ...int Stuff... // The action to perform from the catch block
+ break;
+ case DoubleRTTIInfo:
+ ...double Stuff... // The action to perform from the catch block
+ goto TryCleanup // This catch block rethrows the exception
+ break; // Redundant, eliminated by the optimizer
+ default:
+ goto TryCleanup // Exception not caught, rethrow
+ }
+
+ // Exception was consumed
+ if (E->dtor)
+ E->dtor(E->object) // Invoke the dtor on the object if it exists
+ goto EndTry // Continue mainline code...
+
+And that is all there is to it.
+
+The throw(E) function would then be implemented like this (which may be
+inlined into the caller through standard optimization):
+
+function throw(Exception *E) {
+ // Get the start of the stack trace...
+ %frame %f = call getStackCurrentFrame()
+
+ // Get the label information that corresponds to it
+ label * %L = call getFrameLabel(%f)
+ while (%L == 0 && !isFirstFrame(%f)) {
+ // Loop until a cleanup handler is found
+ %f = call getNextFrame(%f)
+ %L = call getFrameLabel(%f)
+ }
+
+ if (%L != 0) {
+ call setThreadLocalException(E) // Allow handlers access to this...
+ call doNonLocalBranch(%L)
+ }
+ // No handler found!
+ call BlowUp() // Ends up calling the terminate() method in use
+}
+
+That's a brief rundown of how C++ exception handling could be implemented in
+llvm. Java would be very similar, except it only uses destructors to unlock
+synchronized blocks, not to destroy data. Also, it uses two stack walks: a
+nondestructive walk that builds a stack trace, then a destructive walk that
+unwinds the stack as shown here.
+
+It would be trivial to get exception interoperability between C++ and Java.
+
diff --git a/docs/HistoricalNotes/2001-05-19-ExceptionResponse.txt b/docs/HistoricalNotes/2001-05-19-ExceptionResponse.txt
new file mode 100644
index 00000000000..3375365f54c
--- /dev/null
+++ b/docs/HistoricalNotes/2001-05-19-ExceptionResponse.txt
@@ -0,0 +1,45 @@
+Date: Sat, 19 May 2001 19:09:13 -0500 (CDT)
+From: Chris Lattner <sabre@nondot.org>
+To: Vikram S. Adve <vadve@cs.uiuc.edu>
+Subject: RE: Meeting writeup
+
+> I read it through and it looks great!
+
+Thanks!
+
+> The finally clause in Java may need more thought. The code for this clause
+> is like a subroutine because it needs to be entered from many points (end of
+> try block and beginning of each catch block), and then needs to *return to
+> the place from where the code was entered*. That's why JVM has the
+> jsr/jsr_w instruction.
+
+Hrm... I guess that is an implementation decision. It can either be
+modelled as a subroutine (as java bytecodes do), which is really
+gross... or it can be modelled as code duplication (emitted once inline,
+then once in the exception path). Because this could, at worst,
+slightly less than double the amount of code in a function (it is
+bounded) I don't think this is a big deal. One of the really nice things
+about the LLVM representation is that it still allows for runtime code
+generation for exception paths (exception paths are not compiled until
+needed). Obviously a static compiler couldn't do this though. :)
+
+In this case, only one copy of the code would be compiled... until the
+other one is needed on demand. Also this strategy fits with the "zero
+cost" exception model... the standard case is not burdened with extra
+branches or "call"s.
+
+> I suppose you could save the return address in a particular register
+> (specific to this finally block), jump to the finally block, and then at the
+> end of the finally block, jump back indirectly through this register. It
+> will complicate building the CFG but I suppose that can be handled. It is
+> also unsafe in terms of checking where control returns (which is I suppose
+> why the JVM doesn't use this).
+
+I think that a code duplication method would be cleaner, and would avoid
+the caveats that you mention. Also, it does not slow down the normal case
+with an indirect branch...
+
+Like everything, we can probably defer a final decision until later. :)
+
+-Chris
+
diff --git a/docs/HistoricalNotes/2001-06-01-GCCOptimizations.txt b/docs/HistoricalNotes/2001-06-01-GCCOptimizations.txt
new file mode 100644
index 00000000000..97af16a2dad
--- /dev/null
+++ b/docs/HistoricalNotes/2001-06-01-GCCOptimizations.txt
@@ -0,0 +1,63 @@
+Date: Fri, 1 Jun 2001 16:38:17 -0500 (CDT)
+From: Chris Lattner <sabre@nondot.org>
+To: Vikram S. Adve <vadve@cs.uiuc.edu>
+Subject: Interesting: GCC passes
+
+
+Take a look at this document (which describes the order of optimizations
+that GCC performs):
+
+http://gcc.gnu.org/onlinedocs/gcc_17.html
+
+The rundown is that after RTL generation, the following happens:
+
+1 . [t] jump optimization (jumps to jumps, etc)
+2 . [t] Delete unreachable code
+3 . Compute live ranges for CSE
+4 . [t] Jump threading (jumps to jumps with identical or inverse conditions)
+5 . [t] CSE
+6 . *** Conversion to SSA
+7 . [t] SSA Based DCE
+8 . *** Conversion to LLVM
+9 . UnSSA
+10. GCSE
+11. LICM
+12. Strength Reduction
+13. Loop unrolling
+14. [t] CSE
+15. [t] DCE
+16. Instruction combination, register movement, scheduling... etc.
+
+I've marked optimizations with a [t] to indicate things that I believe to
+be relatively trivial to implement in LLVM itself. The time consuming
+things to reimplement would be SSA based PRE, Strength reduction & loop
+unrolling... these would be the major things we would miss out on if we
+did LLVM creation from tree code [inlining and other high level
+optimizations are done on the tree representation].
+
+Given the lack of "strong" optimizations that would take a long time to
+reimplement, I am leaning a bit more towards creating LLVM from the tree
+code. Especially given that SGI has GPL'd their compiler, including many
+SSA based optimizations that could be adapted (besides the fact that their
+code looks MUCH nicer than GCC :)
+
+Even if we choose to do LLVM code emission from RTL, we will almost
+certainly want to move LLVM emission from step 8 down until at least CSE
+has been rerun... which causes me to wonder if the SSA generation code
+will still work (due to global variable dependencies and stuff). I assume
+that it can be made to work, but might be a little more involved than we
+would like.
+
+I'm continuing to look at the Tree -> RTL code. It is pretty gross
+because they do some of the translation a statement at a time, and some
+of it a function at a time... I'm not quite clear why and how the
+distinction is drawn, but it does not appear that there is a wonderful
+place to attach extra info.
+
+Anyways, I'm proceeding with the RTL -> LLVM conversion phase for now. We
+can talk about this more on Monday.
+
+Wouldn't it be nice if there were an obvious decision to be made? :)
+
+-Chris
+
diff --git a/docs/HistoricalNotes/2001-06-01-GCCOptimizations2.txt b/docs/HistoricalNotes/2001-06-01-GCCOptimizations2.txt
new file mode 100644
index 00000000000..e61042fd657
--- /dev/null
+++ b/docs/HistoricalNotes/2001-06-01-GCCOptimizations2.txt
@@ -0,0 +1,71 @@
+Date: Fri, 1 Jun 2001 17:08:44 -0500 (CDT)
+From: Chris Lattner <sabre@nondot.org>
+To: Vikram S. Adve <vadve@cs.uiuc.edu>
+Subject: RE: Interesting: GCC passes
+
+> That is very interesting. I agree that some of these could be done on LLVM
+> at link-time, but it is the extra time required that concerns me. Link-time
+> optimization is severely time-constrained.
+
+If we were to reimplement any of these optimizations, I assume that we
+could do them a translation unit at a time, just as GCC does now. This
+would lead to a pipeline like this:
+
+Static optimizations, xlation unit at a time:
+.c --GCC--> .llvm --llvmopt--> .llvm
+
+Link time optimizations:
+.llvm --llvm-ld--> .llvm --llvm-link-opt--> .llvm
+
+Of course, many optimizations could be shared between llvmopt and
+llvm-link-opt, but they wouldn't need to be shared... Thus compile time
+could be faster, because we are using a "smarter" IR (SSA based).
+
+> BTW, about SGI, "borrowing" SSA-based optimizations from one compiler and
+> putting it into another is not necessarily easier than re-doing it.
+> Optimization code is usually heavily tied in to the specific IR they use.
+
+Understood. The only reason that I brought this up is because SGI's IR is
+more similar to LLVM than it is different in many respects (SSA based,
+relatively low level, etc), and could be easily adapted. Also their
+optimizations are written in C++ and are actually somewhat
+structured... of course it would be no walk in the park, but it would be
+much less time consuming to adapt, say, SSA-PRE than to rewrite it.
+
+> But your larger point is valid that adding SSA based optimizations is
+> feasible and should be fun. (Again, link time cost is the issue.)
+
+Assuming linktime cost wasn't an issue, the question is:
+Does using GCC's backend buy us anything?
+
+> It also occurs to me that GCC is probably doing quite a bit of back-end
+> optimization (step 16 in your list). Do you have a breakdown of that?
+
+Not really. The irritating part of GCC is that it mixes it all up and
+doesn't have a clean separation of concerns. A lot of the "back end
+optimization" happens right along with other data optimizations (ie, CSE
+of machine specific things).
+
+As far as REAL back end optimizations go, it looks something like this:
+
+1. Instruction combination: try to make CISCy instructions, if available
+2. Register movement: try to get registers in the right places for the
+architecture to avoid register to register moves. For example, try to get
+the first argument of a function to naturally land in %o0 for sparc.
+3. Instruction scheduling: 'nuff said :)
+4. Register class preferencing: ??
+5. Local register allocation
+6. global register allocation
+7. Spilling
+8. Local regalloc
+9. Jump optimization
+10. Delay slot scheduling
+11. Branch shortening for CISC machines
+12. Instruction selection & peephole optimization
+13. Debug info output
+
+But none of this would be usable for LLVM anyways, unless we were using
+GCC as a static compiler.
+
+-Chris
+
diff --git a/docs/HistoricalNotes/2001-06-20-.NET-Differences.txt b/docs/HistoricalNotes/2001-06-20-.NET-Differences.txt
new file mode 100644
index 00000000000..1bc2eae746c
--- /dev/null
+++ b/docs/HistoricalNotes/2001-06-20-.NET-Differences.txt
@@ -0,0 +1,30 @@
+Date: Wed, 20 Jun 2001 12:32:22 -0500
+From: Vikram Adve <vadve@cs.uiuc.edu>
+To: Chris Lattner <lattner@cs.uiuc.edu>
+Subject: .NET vs. our VM
+
+One significant difference between .NET CLR and our VM is that the CLR
+includes full information about classes and inheritance. In fact, I just
+sat through the paper on adding templates to .NET CLR, and the speaker
+indicated that the goal seems to be to do simple static compilation (very
+little lowering or optimization). Also, the templates implementation in CLR
+"relies on dynamic class loading and JIT compilation".
+
+This is an important difference because I think there are some significant
+advantages to have a much lower level VM layer, and do significant static
+analysis and optimization.
+
+I also talked to the lead guy for KAI's C++ compiler (Arch Robison) and he
+said that SGI and other commercial compilers have included options to export
+their *IR* next to the object code (i.e., .il files) and use them for
+link-time code generation. In fact, he said that the .o file was nearly
+empty and was entirely generated from the .il at link-time. But he agreed
+that this limited the link-time interprocedural optimization to modules
+compiled by the same compiler, whereas our approach allows us to link and
+optimize modules from multiple different compilers. (Also, of course, they
+don't do anything for runtime optimization).
+
+All issues to bring up in Related Work.
+
+--Vikram
+
diff --git a/docs/HistoricalNotes/2001-07-06-LoweringIRForCodeGen.txt b/docs/HistoricalNotes/2001-07-06-LoweringIRForCodeGen.txt
new file mode 100644
index 00000000000..3e10416fe67
--- /dev/null
+++ b/docs/HistoricalNotes/2001-07-06-LoweringIRForCodeGen.txt
@@ -0,0 +1,31 @@
+Date: Fri, 6 Jul 2001 16:56:56 -0500
+From: Vikram S. Adve <vadve@cs.uiuc.edu>
+To: Chris Lattner <lattner@cs.uiuc.edu>
+Subject: lowering the IR
+
+BTW, I do think that we should consider lowering the IR as you said. I
+didn't get time to raise it today, but it comes up with the SPARC
+move-conditional instruction. I don't think we want to put that in the core
+VM -- it is a little too specialized. But without a corresponding
+conditional move instruction in the VM, it is pretty difficult to maintain a
+close mapping between VM and machine code. Other architectures may have
+other such instructions.
+
+What I was going to suggest was that for a particular processor, we define
+additional VM instructions that match some of the unusual opcodes on the
+processor but have VM semantics otherwise, i.e., all operands are in SSA
+form and typed. This means that we can re-generate core VM code from the
+more specialized code any time we want (so that portability is not lost).
+
+Typically, a static compiler like gcc would generate just the core VM, which
+is relatively portable. Anyone (an offline tool, the linker, etc., or even
+the static compiler itself if it chooses) can transform that into more
+specialized target-specific VM code for a particular architecture. If the
+linker does it, it can do it after all machine-independent optimizations.
+This would be the most convenient, but not necessary.
+
+The main benefit of lowering will be that we will be able to retain a close
+mapping between VM and machine code.
+
+--Vikram
+
diff --git a/docs/HistoricalNotes/2001-09-18-OptimizeExceptions.txt b/docs/HistoricalNotes/2001-09-18-OptimizeExceptions.txt
new file mode 100644
index 00000000000..9379081018d
--- /dev/null
+++ b/docs/HistoricalNotes/2001-09-18-OptimizeExceptions.txt
@@ -0,0 +1,56 @@
+Date: Tue, 18 Sep 2001 00:38:37 -0500 (CDT)
+From: Chris Lattner <sabre@nondot.org>
+To: Vikram S. Adve <vadve@cs.uiuc.edu>
+Subject: Idea for a simple, useful link time optimization
+
+
+In C++ programs, exceptions suck, and here's why:
+
+1. In virtually all function calls, you must assume that the function
+ throws an exception, unless it is defined as 'nothrow'. This means
+ that every function call has to have code to invoke dtors on objects
+ locally if one is thrown by the function. Most functions don't throw
+ exceptions, so this code is dead [with all the bad effects of dead
+ code, including icache pollution].
+2. Declaring a function nothrow causes catch blocks to be added to every
+ call that is not provably nothrow. This makes them very slow.
+3. Extra extraneous exception edges reduce the opportunity for code
+ motion.
+4. EH is typically implemented with large lookup tables. Ours is going to
+ be much smaller (than the "standard" way of doing it) to start with,
+ but eliminating it entirely would be nice. :)
+5. It is physically impossible to correctly put (accurate, correct)
+ exception specifications on generic, templated code. But it is trivial
+ to analyze instantiations of said code.
+6. Most large C++ programs throw few exceptions. Most well designed
+ programs only throw exceptions in specific planned portions of the
+ code.
+
+Given our _planned_ model of handling exceptions, all of this would be
+pretty trivial to eliminate through some pretty simplistic interprocedural
+analysis. The DCE factor alone could probably be pretty significant. The
+extra code motion opportunities could also be exploited though...
+
+Additionally, this optimization can be implemented in a straightforward,
+conservative manner, allowing libraries or even individual files to be
+optimized (if there are leaf functions visible in the translation unit
+that are called).
+
+I think it's a reasonable optimization that hasn't really been addressed
+(because assembly is way too low level for this), and could have decent
+payoffs... without being an overly complex optimization.
+
+After I wrote all of that, I found this page that is talking about
+basically the same thing I just wrote, except that it is translation unit
+at a time, tree based approach:
+http://www.ocston.org/~jls/ehopt.html
+
+but is very useful from an "expected gain" and references perspective. Note
+that their compiler is apparently unable to inline functions that use
+exceptions, so their numbers are pretty worthless... also our results
+would (hopefully) be better because it's interprocedural...
+
+What do you think?
+
+-Chris
+
diff --git a/docs/HistoricalNotes/2002-05-12-InstListChange.txt b/docs/HistoricalNotes/2002-05-12-InstListChange.txt
new file mode 100644
index 00000000000..638682b49fd
--- /dev/null
+++ b/docs/HistoricalNotes/2002-05-12-InstListChange.txt
@@ -0,0 +1,55 @@
+Date: Sun, 12 May 2002 17:12:53 -0500 (CDT)
+From: Chris Lattner <sabre@nondot.org>
+To: "Vikram S. Adve" <vadve@cs.uiuc.edu>
+Subject: LLVM change
+
+There is a fairly fundamental change that I would like to make to the LLVM
+infrastructure, but I'd like to know if you see any drawbacks that I
+don't...
+
+Basically right now at the basic block level, each basic block contains an
+instruction list (returned by getInstList()) that is a ValueHolder of
+instructions. To iterate over instructions, we must actually iterate over
+the instlist, and access the instructions through the instlist.
+
+To add or remove an instruction from a basic block, we need to get an
+iterator to an instruction, which, given just an Instruction*, requires a
+linear search of the basic block the instruction is contained in... just
+to insert an instruction before another instruction, or to delete an
+instruction! This complicates algorithms that should be very simple (like
+simple constant propagation), because they aren't actually sparse anymore,
+they have to traverse basic blocks to remove constant propagated
+instructions.
+
+Additionally, adding or removing instructions to a basic block
+_invalidates all iterators_ pointing into that block, which is really
+irritating.
+
+To fix these problems (and others), I would like to make the ordering of
+the instructions be represented with a doubly linked list in the
+instructions themselves, instead of an external data structure. This is
+how many other representations do it, and frankly I can't remember why I
+originally implemented it the way I did.
+
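+As a purely illustrative sketch (not part of the original message; the
+names are hypothetical), the intrusive doubly linked list idea looks
+roughly like this in C++:
+
+  struct Instruction {
+    Instruction *Prev = nullptr;  // previous instruction in the basic block
+    Instruction *Next = nullptr;  // next instruction in the basic block
+    // ... opcode, operands, etc ...
+
+    Instruction *getPrev() const { return Prev; }
+    Instruction *getNext() const { return Next; }
+  };
+
+  // Unlinking an instruction is O(1) given just the Instruction*: splice
+  // Prev and Next together, with no linear search of the containing basic
+  // block, and iterators to other instructions stay valid.
+  void unlink(Instruction *I) {
+    if (I->Prev) I->Prev->Next = I->Next;
+    if (I->Next) I->Next->Prev = I->Prev;
+    I->Prev = I->Next = nullptr;
+  }
+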
+Long term, all of the code that depends on the nasty features in the
+instruction list (which can be found by grep'ing for getInstList()) will
+be changed to do nice local transformations. In the short term, I'll
+change the representation, but preserve the interface (including
+getInstList()) so that all of the code doesn't have to change.
+
+Iteration over the instructions in a basic block remains the simple:
+for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) ...
+
+But we will also support:
+for (Instruction *I = BB->front(); I; I = I->getNext()) ...
+
+After converting instructions over, I'll convert basic blocks and
+functions to have a similar interface.
+
+The only negative aspect of this change that I see is that it increases
+the amount of memory consumed by one pointer per instruction. Given the
+benefits, I think this is a very reasonable tradeoff.
+
+What do you think?
+
+-Chris
diff --git a/docs/HistoricalNotes/2002-06-25-MegaPatchInfo.txt b/docs/HistoricalNotes/2002-06-25-MegaPatchInfo.txt
new file mode 100644
index 00000000000..2ca46117ca8
--- /dev/null
+++ b/docs/HistoricalNotes/2002-06-25-MegaPatchInfo.txt
@@ -0,0 +1,72 @@
+Changes:
+* Change the casting code to be const correct. Now, doing this is invalid:
+ const Value *V = ...;
+ Instruction *I = dyn_cast<Instruction>(V);
+ instead, the second line should be:
+ const Instruction *I = dyn_cast<Instruction>(V);
+
+* Change the casting code to allow casting a reference value thus:
+ const Value &V = ...;
+ Instruction &I = cast<Instruction>(V);
+
+ dyn_cast does not work with references, because it must return a null pointer
+ on failure.
+
+* Fundamentally change how instructions and other values are represented.
+ Before, every llvm container was an instance of the ValueHolder template,
+ instantiated for each container type. This ValueHolder was effectively a
+ wrapper around a vector of pointers to the sub-objects.
+
+ Now, instead of having a vector of pointers to objects, the objects are
+ maintained in a doubly linked list of values (i.e., each Instruction now has
+ Next & Previous fields). The containers are now instances of ilist (intrusive
+ linked list class), which use the next and previous fields to chain them
+ together. The advantage of this implementation is that iterators can be
+ formed directly from pointers to the LLVM value, and invalidation is much
+ easier to handle.
+
+* As part of the above change, dereferencing an iterator (for example:
+ BasicBlock::iterator) now produces a reference to the underlying type (same
+ example: Instruction&) instead of a pointer to the underlying object. This
+ makes it much easier to write nested loops that iterate over things, changing
+ this:
+
+ for (Function::iterator BI = Func->begin(); BI != Func->end(); ++BI)
+ for (BasicBlock::iterator II = (*BI)->begin(); II != (*BI)->end(); ++II)
+ (*II)->dump();
+
+ into:
+
+ for (Function::iterator BI = Func->begin(); BI != Func->end(); ++BI)
+ for (BasicBlock::iterator II = BI->begin(); II != BI->end(); ++II)
+ II->dump();
+
+ which is much more natural and what users expect.
+
+* Simplification of #include's: Before, it was necessary for a .cpp file to
+ include every .h file that it used. Now things are batched a little bit more
+ to make it easier to use. Specifically, the include graph now includes these
+ edges:
+ Module.h -> Function.h, GlobalVariable.h
+ Function.h -> BasicBlock.h, Argument.h
+ BasicBlock.h -> Instruction.h
+
+ Which means that #including Function.h is usually sufficient for getting the
+ lower level #includes.
+
+* Printing out a Value* has now changed: Printing a Value* will soon print out
+ the address of the value instead of the contents of the Value. To print out
+ the contents, you must convert it to a reference with (for example)
+ 'cout << *I' instead of 'cout << I;'. This conversion is not yet complete,
+ but will be eventually. In the mean time, both forms print out the contents.
+
+* References are used much more throughout the code base. In general, if a
+ pointer is known to never be null, it is passed in as a reference instead of a
+ pointer. For example, the instruction visitor class uses references instead
+ of pointers, and Pass subclasses now all receive references to Values
+ instead of pointers, because they are never null.
+
+* The Function class now has helper functions for accessing the Arguments list.
+ Instead of having to go through getArgumentList for simple things like
+ iterating over the arguments, the a*() methods can now be used to access them.
+
diff --git a/docs/HistoricalNotes/2003-01-23-CygwinNotes.txt b/docs/HistoricalNotes/2003-01-23-CygwinNotes.txt
new file mode 100644
index 00000000000..fbe811d627f
--- /dev/null
+++ b/docs/HistoricalNotes/2003-01-23-CygwinNotes.txt
@@ -0,0 +1,28 @@
+Date: Mon, 20 Jan 2003 00:00:28 -0600
+From: Brian R. Gaeke <gaeke@uiuc.edu>
+Subject: windows vs. llvm
+
+If you're interested, here are some of the major problems compiling LLVM
+under Cygwin and/or Mingw.
+
+1. Cygwin doesn't have <inttypes.h> or <stdint.h>, so all the INT*_MAX
+ symbols and standard int*_t types are off in limbo somewhere. Mingw has
+ <stdint.h>, but Cygwin doesn't like it.
+
+2. Mingw doesn't have <dlfcn.h> (because Windows doesn't have it.)
+
+3. SA_SIGINFO and friends are not around; only signal() seems to work.
+
+4. Relink, aka ld -r, doesn't work (probably an ld bug); you need
+ DONT_BUILD_RELINKED. This breaks all the tools makefiles; you just need to
+ change them to have .a's.
+
+5. There isn't a <values.h>.
+
+6. There isn't a mallinfo() (or, at least, it's documented, but it doesn't seem
+ to link).
+
+7. The version of Bison that cygwin (and newer Linux versions) comes with
+ does not like = signs in rules. Burg's gram.yc source file uses them. I think
+ you can just take them out.
+
diff --git a/docs/HistoricalNotes/2003-06-25-Reoptimizer1.txt b/docs/HistoricalNotes/2003-06-25-Reoptimizer1.txt
new file mode 100644
index 00000000000..a7457846395
--- /dev/null
+++ b/docs/HistoricalNotes/2003-06-25-Reoptimizer1.txt
@@ -0,0 +1,137 @@
+Wed Jun 25 15:13:51 CDT 2003
+
+First-level instrumentation
+---------------------------
+
+We use opt to do Bytecode-to-bytecode instrumentation. Look at
+back-edges and insert an llvm_first_trigger() function call, which takes
+no arguments and returns no value. This instrumentation is designed to
+be easy to remove, for instance by writing a NOP over the function
+call instruction.
+
+Keep count of every call to llvm_first_trigger(), and maintain
+counters in a map indexed by return address. If the trigger count
+exceeds a threshold, we identify a hot loop and perform second-level
+instrumentation on the hot loop region (the instructions between the
+target of the back-edge and the branch that causes the back-edge). We
+do not move code across basic-block boundaries.
+
+
+Second-level instrumentation
+---------------------------
+
+We remove the first-level instrumentation by overwriting the CALL to
+llvm_first_trigger() with a NOP.
+
+The reoptimizer maintains a map between machine-code basic blocks and
+LLVM BasicBlock*s. We only keep track of paths that start at the
+first machine-code basic block of the hot loop region.
+
+How do we keep track of which edges to instrument, and which edges are
+exits from the hot region? 3 step process.
+
+1) Do a DFS from the first machine-code basic block of the hot loop
+region and mark reachable edges.
+
+2) Do a DFS from the last machine-code basic block of the hot loop
+region IGNORING back edges, and mark the edges which are reachable in
+1) and also in 2) (i.e., must be reachable from both the start BB and
+the end BB of the hot region).
+
+3) Mark BBs which end in edges that exit the hot region; we need to
+instrument these differently.
+
+Assume that there is 1 free register. On SPARC we use %g1, which LLC
+has agreed not to use. Shift a 1 into it at the beginning. At every
+edge which corresponds to a conditional branch, we shift 0 for not
+taken and 1 for taken into a register. This uniquely numbers the paths
+through the hot region. Silently fail if we need more than 64 bits.
+
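+As a minimal illustrative sketch (not from the original notes; the names
+are hypothetical), the path-numbering trick looks roughly like this in
+C++: the register starts at 1, and each conditional branch in the hot
+region shifts its outcome in, so every distinct path through the region
+ends with a distinct value.
+
+  #include <cstdint>
+
+  // Stand-in for the reserved register (%g1 on SPARC).
+  struct PathNumber {
+    std::uint64_t Bits  = 1;  // the 1 shifted in at the start of the region
+    unsigned      Width = 1;
+
+    // Record one conditional branch: 1 for taken, 0 for not taken.
+    bool recordBranch(bool Taken) {
+      if (Width >= 64) return false;  // silently fail past 64 bits
+      Bits = (Bits << 1) | (Taken ? 1 : 0);
+      ++Width;
+      return true;
+    }
+  };
+
+  // At the end of the region, Bits uniquely identifies the path taken.
+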
+At the end BB we call countPath and increment the counter based on %g1
+and the return address of the countPath call. We keep track of the
+number of iterations and the number of paths. We only run this
+version 30 or 40 times.
+
+Find the BBs that total 90% or more of execution, and aggregate them
+together to form our trace. But we do not allow more than 5 paths; if
+we have more than 5 we take the ones that are executed the most. We
+verify our assumption that we picked a hot back-edge in first-level
+instrumentation, by making sure that the number of times we took an
+exit edge from the hot trace is less than 10% of the number of
+iterations.
+
+LLC has been taught to recognize llvm_first_trigger() calls and NOT
+generate saves and restores of caller-saved registers around these
+calls.
+
+
+Phase behavior
+--------------
+
+We turn off llvm_first_trigger() calls with NOPs, but this would hide
+phase behavior from us (when some funcs/traces stop being hot and
+others become hot.)
+
+We have a SIGALRM timer that counts time for us. Every time we get a
+SIGALRM we look at our priority queue of locations where we have
+removed llvm_first_trigger() calls. Each location is inserted along
+with a time when we will next turn instrumentation back on for that
+call site. If the time has arrived for a particular call site, we pop
+that off the prio. queue and turn instrumentation back on for that
+call site.
+
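+A minimal illustrative sketch (not from the original notes; the names are
+hypothetical) of that priority queue of disabled call sites in C++:
+
+  #include <queue>
+  #include <vector>
+
+  struct RemovedSite {
+    void     *CallSite;      // where the llvm_first_trigger() call was NOPed
+    unsigned  ReenableTime;  // when to turn instrumentation back on
+  };
+
+  struct LaterFirst {
+    bool operator()(const RemovedSite &A, const RemovedSite &B) const {
+      return A.ReenableTime > B.ReenableTime;  // earliest time on top
+    }
+  };
+
+  std::priority_queue<RemovedSite, std::vector<RemovedSite>, LaterFirst>
+      Removed;
+
+  // Invoked from the SIGALRM handler with the current time.
+  void onAlarm(unsigned Now) {
+    while (!Removed.empty() && Removed.top().ReenableTime <= Now) {
+      // reinstrument(Removed.top().CallSite);  // hypothetical helper
+      Removed.pop();
+    }
+  }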
+
+Generating traces
+-----------------
+
+When we finally generate an optimized trace we first copy the code
+into the trace cache. This leaves us with 3 copies of the code: the
+original code, the instrumented code, and the optimized trace. The
+optimized trace does not have instrumentation. The original code and
+the instrumented code are modified to have a branch to the trace
+cache, where the optimized traces are kept.
+
+We copy the code from the original to the instrumentation version
+by tracing the LLVM-to-Machine code basic block map and then copying
+each machine code basic block we think is in the hot region into the
+trace cache. Then we instrument that code. The process is similar for
+generating the final optimized trace; we copy the same basic blocks
+because we might need to put in fixup code for exit BBs.
+
+LLVM basic blocks are not typically used in the Reoptimizer except
+for the mapping information.
+
+We are restricted to using single instructions to branch between the
+original code, trace, and instrumented code. So we have to keep the
+code copies in memory near the original code (they can't be so far
+away that a single pc-relative branch would not reach.) Malloc() or
+data region space is too far away. This impacts the design of the
+trace cache.
+
+We use a dummy function that is full of a bunch of for loops which we
+overwrite with trace-cache code. The trace manager keeps track of
+whether or not we have enough space in the trace cache, etc.
+
+The trace insertion routine takes an original start address, a vector
+of machine instructions representing the trace, index of branches and
+their corresponding absolute targets, and index of calls and their
+corresponding absolute targets.
+
+The trace insertion routine is responsible for inserting branches from
+the beginning of the original code to the beginning of the optimized
+trace. This is because at some point the trace cache may run out of
+space and it may have to evict a trace, at which point the branch to
+the trace would also have to be removed. It uses a round-robin
+replacement policy; we have found that this is almost as good as LRU
+and better than random (especially because of problems fitting the new
+trace in.)
+
+We cannot deal with discontiguous trace cache areas. The trace cache
+is supposed to be cache-line-aligned, but it is not page-aligned.
+
+We generate instrumentation traces and optimized traces into separate
+trace caches. We keep the instrumented code around because you don't
+want to delete a trace when you still might have to return to it
+(i.e., return from a llvm_first_trigger() or countPath() call.)
+
+
diff --git a/docs/HistoricalNotes/2003-06-26-Reoptimizer2.txt b/docs/HistoricalNotes/2003-06-26-Reoptimizer2.txt
new file mode 100644
index 00000000000..ec4b93fea0a
--- /dev/null
+++ b/docs/HistoricalNotes/2003-06-26-Reoptimizer2.txt
@@ -0,0 +1,110 @@
+Thu Jun 26 14:43:04 CDT 2003
+
+Information about BinInterface
+------------------------------
+
+Take in a set of instructions with some particular register
+allocation. It allows you to add, modify, or delete some instructions,
+in SSA form (kind of like LLVM's MachineInstrs.) Then re-allocate
+registers. It assumes that the transformations you are doing are safe.
+It does not update the mapping information or the LLVM representation
+for the modified trace (so it would not, for instance, support
+multiple optimization passes; passes have to be aware of and update
+manually the mapping information.)
+
+The way you use it is you take the original code and provide it to
+BinInterface; then you do optimizations to it, then you put it in the
+trace cache.
+
+The BinInterface tries to find live-outs for traces so that it can do
+register allocation on just the trace, and stitch the trace back into
+the original code. It has to preserve the live-ins and live-outs when
+it does its register allocation. (On exits from the trace we have
+epilogues that copy live-outs back into the right registers, but
+live-ins have to be in the right registers.)
+
+
+Limitations of BinInterface
+---------------------------
+
+It does copy insertions for PHIs, which it infers from the machine
+code. The mapping info inserted by LLC is not sufficient to determine
+the PHIs.
+
+It does not handle integer or floating-point condition codes and it
+does not handle floating-point register allocation.
+
+It is not aggressively able to use lots of registers.
+
+There is a problem with alloca: we cannot find our spill space for
+spilling registers, normally allocated on the stack, if the trace
+follows an alloca(). What might be an acceptable solution would be to
+disable trace generation on functions that have variable-sized
+alloca()s. Variable-sized allocas in the trace would also probably
+screw things up.
+
+Because of the FP and alloca limitations, the BinInterface is
+completely disabled right now.
+
+
+Demo
+----
+
+This is a demo of the Ball & Larus version that does NOT use 2-level
+profiling.
+
+1. Compile program with llvm-gcc.
+2. Run opt -lowerswitch -paths -emitfuncs on the bytecode.
+ -lowerswitch change switch statements to branches
+ -paths Ball & Larus path-profiling algorithm
+ -emitfuncs emit the table of functions
+3. Run llc to generate SPARC assembly code for the result of step 2.
+4. Use g++ to link the (instrumented) assembly code.
+
+We use a script to do all this:
+------------------------------------------------------------------------------
+#!/bin/sh
+llvm-gcc $1.c -o $1
+opt -lowerswitch -paths -emitfuncs $1.bc > $1.run.bc
+llc -f $1.run.bc
+LIBS=$HOME/llvm_sparc/lib/Debug
+GXX=/usr/dcs/software/evaluation/bin/g++
+$GXX -g -L $LIBS $1.run.s -o $1.run.llc \
+$LIBS/tracecache.o \
+$LIBS/mapinfo.o \
+$LIBS/trigger.o \
+$LIBS/profpaths.o \
+$LIBS/bininterface.o \
+$LIBS/support.o \
+$LIBS/vmcore.o \
+$LIBS/transformutils.o \
+$LIBS/bcreader.o \
+-lscalaropts -lscalaropts -lanalysis \
+-lmalloc -lcpc -lm -ldl
+------------------------------------------------------------------------------
+
+5. Run the resulting binary. You will see output from BinInterface
+(described below) intermixed with the output from the program.
+
+
+Output from BinInterface
+------------------------
+
+BinInterface's debugging code prints out the following stuff in order:
+
+1. Initial code provided to BinInterface with original register
+allocation.
+
+2. Section 0 is the trace prolog, consisting mainly of live-ins and
+register saves which will be restored in epilogs.
+
+3. Section 1 is the trace itself, in SSA form used by BinInterface,
+along with the PHIs that are inserted.
+PHIs are followed by the copies that implement them.
+Each branch (i.e., out of the trace) is annotated with the
+section number that represents the epilog it branches to.
+
+4. All the other sections starting with Section 2 are trace epilogs.
+Every branch from the trace has to go to some epilog.
+
+5. After the last section is the register allocation output.
diff --git a/docs/HistoricalNotes/2007-OriginalClangReadme.txt b/docs/HistoricalNotes/2007-OriginalClangReadme.txt
new file mode 100644
index 00000000000..611dc9d2c01
--- /dev/null
+++ b/docs/HistoricalNotes/2007-OriginalClangReadme.txt
@@ -0,0 +1,178 @@
+//===----------------------------------------------------------------------===//
+// C Language Family Front-end
+//===----------------------------------------------------------------------===//
+ Chris Lattner
+
+I. Introduction:
+
+ clang: noun
+ 1. A loud, resonant, metallic sound.
+ 2. The strident call of a crane or goose.
+ 3. C-language family front-end toolkit.
+
+ The world needs better compiler tools, tools which are built as libraries. This
+ design point allows reuse of the tools in new and novel ways. However, building
+ the tools as libraries isn't enough: they must have clean APIs, be as
+ decoupled from each other as possible, and be easy to modify/extend. This
+ requires clean layering, decent design, and avoiding tying the libraries to a
+ specific use. Oh yeah, did I mention that we want the resultant libraries to
+ be as fast as possible? :)
+
+ This front-end is built as a component of the LLVM toolkit that can be used
+ with the LLVM backend or independently of it. In this spirit, the API has been
+ carefully designed as the following components:
+
+ libsupport - Basic support library, reused from LLVM.
+
+ libsystem - System abstraction library, reused from LLVM.
+
+ libbasic - Diagnostics, SourceLocations, SourceBuffer abstraction,
+ file system caching for input source files. This depends on
+ libsupport and libsystem.
+
+ libast - Provides classes to represent the C AST, the C type system,
+ builtin functions, and various helpers for analyzing and
+ manipulating the AST (visitors, pretty printers, etc). This
+ library depends on libbasic.
+
+
+ liblex - C/C++/ObjC lexing and preprocessing, identifier hash table,
+ pragma handling, tokens, and macros. This depends on libbasic.
+
+ libparse - C (for now) parsing and local semantic analysis. This library
+ invokes coarse-grained 'Actions' provided by the client to do
+ stuff (e.g. libsema builds ASTs). This depends on liblex.
+
+ libsema - Provides a set of parser actions to build a standardized AST
+ for programs. ASTs are 'streamed' out a top-level declaration
+ at a time, allowing clients to use decl-at-a-time processing,
+ build up entire translation units, or even build 'whole
+ program' ASTs depending on how they use the APIs. This depends
+ on libast and libparse.
+
+ librewrite - Fast, scalable rewriting of source code. This operates on
+ the raw syntactic text of source code, allowing a client
+ to insert and delete text in very large source files using
+ the same source location information embedded in ASTs. This
+ is intended to be a low-level API that is useful for
+ higher-level clients and libraries such as code refactoring.
+
+ libanalysis - Source-level dataflow analysis useful for performing analyses
+ such as computing live variables. It also includes a
+ path-sensitive "graph-reachability" engine for writing
+ analyses that reason about different possible paths of
+ execution through source code. This is currently being
+ employed to write a set of checks for finding bugs in software.
+
+ libcodegen - Lower the AST to LLVM IR for optimization & codegen. Depends
+ on libast.
+
+ clang - An example driver, client of the libraries at various levels.
+ This depends on all these libraries, and on LLVM VMCore.
+
+ This front-end has been intentionally built as a DAG of libraries, making it
+ easy to reuse individual parts or replace pieces if desired. For example, to
+ build a preprocessor, you take the Basic and Lexer libraries. If you want an
+ indexer, you take those plus the Parser library and provide some actions for
+ indexing. If you want a refactoring, static analysis, or source-to-source
+ compiler tool, it makes sense to take those plus the AST building and semantic
+ analyzer library. Finally, if you want to use this with the LLVM backend,
+ you'd take these components plus the AST to LLVM lowering code.
+
+ In the future I hope this toolkit will grow to include new and interesting
+ components, including a C++ front-end, ObjC support, and a whole lot of other
+ things.
+
+ Finally, it should be pointed out that the goal here is to build something that
+ is high-quality and industrial-strength: all the obnoxious features of the C
+ family must be correctly supported (trigraphs, preprocessor arcana, K&R-style
+ prototypes, GCC/MS extensions, etc). It cannot be used if it is not 'real'.
+
+
+II. Usage of clang driver:
+
+ * Basic Command-Line Options:
+ - Help: clang --help
+ - Standard GCC options accepted: -E, -I*, -i*, -pedantic, -std=c90, etc.
+ - To make diagnostics more gcc-like: -fno-caret-diagnostics -fno-show-column
+ - Enable metric printing: -stats
+
+ * -fsyntax-only is currently the default mode.
+
+ * -E mode works the same way as GCC.
+
+ * -Eonly mode does all preprocessing, but does not print the output,
+ useful for timing the preprocessor.
+
+ * -fsyntax-only is currently partially implemented, lacking some
+ semantic analysis (some errors and warnings are not produced).
+
+ * -parse-noop parses code without building an AST. This is useful
+ for timing the cost of the parser without including AST building
+ time.
+
+ * -parse-ast builds ASTs, but doesn't print them. This is most
+ useful for timing AST building vs -parse-noop.
+
+ * -parse-ast-print pretty prints most expression and statement nodes.
+
+ * -parse-ast-check checks that diagnostic messages that are expected
+ are reported and that those which are reported are expected.
+
+ * -dump-cfg builds ASTs and then CFGs. CFGs are then pretty-printed.
+
+ * -view-cfg builds ASTs and then CFGs. CFGs are then visualized by
+ invoking Graphviz.
+
+ For more information on getting Graphviz to work with clang/LLVM,
+ see: http://llvm.org/docs/ProgrammersManual.html#ViewGraph
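+
+ For example, a purely illustrative invocation combining a few of the
+ options listed above (any C source file will do) would be:
+
+   clang -fsyntax-only -pedantic -stats foo.c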
+
+
+III. Current advantages over GCC:
+
+ * Column numbers are fully tracked (no 256 col limit, no GCC-style pruning).
+ * All diagnostics have column numbers, include 'caret diagnostics', and they
+ highlight regions of interesting code (e.g. the LHS and RHS of a binop).
+ * Full diagnostic customization by client (can format diagnostics however they
+ like, e.g. in an IDE or refactoring tool) through DiagnosticClient interface.
+ * Built as a framework, can be reused by multiple tools.
+ * All languages supported linked into same library (no cc1,cc1obj, ...).
+ * mmap's code in read-only, does not dirty the pages like GCC (mem footprint).
+ * LLVM License, can be linked into non-GPL projects.
+ * Full diagnostic control, per diagnostic. Diagnostics are identified by ID.
+ * Significantly faster than GCC at semantic analysis, parsing, preprocessing
+ and lexing.
+ * Defers exposing platform-specific stuff to as late as possible, tracks use of
+ platform-specific features (e.g. #ifdef PPC) to allow 'portable bytecodes'.
+ * The lexer doesn't rely on the "lexer hack": it has no notion of scope and
+ does not categorize identifiers as types or variables -- this is up to the
+ parser to decide.
+
+Potential Future Features:
+
+ * Fine grained diag control within the source (#pragma enable/disable warning).
+ * Better token tracking within macros? (Token came from this line, which is
+ a macro argument instantiated here, recursively instantiated here).
+ * Fast #import with a module system.
+ * Dependency tracking: change to header file doesn't recompile every function
+ that textually depends on it: recompile only those functions that need it.
+ This is aka 'incremental parsing'.
+
+
+IV. Missing Functionality / Improvements
+
+Lexer:
+ * Source character mapping. GCC supports ASCII and UTF-8.
+ See GCC options: -ftarget-charset and -ftarget-wide-charset.
+ * Universal character support. Experimental in GCC, enabled with
+ -fextended-identifiers.
+ * -fpreprocessed mode.
+
+Preprocessor:
+ * #assert/#unassert
+ * MSExtension: "L#param" stringizes to a wide string literal.
+ * Add support for -M*
+
+Traditional Preprocessor:
+ * Currently, we have none. :)
+
diff --git a/docs/HowToAddABuilder.rst b/docs/HowToAddABuilder.rst
new file mode 100644
index 00000000000..b0cd2907f97
--- /dev/null
+++ b/docs/HowToAddABuilder.rst
@@ -0,0 +1,90 @@
+.. _how_to_add_a_builder:
+
+===================================================================
+How To Add Your Build Configuration To LLVM Buildbot Infrastructure
+===================================================================
+
+.. sectionauthor:: Galina Kistanova <gkistanova@gmail.com>
+
+Introduction
+============
+
+This document describes how to add a build configuration and buildslave
+to the LLVM Buildbot infrastructure at
+`<http://lab.llvm.org:8011>`_.
+
+
+Steps To Add Builder To LLVM Buildbot
+=====================================
+Volunteers can provide their build machines to work as build slaves for
+the public LLVM Buildbot.
+
+Here are the steps you can follow to do so:
+
+#. Check the existing build configurations to make sure the one you are
+ interested in is not already covered, or that it builds much faster on
+ your computer than on the existing one. We prefer faster builds so that
+ developers get feedback sooner after changes get committed.
+
+#. The computer you will be registering with the LLVM buildbot
+ infrastructure should have all dependencies installed, and you should
+ verify that you can actually build your configuration successfully.
+ Please check what degree of parallelism (-j param) gives the fastest
+ build. You can build multiple configurations on one computer.
+
+#. Install buildslave (currently we are using buildbot version 0.8.5).
+ Depending on the platform, buildslave may be available to download and
+ install with your package manager, or you can download it directly from
+ `<http://trac.buildbot.net>`_ and install it manually.
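+
+ For example, one possible way to install it (an illustrative sketch only;
+ the exact package name and version availability depend on your platform
+ and Python setup):
+
+ .. code-block:: bash
+
+ $ pip install buildbot-slave==0.8.5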
+
+#. Create a designated user account that your buildslave will run under,
+ and set appropriate permissions.
+
+#. Choose the buildslave root directory (all builds will be placed under
+ it), the buildslave access name, and the password the build master will
+ use to authenticate your buildslave.
+
+#. Create a buildslave in the context of that buildslave account. Point it to
+ the **lab.llvm.org** port **9990** (see `Buildbot documentation,
+ Creating a slave
+ <http://buildbot.net/buildbot/docs/current/full.html#creating-a-slave>`_
+ for more details) by running the following command:
+
+ .. code-block:: bash
+
+ $ buildslave create-slave <buildslave-root-directory> \
+ lab.llvm.org:9990 \
+ <buildslave-access-name> <buildslave-access-password>
+
+#. Fill in the buildslave description and admin name/e-mail. Here is an
+ example of the buildslave description::
+
+ Windows 7 x64
+ Core i7 (2.66GHz), 16GB of RAM
+
+ g++.exe (TDM-1 mingw32) 4.4.0
+ GNU Binutils 2.19.1
+ cmake version 2.8.4
+ Microsoft(R) 32-bit C/C++ Optimizing Compiler Version 16.00.40219.01 for 80x86
+
+#. Make sure you can actually start the buildslave successfully. Then set
+ up your buildslave to start automatically at startup. See the buildbot
+ documentation for help. You may want to restart your computer to verify
+ that it works.
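+
+ For example, to start it manually (assuming the root directory chosen
+ above):
+
+ .. code-block:: bash
+
+ $ buildslave start <buildslave-root-directory>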
+
+#. Send a patch which adds your build slave and your builder to zorg.
+
+ * slaves are added to ``buildbot/osuosl/master/config/slaves.py``
+ * builders are added to ``buildbot/osuosl/master/config/builders.py``
+
+#. Send the buildslave access name and the access password directly to
+ `Galina Kistanova <mailto:gkistanova@gmail.com>`_, and wait until she
+ lets you know that your changes have been applied and the buildmaster
+ has been reconfigured.
+
+#. Check the status of your buildslave on the `Waterfall Display
+ <http://lab.llvm.org:8011/waterfall>`_ to make sure it is connected, and
+ ``http://lab.llvm.org:8011/buildslaves/<your-buildslave-name>`` to see
+ if administrator contact and slave information are correct.
+
+#. Wait for the first build to succeed and enjoy.
diff --git a/docs/HowToReleaseLLVM.html b/docs/HowToReleaseLLVM.html
new file mode 100644
index 00000000000..30c3d5da5e9
--- /dev/null
+++ b/docs/HowToReleaseLLVM.html
@@ -0,0 +1,581 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
+ "http://www.w3.org/TR/html4/strict.dtd">
+<html>
+<head>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
+ <title>How To Release LLVM To The Public</title>
+ <link rel="stylesheet" href="_static/llvm.css" type="text/css">
+</head>
+<body>
+
+<h1>How To Release LLVM To The Public</h1>
+<ol>
+ <li><a href="#introduction">Introduction</a></li>
+ <li><a href="#criteria">Qualification Criteria</a></li>
+ <li><a href="#timeline">Release Timeline</a></li>
+ <li><a href="#process">Release Process</a></li>
+</ol>
+<div class="doc_author">
+ <p>Written by <a href="mailto:tonic@nondot.org">Tanya Lattner</a>,
+ <a href="mailto:rspencer@x10sys.com">Reid Spencer</a>,
+ <a href="mailto:criswell@cs.uiuc.edu">John Criswell</a>, &amp;
+ <a href="mailto:wendling@apple.com">Bill Wendling</a>
+ </p>
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="introduction">Introduction</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>This document contains information about successfully releasing LLVM &mdash;
+ including subprojects: e.g., <tt>clang</tt> and <tt>dragonegg</tt> &mdash; to
+ the public. It is the Release Manager's responsibility to ensure that a high
+ quality build of LLVM is released.</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="timeline">Release Timeline</a></h2>
+<!-- *********************************************************************** -->
+<div>
+
+<p>LLVM is released on a time based schedule &mdash; roughly every 6 months. We
+ do not normally have dot releases because of the nature of LLVM's incremental
+ development philosophy. That said, the only thing preventing dot releases for
+ critical bug fixes from happening is a lack of resources &mdash; testers,
+ machines, time, etc. And, because of the high quality we desire for LLVM
+ releases, we cannot allow for a truncated form of release qualification.</p>
+
+<p>The release process is roughly as follows:</p>
+
+<ul>
+ <li><p>Set code freeze and branch creation date for 6 months after last code
+ freeze date. Announce release schedule to the LLVM community and update
+ the website.</p></li>
+
+ <li><p>Create release branch and begin release process.</p></li>
+
+ <li><p>Send out release candidate sources for first round of testing. Testing
+ lasts 7-10 days. During the first round of testing, any regressions found
+ should be fixed. Patches are merged from mainline into the release
+ branch. Also, all features need to be completed during this time. Any
+ features not completed at the end of the first round of testing will be
+ removed or disabled for the release.</p></li>
+
+ <li><p>Generate and send out the second release candidate sources. Only
+ <em>critical</em> bugs found during this testing phase will be fixed. Any
+ bugs introduced by merged patches will be fixed, and if this happens, a
+ third round of testing is needed.</p></li>
+
+ <li><p>The release notes are updated.</p></li>
+
+ <li><p>Finally, release!</p></li>
+</ul>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="process">Release Process</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<ol>
+ <li><a href="#release-admin">Release Administrative Tasks</a>
+ <ol>
+ <li><a href="#branch">Create Release Branch</a></li>
+ <li><a href="#verchanges">Update Version Numbers</a></li>
+ </ol>
+ </li>
+ <li><a href="#release-build">Building the Release</a>
+ <ol>
+ <li><a href="#dist">Build the LLVM Source Distributions</a></li>
+ <li><a href="#build">Build LLVM</a></li>
+ <li><a href="#clangbin">Build the Clang Binary Distribution</a></li>
+ <li><a href="#target-build">Target Specific Build Details</a></li>
+ </ol>
+ </li>
+ <li><a href="#release-qualify">Release Qualification Criteria</a>
+ <ol>
+ <li><a href="#llvm-qualify">Qualify LLVM</a></li>
+ <li><a href="#clang-qualify">Qualify Clang</a></li>
+ <li><a href="#targets">Specific Target Qualification Details</a></li>
+ </ol>
+ </li>
+
+ <li><a href="#commTest">Community Testing</a></li>
+ <li><a href="#release-patch">Release Patch Rules</a></li>
+ <li><a href="#release-final">Release final tasks</a>
+ <ol>
+ <li><a href="#updocs">Update Documentation</a></li>
+ <li><a href="#tag">Tag the LLVM Final Release</a></li>
+ <li><a href="#updemo">Update the LLVM Demo Page</a></li>
+ <li><a href="#webupdates">Update the LLVM Website</a></li>
+ <li><a href="#announce">Announce the Release</a></li>
+ </ol>
+ </li>
+</ol>
+
+<!-- ======================================================================= -->
+<h3><a name="release-admin">Release Administrative Tasks</a></h3>
+
+<div>
+
+<p>This section describes a few administrative tasks that need to be done for
+ the release process to begin. Specifically, it involves:</p>
+
+<ul>
+ <li>Creating the release branch,</li>
+ <li>Setting version numbers, and</li>
+ <li>Tagging release candidates for the release team to begin testing</li>
+</ul>
+
+<!-- ======================================================================= -->
+<h4><a name="branch">Create Release Branch</a></h4>
+
+<div>
+
+<p>Branch the Subversion trunk using the following procedure:</p>
+
+<ol>
+ <li><p>Remind developers that the release branching is imminent and to refrain
+ from committing patches that might break the build. E.g., new features,
+ large patches for works in progress, an overhaul of the type system, an
+ exciting new TableGen feature, etc.</p></li>
+
+ <li><p>Verify that the current Subversion trunk is in decent shape by
+ examining nightly tester and buildbot results.</p></li>
+
+ <li><p>Create the release branch for <tt>llvm</tt>, <tt>clang</tt>,
+ the <tt>test-suite</tt>, and <tt>dragonegg</tt> from the last known good
+ revision. The branch's name is <tt>release_<i>XY</i></tt>,
+ where <tt>X</tt> is the major and <tt>Y</tt> the minor release
+ numbers. The branches should be created using the following commands:</p>
+
+<div class="doc_code">
+<pre>
+$ svn copy https://llvm.org/svn/llvm-project/llvm/trunk \
+ https://llvm.org/svn/llvm-project/llvm/branches/release_<i>XY</i>
+
+$ svn copy https://llvm.org/svn/llvm-project/cfe/trunk \
+ https://llvm.org/svn/llvm-project/cfe/branches/release_<i>XY</i>
+
+$ svn copy https://llvm.org/svn/llvm-project/dragonegg/trunk \
+ https://llvm.org/svn/llvm-project/dragonegg/branches/release_<i>XY</i>
+
+$ svn copy https://llvm.org/svn/llvm-project/test-suite/trunk \
+ https://llvm.org/svn/llvm-project/test-suite/branches/release_<i>XY</i>
+</pre>
+</div></li>
+
+ <li><p>Advise developers that they may now check their patches into the
+ Subversion tree again.</p></li>
+
+ <li><p>The Release Manager should switch to the release branch, because all
+ changes to the release will now be done in the branch. The easiest way to
+ do this is to grab a working copy using the following commands:</p>
+
+<div class="doc_code">
+<pre>
+$ svn co https://llvm.org/svn/llvm-project/llvm/branches/release_<i>XY</i> llvm-<i>X.Y</i>
+
+$ svn co https://llvm.org/svn/llvm-project/cfe/branches/release_<i>XY</i> clang-<i>X.Y</i>
+
+$ svn co https://llvm.org/svn/llvm-project/dragonegg/branches/release_<i>XY</i> dragonegg-<i>X.Y</i>
+
+$ svn co https://llvm.org/svn/llvm-project/test-suite/branches/release_<i>XY</i> test-suite-<i>X.Y</i>
+</pre>
+</div></li>
+</ol>
+
+</div>
+
+<!-- ======================================================================= -->
+<h4><a name="verchanges">Update LLVM Version</a></h4>
+
+<div>
+
+<p>After creating the LLVM release branch, update the release branches'
+ <tt>autoconf</tt> and <tt>configure.ac</tt> versions from '<tt>X.Ysvn</tt>'
+ to '<tt>X.Y</tt>'. Update it on mainline as well to be the next version
+ ('<tt>X.Y+1svn</tt>'). Regenerate the configure scripts for both
+ <tt>llvm</tt> and the <tt>test-suite</tt>.</p>
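+
+<p>Schematically (an illustrative sketch only; the exact <tt>AC_INIT</tt>
+ arguments in <tt>configure.ac</tt> may differ), the change on the release
+ branch amounts to:</p>
+
+<div class="doc_code">
+<pre>
+dnl Before:
+AC_INIT([LLVM],[<i>X.Y</i>svn],[bug-report-address])
+dnl After:
+AC_INIT([LLVM],[<i>X.Y</i>],[bug-report-address])
+</pre>
+</div>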
+
+<p>In addition, the version numbers of all the Bugzilla components must be
+ updated for the next release.</p>
+
+</div>
+
+<!-- ======================================================================= -->
+<h4><a name="dist">Build the LLVM Release Candidates</a></h4>
+
+<div>
+
+<p>Create release candidates for <tt>llvm</tt>, <tt>clang</tt>,
+ <tt>dragonegg</tt>, and the LLVM <tt>test-suite</tt> by tagging the branch
+ with the respective release candidate number. For instance, to
+ create <b>Release Candidate 1</b> you would issue the following commands:</p>
+
+<div class="doc_code">
+<pre>
+$ svn mkdir https://llvm.org/svn/llvm-project/llvm/tags/RELEASE_<i>XY</i>
+$ svn copy https://llvm.org/svn/llvm-project/llvm/branches/release_<i>XY</i> \
+ https://llvm.org/svn/llvm-project/llvm/tags/RELEASE_<i>XY</i>/rc1
+
+$ svn mkdir https://llvm.org/svn/llvm-project/cfe/tags/RELEASE_<i>XY</i>
+$ svn copy https://llvm.org/svn/llvm-project/cfe/branches/release_<i>XY</i> \
+ https://llvm.org/svn/llvm-project/cfe/tags/RELEASE_<i>XY</i>/rc1
+
+$ svn mkdir https://llvm.org/svn/llvm-project/dragonegg/tags/RELEASE_<i>XY</i>
+$ svn copy https://llvm.org/svn/llvm-project/dragonegg/branches/release_<i>XY</i> \
+ https://llvm.org/svn/llvm-project/dragonegg/tags/RELEASE_<i>XY</i>/rc1
+
+$ svn mkdir https://llvm.org/svn/llvm-project/test-suite/tags/RELEASE_<i>XY</i>
+$ svn copy https://llvm.org/svn/llvm-project/test-suite/branches/release_<i>XY</i> \
+ https://llvm.org/svn/llvm-project/test-suite/tags/RELEASE_<i>XY</i>/rc1
+</pre>
+</div>
+
+<p>Similarly, <b>Release Candidate 2</b> would be named <tt>RC2</tt> and so
+ on. This keeps a permanent copy of the release candidate around for people to
+ export and build as they wish. The final released sources will be tagged in
+ the <tt>RELEASE_<i>XY</i></tt> directory as <tt>Final</tt>
+ (c.f. <a href="#tag">Tag the LLVM Final Release</a>).</p>
+
+<p>The Release Manager may supply pre-packaged source tarballs for users. This
+ can be done with the following commands:</p>
+
+<div class="doc_code">
+<pre>
+$ svn export https://llvm.org/svn/llvm-project/llvm/tags/RELEASE_<i>XY</i>/rc1 llvm-<i>X.Y</i>rc1
+$ svn export https://llvm.org/svn/llvm-project/cfe/tags/RELEASE_<i>XY</i>/rc1 clang-<i>X.Y</i>rc1
+$ svn export https://llvm.org/svn/llvm-project/dragonegg/tags/RELEASE_<i>XY</i>/rc1 dragonegg-<i>X.Y</i>rc1
+$ svn export https://llvm.org/svn/llvm-project/test-suite/tags/RELEASE_<i>XY</i>/rc1 llvm-test-<i>X.Y</i>rc1
+
+$ tar -cvf - llvm-<i>X.Y</i>rc1 | gzip &gt; llvm-<i>X.Y</i>rc1.src.tar.gz
+$ tar -cvf - clang-<i>X.Y</i>rc1 | gzip &gt; clang-<i>X.Y</i>rc1.src.tar.gz
+$ tar -cvf - dragonegg-<i>X.Y</i>rc1 | gzip &gt; dragonegg-<i>X.Y</i>rc1.src.tar.gz
+$ tar -cvf - llvm-test-<i>X.Y</i>rc1 | gzip &gt; llvm-test-<i>X.Y</i>rc1.src.tar.gz
+</pre>
+</div>
+
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3><a name="release-build">Building the Release</a></h3>
+
+<div>
+
+<p>The builds of <tt>llvm</tt>, <tt>clang</tt>, and <tt>dragonegg</tt>
+ <em>must</em> be free of errors and warnings in Debug, Release+Asserts, and
+ Release builds. If all builds are clean, then the release passes Build
+ Qualification.</p>
+
+<p>The <tt>make</tt> options for building the different modes:</p>
+
+<table>
+ <tr><th>Mode</th><th>Options</th></tr>
+ <tr align="left"><td>Debug</td><td><tt>ENABLE_OPTIMIZED=0</tt></td></tr>
+ <tr align="left"><td>Release+Asserts</td><td><tt>ENABLE_OPTIMIZED=1</tt></td></tr>
+ <tr align="left"><td>Release</td><td><tt>ENABLE_OPTIMIZED=1 DISABLE_ASSERTIONS=1</tt></td></tr>
+</table>
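+
+<p>For example, an illustrative Release-mode build from an already configured
+ object directory would be invoked as:</p>
+
+<div class="doc_code">
+<pre>
+$ make ENABLE_OPTIMIZED=1 DISABLE_ASSERTIONS=1
+</pre>
+</div>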
+
+<!-- ======================================================================= -->
+<h4><a name="build">Build LLVM</a></h4>
+
+<div>
+
+<p>Build <tt>Debug</tt>, <tt>Release+Asserts</tt>, and <tt>Release</tt> versions
+ of <tt>llvm</tt> on all supported platforms. Directions to build
+ <tt>llvm</tt> are <a href="GettingStarted.html#quickstart">here</a>.</p>
+
+</div>
+
+<!-- ======================================================================= -->
+<h4><a name="clangbin">Build Clang Binary Distribution</a></h4>
+
+<div>
+
+<p>Creating the <tt>clang</tt> binary distribution
+ (Debug/Release+Asserts/Release) requires performing the following steps for
+ each supported platform:</p>
+
+<ol>
+ <li>Build clang according to the directions
+ <a href="http://clang.llvm.org/get_started.html">here</a>.</li>
+
+ <li>Build both a Debug and Release version of clang. The binary will be the
+ Release build.</li>
+
+ <li>Package <tt>clang</tt> (details to follow).</li>
+</ol>
+
+</div>
+
+<!-- ======================================================================= -->
+<h4><a name="target-build">Target Specific Build Details</a></h4>
+
+<div>
+
+<p>The table below specifies which compilers are used for each Arch/OS
+ combination when qualifying the build of <tt>llvm</tt>, <tt>clang</tt>,
+ and <tt>dragonegg</tt>.</p>
+
+<table>
+ <tr><th>Architecture</th> <th>OS</th> <th>compiler</th></tr>
+ <tr><td>x86-32</td> <td>Mac OS 10.5</td> <td>gcc 4.0.1</td></tr>
+ <tr><td>x86-32</td> <td>Linux</td> <td>gcc 4.2.X, gcc 4.3.X</td></tr>
+ <tr><td>x86-32</td> <td>FreeBSD</td> <td>gcc 4.2.X</td></tr>
+ <tr><td>x86-32</td> <td>mingw</td> <td>gcc 3.4.5</td></tr>
+ <tr><td>x86-64</td> <td>Mac OS 10.5</td> <td>gcc 4.0.1</td></tr>
+ <tr><td>x86-64</td> <td>Linux</td> <td>gcc 4.2.X, gcc 4.3.X</td></tr>
+ <tr><td>x86-64</td> <td>FreeBSD</td> <td>gcc 4.2.X</td></tr>
+</table>
+
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3><a name="release-qualify">Release Qualification Criteria</a></h3>
+
+<div>
+
+<p>A release is qualified when it has no regressions from the previous release
+ (or baseline). Regressions are related to correctness first and performance
+ second. (We may tolerate some minor performance regressions if they are
+ deemed necessary for the general quality of the compiler.)</p>
+
+<p><b>Regressions are new failures in the set of tests that are used to qualify
+ each product and only include things on the list. Every release will have
+ some bugs in it. It is the reality of developing a complex piece of
+ software. We need very concrete and definitive release criteria that ensure
+ we have monotonically improving quality on some metric. The metric we use is
+ described below. This doesn't mean that we don't care about other criteria,
+ but these are the criteria which we found to be most important and which must
+ be satisfied before a release can go out.</b></p>
+
+<!-- ======================================================================= -->
+<h4><a name="llvm-qualify">Qualify LLVM</a></h4>
+
+<div>
+
+<p>LLVM is qualified when it has a clean test run without a front-end and has
+ no regressions when using either <tt>clang</tt> or <tt>dragonegg</tt> with
+ the <tt>test-suite</tt> from the previous release.</p>
+
+</div>
+
+<!-- ======================================================================= -->
+<h4><a name="clang-qualify">Qualify Clang</a></h4>
+
+<div>
+
+<p><tt>Clang</tt> is qualified when front-end specific tests in the
+ <tt>llvm</tt> dejagnu test suite all pass, clang's own test suite passes
+ cleanly, and there are no regressions in the <tt>test-suite</tt>.</p>
+
+</div>
+
+<!-- ======================================================================= -->
+<h4><a name="targets">Specific Target Qualification Details</a></h4>
+
+<div>
+
+<table>
+ <tr><th>Architecture</th> <th>OS</th> <th>clang baseline</th> <th>tests</th></tr>
+ <tr><td>x86-32</td> <td>Linux</td> <td>last release</td> <td>llvm dejagnu, clang tests, test-suite (including spec)</td></tr>
+ <tr><td>x86-32</td> <td>FreeBSD</td> <td>last release</td> <td>llvm dejagnu, clang tests, test-suite</td></tr>
+ <tr><td>x86-32</td> <td>mingw</td> <td>none</td> <td>QT</td></tr>
+ <tr><td>x86-64</td> <td>Mac OS 10.X</td> <td>last release</td> <td>llvm dejagnu, clang tests, test-suite (including spec)</td></tr>
+ <tr><td>x86-64</td> <td>Linux</td> <td>last release</td> <td>llvm dejagnu, clang tests, test-suite (including spec)</td></tr>
+ <tr><td>x86-64</td> <td>FreeBSD</td> <td>last release</td> <td>llvm dejagnu, clang tests, test-suite</td></tr>
+</table>
+
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3><a name="commTest">Community Testing</a></h3>
+<div>
+
+<p>Once all testing has been completed and appropriate bugs filed, the release
+ candidate tarballs are put on the website and the LLVM community is
+ notified. Ask that all LLVM developers test the release in 2 ways:</p>
+
+<ol>
+ <li>Download <tt>llvm-<i>X.Y</i></tt>, <tt>llvm-test-<i>X.Y</i></tt>, and the
+ appropriate <tt>clang</tt> binary. Build LLVM. Run <tt>make check</tt> and
+ the full LLVM test suite (<tt>make TEST=nightly report</tt>).</li>
+
+ <li>Download <tt>llvm-<i>X.Y</i></tt>, <tt>llvm-test-<i>X.Y</i></tt>, and the
+ <tt>clang</tt> sources. Compile everything. Run <tt>make check</tt> and
+ the full LLVM test suite (<tt>make TEST=nightly report</tt>).</li>
+</ol>
+
+<p>Ask LLVM developers to submit the test suite report and <tt>make check</tt>
+ results to the list. Verify that there are no regressions from the previous
+ release. The results are not used to qualify a release, but to spot other
+ potential problems. For unsupported targets, verify that <tt>make check</tt>
+ is at least clean.</p>
+
+<p>During the first round of testing, all regressions must be fixed before the
+ second release candidate is tagged.</p>
+
+<p>If this is the second round of testing, the testing is only to ensure that
+ bug fixes previously merged in have not created new major problems. <i>This
+ is not the time to solve additional and unrelated bugs!</i> If no patches are
+ merged in, the release is determined to be ready and the release manager may
+ move onto the next stage.</p>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3><a name="release-patch">Release Patch Rules</a></h3>
+
+<div>
+
+<p>Below are the rules regarding patching the release branch:</p>
+
+<ol>
+ <li><p>Patches applied to the release branch may only be applied by the
+ release manager.</p></li>
+
+ <li><p>During the first round of testing, patches that fix regressions or that
+ are small and relatively risk free (verified by the appropriate code
+ owner) are applied to the branch. Code owners are asked to be very
+ conservative in approving patches for the branch. We reserve the right to
+ reject any patch that does not fix a regression as previously
+ defined.</p></li>
+
+ <li><p>During the remaining rounds of testing, only patches that fix critical
+ regressions may be applied.</p></li>
+</ol>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3><a name="release-final">Release Final Tasks</a></h3>
+
+<div>
+
+<p>The final stages of the release process involve tagging the "final" release
+ branch, updating documentation that refers to the release, and updating the
+ demo page.</p>
+
+<!-- ======================================================================= -->
+<h4><a name="updocs">Update Documentation</a></h4>
+
+<div>
+
+<p>Review the documentation and ensure that it is up to date. The "Release
+ Notes" must be updated to reflect new features, bug fixes, new known issues,
+ and changes in the list of supported platforms. The "Getting Started Guide"
+ should be updated to reflect the new release version number tag available from
+ Subversion and changes in basic system requirements. Merge both changes from
+ mainline into the release branch.</p>
+
+</div>
+
+<!-- ======================================================================= -->
+<h4><a name="tag">Tag the LLVM Final Release</a></h4>
+
+<div>
+
+<p>Tag the final release sources using the following procedure:</p>
+
+<div class="doc_code">
+<pre>
+$ svn copy https://llvm.org/svn/llvm-project/llvm/branches/release_XY \
+ https://llvm.org/svn/llvm-project/llvm/tags/RELEASE_<i>XY</i>/Final
+
+$ svn copy https://llvm.org/svn/llvm-project/cfe/branches/release_XY \
+ https://llvm.org/svn/llvm-project/cfe/tags/RELEASE_<i>XY</i>/Final
+
+$ svn copy https://llvm.org/svn/llvm-project/dragonegg/branches/release_XY \
+ https://llvm.org/svn/llvm-project/dragonegg/tags/RELEASE_<i>XY</i>/Final
+
+$ svn copy https://llvm.org/svn/llvm-project/test-suite/branches/release_XY \
+ https://llvm.org/svn/llvm-project/test-suite/tags/RELEASE_<i>XY</i>/Final
+</pre>
+</div>
+
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3><a name="updemo">Update the LLVM Demo Page</a></h3>
+
+<div>
+
+<p>The LLVM demo page must be updated to use the new release. This consists of
+ using the new <tt>clang</tt> binary and building LLVM.</p>
+
+<!-- ======================================================================= -->
+<h4><a name="webupdates">Update the LLVM Website</a></h4>
+
+<div>
+
+<p>The website must be updated before the release announcement is sent out. Here
+ is what to do:</p>
+
+<ol>
+ <li>Check out the <tt>www</tt> module from Subversion.</li>
+
+ <li>Create a new subdirectory <tt>X.Y</tt> in the releases directory.</li>
+
+ <li>Commit the <tt>llvm</tt>, <tt>test-suite</tt>, <tt>clang</tt> source,
+ <tt>clang binaries</tt>, <tt>dragonegg</tt> source, and <tt>dragonegg</tt>
+ binaries in this new directory.</li>
+
+ <li>Copy and commit the <tt>llvm/docs</tt> and <tt>LICENSE.txt</tt> files
+ into this new directory. The docs should be built with
+ <tt>BUILD_FOR_WEBSITE=1</tt>.</li>
+
+ <li>Commit the <tt>index.html</tt> to the <tt>release/X.Y</tt> directory to
+ redirect (use the one from the previous release).</li>
+
+ <li>Update the <tt>releases/download.html</tt> file with the new release.</li>
+
+ <li>Update the <tt>releases/index.html</tt> with the new release and link to
+ release documentation.</li>
+
+ <li>Finally, update the main page (<tt>index.html</tt> and sidebar) to point
+ to the new release and release announcement. Make sure this all gets
+ committed back into Subversion.</li>
+</ol>
+
+</div>
+
+<!-- ======================================================================= -->
+<h4><a name="announce">Announce the Release</a></h4>
+
+<div>
+
+<p>Have Chris send out the release announcement when everything is finished.</p>
+
+</div>
+
+</div>
+
+</div>
+
+<!-- *********************************************************************** -->
+<hr>
+<address>
+ <a href="http://jigsaw.w3.org/css-validator/check/referer"><img
+ src="http://jigsaw.w3.org/css-validator/images/vcss-blue" alt="Valid CSS"></a>
+ <a href="http://validator.w3.org/check/referer"><img
+ src="http://www.w3.org/Icons/valid-html401-blue" alt="Valid HTML 4.01"></a>
+ <a href="http://llvm.org/">The LLVM Compiler Infrastructure</a>
+ <br>
+ Last modified: $Date$
+</address>
+</body>
+</html>
diff --git a/docs/HowToSubmitABug.html b/docs/HowToSubmitABug.html
new file mode 100644
index 00000000000..39f83851293
--- /dev/null
+++ b/docs/HowToSubmitABug.html
@@ -0,0 +1,345 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
+ "http://www.w3.org/TR/html4/strict.dtd">
+<html>
+<head>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
+ <title>How to submit an LLVM bug report</title>
+ <link rel="stylesheet" href="_static/llvm.css" type="text/css">
+</head>
+<body>
+
+<h1>
+ How to submit an LLVM bug report
+</h1>
+
+<table class="layout" style="width: 90%" >
+<tr class="layout">
+ <td class="left">
+<ol>
+ <li><a href="#introduction">Introduction - Got bugs?</a></li>
+ <li><a href="#crashers">Crashing Bugs</a>
+ <ul>
+ <li><a href="#front-end">Front-end bugs</a>
+ <li><a href="#ct_optimizer">Compile-time optimization bugs</a>
+ <li><a href="#ct_codegen">Code generator bugs</a>
+ </ul></li>
+ <li><a href="#miscompilations">Miscompilations</a></li>
+ <li><a href="#codegen">Incorrect code generation (JIT and LLC)</a></li>
+</ol>
+<div class="doc_author">
+ <p>Written by <a href="mailto:sabre@nondot.org">Chris Lattner</a> and
+ <a href="http://misha.brukman.net">Misha Brukman</a></p>
+</div>
+</td>
+</tr>
+</table>
+
+<!-- *********************************************************************** -->
+<h2>
+ <a name="introduction">Introduction - Got bugs?</a>
+</h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>If you're working with LLVM and run into a bug, we definitely want to know
+about it. This document describes what you can do to increase the odds of
+getting it fixed quickly.</p>
+
+<p>Basically you have to do two things at a minimum. First, decide whether the
+bug <a href="#crashers">crashes the compiler</a> (or an LLVM pass), or if the
+compiler is <a href="#miscompilations">miscompiling</a> the program (i.e., the
+compiler successfully produces an executable, but it doesn't run right). Based
+on
+what type of bug it is, follow the instructions in the linked section to narrow
+down the bug so that the person who fixes it will be able to find the problem
+more easily.</p>
+
+<p>Once you have a reduced test-case, go to <a
+href="http://llvm.org/bugs/enter_bug.cgi">the LLVM Bug Tracking
+System</a> and fill out the form with the necessary details (note that you don't
+need to pick a category, just use the "new-bugs" category if you're not sure).
+The bug description should contain the following
+information:</p>
+
+<ul>
+ <li>All information necessary to reproduce the problem.</li>
+ <li>The reduced test-case that triggers the bug.</li>
+ <li>The location where you obtained LLVM (if not from our Subversion
+ repository).</li>
+</ul>
+
+<p>Thanks for helping us make LLVM better!</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2>
+ <a name="crashers">Crashing Bugs</a>
+</h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>More often than not, bugs in the compiler cause it to crash&mdash;often due
+to an assertion failure of some sort. The most important
+piece of the puzzle is to figure out if it is crashing in the GCC front-end
+or if it is one of the LLVM libraries (e.g. the optimizer or code generator)
+that has problems.</p>
+
+<p>To figure out which component is crashing (the front-end,
+optimizer or code generator), run the
+<tt><b>llvm-gcc</b></tt> command line as you were when the crash occurred, but
+with the following extra command line options:</p>
+
+<ul>
+ <li><tt><b>-O0 -emit-llvm</b></tt>: If <tt>llvm-gcc</tt> still crashes when
+ passed these options (which disable the optimizer and code generator), then
+ the crash is in the front-end. Jump ahead to the section on <a
+ href="#front-end">front-end bugs</a>.</li>
+
+ <li><tt><b>-emit-llvm</b></tt>: If <tt>llvm-gcc</tt> crashes with this option
+ (which disables the code generator), you found an optimizer bug. Jump ahead
+ to <a href="#ct_optimizer"> compile-time optimization bugs</a>.</li>
+
+ <li>Otherwise, you have a code generator crash. Jump ahead to <a
+ href="#ct_codegen">code generator bugs</a>.</li>
+
+</ul>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="front-end">Front-end bugs</a>
+</h3>
+
+<div>
+
+<p>If the problem is in the front-end, you should re-run the same
+<tt>llvm-gcc</tt> command that resulted in the crash, but add the
+<tt>-save-temps</tt> option. The compiler will crash again, but it will leave
+behind a <tt><i>foo</i>.i</tt> file (containing preprocessed C source code) and
+possibly <tt><i>foo</i>.s</tt> for each
+compiled <tt><i>foo</i>.c</tt> file. Send us the <tt><i>foo</i>.i</tt> file,
+along with the options you passed to llvm-gcc, and a brief description of the
+error it caused.</p>
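+
+<p>For example, if the original (hypothetical) failing command line was
+<tt>llvm-gcc -O2 -c foo.c</tt>, you would re-run it as:</p>
+
+<div class="doc_code">
+<p><tt><b>llvm-gcc</b> -O2 -c foo.c -save-temps</tt></p>
+</div>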
+
+<p>The <a href="http://delta.tigris.org/">delta</a> tool helps to reduce the
+preprocessed file down to the smallest amount of code that still replicates the
+problem. You're encouraged to use delta to reduce the code to make the
+developers' lives easier. <a
+href="http://gcc.gnu.org/wiki/A_guide_to_testcase_reduction">This website</a>
+has instructions on the best way to use delta.</p>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="ct_optimizer">Compile-time optimization bugs</a>
+</h3>
+
+<div>
+
+<p>If you find that a bug crashes in the optimizer, compile your test-case to a
+<tt>.bc</tt> file by passing "<tt><b>-emit-llvm -O0 -c -o foo.bc</b></tt>".
+Then run:</p>
+
+<div class="doc_code">
+<p><tt><b>opt</b> -std-compile-opts -debug-pass=Arguments foo.bc
+ -disable-output</tt></p>
+</div>
+
+<p>This command should do two things: it should print out a list of passes, and
+then it should crash in the same way as llvm-gcc. If it doesn't crash, please
+follow the instructions for a <a href="#front-end">front-end bug</a>.</p>
+
+<p>If this does crash, then you should be able to debug this with the following
+bugpoint command:</p>
+
+<div class="doc_code">
+<p><tt><b>bugpoint</b> foo.bc &lt;list of passes printed by
+<b>opt</b>&gt;</tt></p>
+</div>
+
+<p>Please run this, then file a bug with the instructions and reduced .bc files
+that bugpoint emits. If something goes wrong with bugpoint, please submit the
+"foo.bc" file and the list of passes printed by <b>opt</b>.</p>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="ct_codegen">Code generator bugs</a>
+</h3>
+
+<div>
+
+<p>If you find a bug that crashes llvm-gcc in the code generator, compile your
+source file to a .bc file by passing "<tt><b>-emit-llvm -c -o foo.bc</b></tt>"
+to llvm-gcc (in addition to the options you already pass). Once you have
+foo.bc, one of the following commands should fail:</p>
+
+<ol>
+<li><tt><b>llc</b> foo.bc</tt></li>
+<li><tt><b>llc</b> foo.bc -relocation-model=pic</tt></li>
+<li><tt><b>llc</b> foo.bc -relocation-model=static</tt></li>
+</ol>
+
+<p>If none of these crash, please follow the instructions for a
+<a href="#front-end">front-end bug</a>. If one of these does crash, you should
+be able to reduce this with one of the following bugpoint command lines (use
+the one corresponding to the command above that failed):</p>
+
+<ol>
+<li><tt><b>bugpoint</b> -run-llc foo.bc</tt></li>
+<li><tt><b>bugpoint</b> -run-llc foo.bc --tool-args
+ -relocation-model=pic</tt></li>
+<li><tt><b>bugpoint</b> -run-llc foo.bc --tool-args
+ -relocation-model=static</tt></li>
+</ol>
+
+<p>Please run this, then file a bug with the instructions and reduced .bc file
+that bugpoint emits. If something goes wrong with bugpoint, please submit the
+"foo.bc" file and the option that llc crashes with.</p>
+
+</div>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2>
+ <a name="miscompilations">Miscompilations</a>
+</h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>If llvm-gcc successfully produces an executable, but that executable doesn't
+run right, this is either a bug in the code or a bug in the
+compiler. The first thing to check is to make sure it is not using undefined
+behavior (e.g. reading a variable before it is defined). In particular, check
+to see if the program runs cleanly under
+<a href="http://valgrind.org/">valgrind</a>, purify, or some other memory
+checker tool. Many of the "LLVM bugs" that we have chased down ended up being
+bugs in the program being compiled, not LLVM.</p>
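+
+<p>For example, a minimal (illustrative) valgrind check might be:</p>
+
+<div class="doc_code">
+<p><tt><b>valgrind</b> ./a.out [program arguments]</tt></p>
+</div>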
+
+<p>Once you determine that the program itself is not buggy, you should choose
+which code generator you wish to compile the program with (e.g. LLC or the JIT)
+and optionally a series of LLVM passes to run. For example:</p>
+
+<div class="doc_code">
+<p><tt>
+<b>bugpoint</b> -run-llc [... optzn passes ...] file-to-test.bc --args -- [program arguments]</tt></p>
+</div>
+
+<p><tt>bugpoint</tt> will try to narrow down your list of passes to the one pass
+that causes an error, and simplify the bitcode file as much as it can to assist
+you. It will print a message letting you know how to reproduce the resulting
+error.</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2>
+ <a name="codegen">Incorrect code generation</a>
+</h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>Similarly to debugging incorrect compilation by mis-behaving passes, you can
+debug incorrect code generation by either LLC or the JIT, using
+<tt>bugpoint</tt>. The process <tt>bugpoint</tt> follows in this case is to try
+to narrow the code down to a function that is miscompiled by one or the other
+method, but since for correctness, the entire program must be run,
+<tt>bugpoint</tt> will compile the code it deems to not be affected with the C
+Backend, and then link in the shared object it generates.</p>
+
+<p>To debug the JIT:</p>
+
+<div class="doc_code">
+<pre>
+bugpoint -run-jit -output=[correct output file] [bitcode file] \
+ --tool-args -- [arguments to pass to lli] \
+ --args -- [program arguments]
+</pre>
+</div>
+
+<p>Similarly, to debug the LLC, one would run:</p>
+
+<div class="doc_code">
+<pre>
+bugpoint -run-llc -output=[correct output file] [bitcode file] \
+ --tool-args -- [arguments to pass to llc] \
+ --args -- [program arguments]
+</pre>
+</div>
+
+<p><b>Special note:</b> if you are debugging MultiSource or SPEC tests that
+already exist in the <tt>llvm/test</tt> hierarchy, there is an easier way to
+debug the JIT, LLC, and CBE, using the pre-written Makefile targets, which
+will pass the program options specified in the Makefiles:</p>
+
+<div class="doc_code">
+<p><tt>
+cd llvm/test/../../program<br>
+make bugpoint-jit
+</tt></p>
+</div>
+
+<p>At the end of a successful <tt>bugpoint</tt> run, you will be presented
+with two bitcode files: a <em>safe</em> file which can be compiled with the C
+backend and the <em>test</em> file which either LLC or the JIT
+mis-codegenerates, and thus causes the error.</p>
+
+<p>To reproduce the error that <tt>bugpoint</tt> found, it is sufficient to do
+the following:</p>
+
+<ol>
+
+<li><p>Regenerate the shared object from the safe bitcode file:</p>
+
+<div class="doc_code">
+<p><tt>
+<b>llc</b> -march=c safe.bc -o safe.c<br>
+<b>gcc</b> -shared safe.c -o safe.so
+</tt></p>
+</div></li>
+
+<li><p>If debugging LLC, compile test bitcode native and link with the shared
+ object:</p>
+
+<div class="doc_code">
+<p><tt>
+<b>llc</b> test.bc -o test.s<br>
+<b>gcc</b> test.s safe.so -o test.llc<br>
+./test.llc [program options]
+</tt></p>
+</div></li>
+
+<li><p>If debugging the JIT, load the shared object and supply the test
+ bitcode:</p>
+
+<div class="doc_code">
+<p><tt><b>lli</b> -load=safe.so test.bc [program options]</tt></p>
+</div></li>
+
+</ol>
+
+</div>
+
+<!-- *********************************************************************** -->
+<hr>
+<address>
+ <a href="http://jigsaw.w3.org/css-validator/check/referer"><img
+ src="http://jigsaw.w3.org/css-validator/images/vcss-blue" alt="Valid CSS"></a>
+ <a href="http://validator.w3.org/check/referer"><img
+ src="http://www.w3.org/Icons/valid-html401-blue" alt="Valid HTML 4.01"></a>
+
+ <a href="mailto:sabre@nondot.org">Chris Lattner</a><br>
+ <a href="http://llvm.org/">The LLVM Compiler Infrastructure</a>
+ <br>
+ Last modified: $Date$
+</address>
+
+</body>
+</html>
diff --git a/docs/LLVMBuild.html b/docs/LLVMBuild.html
new file mode 100644
index 00000000000..9e7f8c76577
--- /dev/null
+++ b/docs/LLVMBuild.html
@@ -0,0 +1,368 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
+<html>
+<head>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
+ <title>LLVMBuild Documentation</title>
+ <link rel="stylesheet" href="_static/llvm.css" type="text/css">
+</head>
+<body>
+
+<h1>LLVMBuild Guide</h1>
+
+<ol>
+ <li><a href="#introduction">Introduction</a></li>
+ <li><a href="#projectorg">Project Organization</a></li>
+ <li><a href="#buildintegration">Build Integration</a></li>
+ <li><a href="#componentoverview">Component Overview</a></li>
+ <li><a href="#formatreference">Format Reference</a></li>
+</ol>
+
+<!-- *********************************************************************** -->
+<h2><a name="introduction">Introduction</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+ <p>This document describes the <tt>LLVMBuild</tt> organization and files which
+ we use to describe parts of the LLVM ecosystem. For a description of specific
+ LLVMBuild-related tools, please see the command guide.</p>
+
+ <p>LLVM is designed to be a modular set of libraries which can be flexibly
+ mixed together in order to build a variety of tools, like compilers, JITs,
+ custom code generators, optimization passes, interpreters, and so on. Related
+ projects in the LLVM system like Clang and LLDB also tend to follow this
+ philosophy.</p>
+
+ <p>In order to support this usage style, LLVM has a fairly strict structure as
+ to how the source code and various components are organized. The
+ <tt>LLVMBuild.txt</tt> files are the explicit specification of that structure,
+ and are used by the build systems and other tools in order to develop the LLVM
+ project.</p>
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="projectorg">Project Organization</a></h2>
+<!-- *********************************************************************** -->
+
+<!-- FIXME: We should probably have an explicit top level project object. Good
+place to hang project level data, name, etc. Also useful for serving as the
+$ROOT of project trees for things which can be checked out separately. -->
+
+<div>
+ <p>The source code for LLVM projects using the LLVMBuild system (LLVM, Clang,
+ and LLDB) is organized into <em>components</em>, which define the separate
+ pieces of functionality that make up the project. These projects may consist
+ of many libraries, associated tools, build tools, or other utility tools (for
+ example, testing tools).</p>
+
+ <p>For the most part, the project contents are organized around defining one
+ main component per subdirectory. Each such directory contains
+ an <tt>LLVMBuild.txt</tt> which contains the component definitions.</p>
+
+ <p>The component descriptions for the project as a whole are automatically
+ gathered by the LLVMBuild tools. The tools automatically traverse the source
+ directory structure to find all of the component description files. NOTE: For
+ performance/sanity reasons, we only traverse into subdirectories when the
+ parent itself contains an <tt>LLVMBuild.txt</tt> description file.</p>
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="buildintegration">Build Integration</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+ <p>The LLVMBuild files themselves are just a declarative way to describe the
+ project structure. The actual building of the LLVM project is handled by
+ another build system (currently we support
+ both <a href="MakefileGuide.html">Makefiles</a>
+ and <a href="CMake.html">CMake</a>).</p>
+
+ <p>The build system implementation will load the relevant contents of the
+ LLVMBuild files and use that to drive the actual project build. Typically, the
+ build system will only need to load this information at "configure" time, and
+ use it to generate the native build information. Build systems will also handle
+ automatically reconfiguring their information when the contents of
+ the <i>LLVMBuild.txt</i> files change.</p>
+
+ <p>Developers generally are not expected to need to be aware of the details of
+ how the LLVMBuild system is integrated into their build. Ideally, LLVM
+ developers who are not working on the build system would only ever need to
+ modify the contents of the <i>LLVMBuild.txt</i> description files (although we
+ have not reached this goal yet).</p>
+
+ <p>For more information on the utility tool we provide to help interfacing
+ with the build system, please see
+ the <a href="CommandGuide/html/llvm-build.html">llvm-build</a>
+ documentation.</p>
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="componentoverview">Component Overview</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+ <p>As mentioned earlier, LLVM projects are organized into
+ logical <em>components</em>. Every component is typically grouped into its
+ own subdirectory. Generally, a component is organized around a coherent group
+ of sources which have some kind of clear API separation from other parts of
+ the code.</p>
+
+ <p>LLVM primarily uses the following types of components:</p>
+ <ul>
+ <li><em>Libraries</em> - Library components define a distinct API which can
+ be independently linked into LLVM client applications. Libraries typically
+ have private and public header files, and may specify a link of required
+ have private and public header files, and may specify a list of required
+
+ <li><em>Build Tools</em> - Build tools are applications which are designed
+ to be run as part of the build process (typically to generate other source
+ files). Currently, LLVM uses one main build tool
+ called <a href="TableGenFundamentals.html">TableGen</a> to generate a
+ variety of source files.</li>
+
+ <li><em>Tools</em> - Command line applications which are built using the
+ LLVM component libraries. Most LLVM tools are small and are primarily
+ frontends to the library interfaces.</li>
+
+<!-- FIXME: We also need shared libraries as a first class component, but this
+ is not yet implemented. -->
+ </ul>
+
+ <p>Components are described using <em>LLVMBuild.txt</em> files in the
+ directories that define the component. See
+ the <a href="#formatreference">Format Reference</a> section for information on
+ the exact format of these files.</p>
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="formatreference">LLVMBuild Format Reference</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+ <p>LLVMBuild files are written in a simple variant of the INI or configuration
+ file format (<a href="http://en.wikipedia.org/wiki/INI_file">Wikipedia
+ entry</a>). The format defines a list of sections each of which may contain
+ some number of properties. A simple example of the file format is below:</p>
+ <div class="doc_code">
+ <pre>
+<i>; Comments start with a semi-colon.</i>
+
+<i>; Sections are declared using square brackets.</i>
+[component_0]
+
+<i>; Properties are declared using '=' and are contained in the previous section.
+;
+; We support simple string and boolean scalar values and list values, where
+; items are separated by spaces. There is no support for quoting, and so
+; property values may not contain spaces.</i>
+property_name = property_value
+list_property_name = value_1 value_2 <em>...</em> value_n
+boolean_property_name = 1 <em>(or 0)</em>
+</pre>
+ </div>
+
+ <p>LLVMBuild files are expected to define a strict set of sections and
+ properties. A typical component description file for a library
+ component would look like the following example:</p>
+ <div class="doc_code">
+ <pre>
+[component_0]
+type = Library
+name = Linker
+parent = Libraries
+required_libraries = Archive BitReader Core Support TransformUtils
+</pre>
+ </div>
+
+ <p>A full description of the exact sections and properties which are allowed
+ follows.</p>
+
+ <p>Each file may define exactly one common component, named "common". The
+ common component may define the following properties:</p>
+ <ul>
+ <li><i>subdirectories</i> <b>[optional]</b>
+ <p>If given, a list of the names of the subdirectories from the current
+ subpath to search for additional LLVMBuild files.</p></li>
+ </ul>
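+
+ <p>For example, a purely illustrative <tt>LLVMBuild.txt</tt> fragment for a
+ directory whose components live in hypothetical <tt>IPO</tt>, <tt>Scalar</tt>,
+ and <tt>Utils</tt> subdirectories might contain:</p>
+ <div class="doc_code">
+ <pre>
+[common]
+subdirectories = IPO Scalar Utils
+</pre>
+ </div>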
+
+ <p>Each file may define multiple components. Each component is described by a
+ section whose name starts with "component". The remainder of the section name
+ is ignored, but each section name must be unique. Typically components are
+ just numbered in order for files with multiple components ("component_0",
+ "component_1", and so on).</p>
+
+ <p><b>Section names not matching this format (or the "common" section) are
+ currently unused and are disallowed.</b></p>
+
+ <p>Every component is defined by the properties in the section. The exact list
+ of properties that are allowed depends on the component
+ type. Components <b>may not</b> define any properties other than those
+ expected by the component type.</p>
+
+ <p>Every component must define the following properties:</p>
+ <ul>
+ <li><i>type</i> <b>[required]</b>
+ <p>The type of the component. Supported component types are
+ detailed below. Most components will define additional properties which
+ may be required or optional.</p></li>
+
+ <li><i>name</i> <b>[required]</b>
+ <p>The name of the component. Names are required to be unique
+ across the entire project.</p></li>
+
+ <li><i>parent</i> <b>[required]</b>
+ <p>The name of the logical parent of the component. Components are
+ organized into a logical tree to make it easier to navigate and organize
+ groups of components. The parents have no semantics as far as the project
+ build is concerned, however. Typically, the parent will be the main
+ component of the parent directory.</p>
+
+ <!-- FIXME: Should we make the parent optional, and default to parent
+ directories component? -->
+
+ <p>Components may reference the root pseudo component using '$ROOT' to
+ indicate they should logically be grouped at the top-level.</p>
+ </li>
+ </ul>
+
+ <p>Components may define the following properties:</p>
+ <ul>
+ <li><i>dependencies</i> <b>[optional]</b>
+      <p>If specified, a list of names of components which <i>must</i> be built
+      prior to this one. This should list exactly those components which
+      produce some tool or source code required for building the
+      component.</p>
+
+ <p><em>NOTE:</em> Group and LibraryGroup components have no semantics for
+ the actual build, and are not allowed to specify dependencies.</p></li>
+ </ul>
+
+ <p>The following section lists the available component types, as well as the
+ properties which are associated with that component.</p>
+
+ <ul>
+ <li><i>type = Group</i>
+ <p>Group components exist purely to allow additional arbitrary structuring
+ of the logical components tree. For example, one might define a
+ "Libraries" group to hold all of the root library components.</p>
+
+      <p>Group components have no additional properties.</p>
+ </li>
+
+ <li><i>type = Library</i>
+ <p>Library components define an individual library which should be built
+ from the source code in the component directory.</p>
+
+ <p>Components with this type use the following properties:</p>
+ <ul>
+ <li><i>library_name</i> <b>[optional]</b>
+ <p>If given, the name to use for the actual library file on disk. If
+ not given, the name is derived from the component name
+ itself.</p></li>
+
+ <li><i>required_libraries</i> <b>[optional]</b>
+ <p>If given, a list of the names of Library or LibraryGroup components
+ which must also be linked in whenever this library is used. That is,
+ the link time dependencies for this component. When tools are built,
+ the build system will include the transitive closure of
+ all <i>required_libraries</i> for the components the tool needs.</p></li>
+
+ <li><i>add_to_library_groups</i> <b>[optional]</b>
+ <p>If given, a list of the names of LibraryGroup components which this
+ component is also part of. This allows nesting groups of
+ components. For example, the <i>X86</i> target might define a library
+ group for all of the <i>X86</i> components. That library group might
+ then be included in the <i>all-targets</i> library group.</p></li>
+
+ <li><i>installed</i> <b>[optional]</b> <b>[boolean]</b>
+ <p>Whether this library is installed. Libraries that are not installed
+ are only reported by <tt>llvm-config</tt> when it is run as part of a
+ development directory.</p></li>
+ </ul>
+ </li>
+
+ <li><i>type = LibraryGroup</i>
+ <p>LibraryGroup components are a mechanism to allow easy definition of
+ useful sets of related components. In particular, we use them to easily
+ specify things like "all targets", or "all assembly printers".</p>
+
+ <p>Components with this type use the following properties:</p>
+ <ul>
+ <li><i>required_libraries</i> <b>[optional]</b>
+ <p>See the Library type for a description of this property.</p></li>
+
+ <li><i>add_to_library_groups</i> <b>[optional]</b>
+ <p>See the Library type for a description of this property.</p></li>
+ </ul>
+ </li>
+
+ <li><i>type = TargetGroup</i>
+ <p>TargetGroup components are an extension of LibraryGroups, specifically
+ for defining LLVM targets (which are handled specially in a few
+ places).</p>
+
+ <p>The name of the component should always be the name of the target.</p>
+
+ <p>Components with this type use the LibraryGroup properties in addition
+ to:</p>
+ <ul>
+ <li><i>has_asmparser</i> <b>[optional]</b> <b>[boolean]</b>
+ <p>Whether this target defines an assembly parser.</p></li>
+ <li><i>has_asmprinter</i> <b>[optional]</b> <b>[boolean]</b>
+ <p>Whether this target defines an assembly printer.</p></li>
+ <li><i>has_disassembler</i> <b>[optional]</b> <b>[boolean]</b>
+ <p>Whether this target defines a disassembler.</p></li>
+ <li><i>has_jit</i> <b>[optional]</b> <b>[boolean]</b>
+ <p>Whether this target supports JIT compilation.</p></li>
+ </ul>
+ </li>
+
+ <li><i>type = Tool</i>
+ <p>Tool components define standalone command line tools which should be
+ built from the source code in the component directory and linked.</p>
+
+ <p>Components with this type use the following properties:</p>
+ <ul>
+ <li><i>required_libraries</i> <b>[optional]</b>
+
+ <p>If given, a list of the names of Library or LibraryGroup components
+ which this tool is required to be linked with. <b>NOTE:</b> The values
+ should be the component names, which may not always match up with the
+ actual library names on disk.</p>
+
+ <p>Build systems are expected to properly include all of the libraries
+        required by the linked components (i.e., the transitive closure
+ of <em>required_libraries</em>).</p>
+
+ <p>Build systems are also expected to understand that those library
+ components must be built prior to linking -- they do not also need to
+ be listed under <i>dependencies</i>.</p></li>
+ </ul>
+ </li>
+
+ <li><i>type = BuildTool</i>
+ <p>BuildTool components are like Tool components, except that the tool is
+ supposed to be built for the platform where the build is running (instead
+      of that platform being targeted). Build systems are expected to handle
+ the fact that required libraries may need to be built for multiple
+ platforms in order to be able to link this tool.</p>
+
+      <p>BuildTool components currently use exactly the same properties as Tool
+      components; the type distinction is only used to differentiate what the
+      tool is built for.</p>
+ </li>
+ </ul>
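+
+  <p>As a rough sketch of how these component types fit together, the following
+  hypothetical file (all component names here are invented for the example)
+  defines a library, adds it to a library group, and defines a tool that links
+  against that group:</p>
+  <div class="doc_code">
+  <pre>
+[component_0]
+type = LibraryGroup
+name = AllWidgets
+parent = $ROOT
+
+[component_1]
+type = Library
+name = WidgetCore
+parent = AllWidgets
+add_to_library_groups = AllWidgets
+
+[component_2]
+type = Tool
+name = widget-tool
+parent = $ROOT
+required_libraries = AllWidgets
+</pre>
+  </div>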
+</div>
+
+<!-- *********************************************************************** -->
+<hr>
+<address>
+ <a href="http://jigsaw.w3.org/css-validator/check/referer"><img
+ src="http://jigsaw.w3.org/css-validator/images/vcss-blue" alt="Valid CSS"></a>
+ <a href="http://validator.w3.org/check/referer"><img
+ src="http://www.w3.org/Icons/valid-html401-blue" alt="Valid HTML 4.01"></a>
+
+ <a href="http://llvm.org/">The LLVM Compiler Infrastructure</a><br>
+ Last modified: $Date$
+</address>
+</body>
+</html>
diff --git a/docs/LLVMBuild.txt b/docs/LLVMBuild.txt
new file mode 100644
index 00000000000..d5aea864ecd
--- /dev/null
+++ b/docs/LLVMBuild.txt
@@ -0,0 +1,21 @@
+;===- ./docs/LLVMBuild.txt -------------------------------------*- Conf -*--===;
+;
+; The LLVM Compiler Infrastructure
+;
+; This file is distributed under the University of Illinois Open Source
+; License. See LICENSE.TXT for details.
+;
+;===------------------------------------------------------------------------===;
+;
+; This is an LLVMBuild description file for the components in this subdirectory.
+;
+; For more information on the LLVMBuild system, please see:
+;
+; http://llvm.org/docs/LLVMBuild.html
+;
+;===------------------------------------------------------------------------===;
+
+[component_0]
+type = Group
+name = Docs
+parent = $ROOT
diff --git a/docs/LangRef.html b/docs/LangRef.html
new file mode 100644
index 00000000000..4daab592e9d
--- /dev/null
+++ b/docs/LangRef.html
@@ -0,0 +1,8731 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
+ "http://www.w3.org/TR/html4/strict.dtd">
+<html>
+<head>
+ <title>LLVM Assembly Language Reference Manual</title>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
+ <meta name="author" content="Chris Lattner">
+ <meta name="description"
+ content="LLVM Assembly Language Reference Manual.">
+ <link rel="stylesheet" href="_static/llvm.css" type="text/css">
+</head>
+
+<body>
+
+<h1>LLVM Language Reference Manual</h1>
+<ol>
+ <li><a href="#abstract">Abstract</a></li>
+ <li><a href="#introduction">Introduction</a></li>
+ <li><a href="#identifiers">Identifiers</a></li>
+ <li><a href="#highlevel">High Level Structure</a>
+ <ol>
+ <li><a href="#modulestructure">Module Structure</a></li>
+ <li><a href="#linkage">Linkage Types</a>
+ <ol>
+ <li><a href="#linkage_private">'<tt>private</tt>' Linkage</a></li>
+ <li><a href="#linkage_linker_private">'<tt>linker_private</tt>' Linkage</a></li>
+ <li><a href="#linkage_linker_private_weak">'<tt>linker_private_weak</tt>' Linkage</a></li>
+ <li><a href="#linkage_internal">'<tt>internal</tt>' Linkage</a></li>
+ <li><a href="#linkage_available_externally">'<tt>available_externally</tt>' Linkage</a></li>
+ <li><a href="#linkage_linkonce">'<tt>linkonce</tt>' Linkage</a></li>
+ <li><a href="#linkage_common">'<tt>common</tt>' Linkage</a></li>
+ <li><a href="#linkage_weak">'<tt>weak</tt>' Linkage</a></li>
+ <li><a href="#linkage_appending">'<tt>appending</tt>' Linkage</a></li>
+ <li><a href="#linkage_externweak">'<tt>extern_weak</tt>' Linkage</a></li>
+ <li><a href="#linkage_linkonce_odr">'<tt>linkonce_odr</tt>' Linkage</a></li>
+ <li><a href="#linkage_linkonce_odr_auto_hide">'<tt>linkonce_odr_auto_hide</tt>' Linkage</a></li>
+      <li><a href="#linkage_weak_odr">'<tt>weak_odr</tt>' Linkage</a></li>
+ <li><a href="#linkage_external">'<tt>external</tt>' Linkage</a></li>
+ <li><a href="#linkage_dllimport">'<tt>dllimport</tt>' Linkage</a></li>
+ <li><a href="#linkage_dllexport">'<tt>dllexport</tt>' Linkage</a></li>
+ </ol>
+ </li>
+ <li><a href="#callingconv">Calling Conventions</a></li>
+ <li><a href="#namedtypes">Named Types</a></li>
+ <li><a href="#globalvars">Global Variables</a></li>
+ <li><a href="#functionstructure">Functions</a></li>
+ <li><a href="#aliasstructure">Aliases</a></li>
+ <li><a href="#namedmetadatastructure">Named Metadata</a></li>
+ <li><a href="#paramattrs">Parameter Attributes</a></li>
+ <li><a href="#fnattrs">Function Attributes</a></li>
+ <li><a href="#gc">Garbage Collector Names</a></li>
+ <li><a href="#moduleasm">Module-Level Inline Assembly</a></li>
+ <li><a href="#datalayout">Data Layout</a></li>
+ <li><a href="#pointeraliasing">Pointer Aliasing Rules</a></li>
+ <li><a href="#volatile">Volatile Memory Accesses</a></li>
+ <li><a href="#memmodel">Memory Model for Concurrent Operations</a></li>
+ <li><a href="#ordering">Atomic Memory Ordering Constraints</a></li>
+ </ol>
+ </li>
+ <li><a href="#typesystem">Type System</a>
+ <ol>
+ <li><a href="#t_classifications">Type Classifications</a></li>
+ <li><a href="#t_primitive">Primitive Types</a>
+ <ol>
+ <li><a href="#t_integer">Integer Type</a></li>
+ <li><a href="#t_floating">Floating Point Types</a></li>
+ <li><a href="#t_x86mmx">X86mmx Type</a></li>
+ <li><a href="#t_void">Void Type</a></li>
+ <li><a href="#t_label">Label Type</a></li>
+ <li><a href="#t_metadata">Metadata Type</a></li>
+ </ol>
+ </li>
+ <li><a href="#t_derived">Derived Types</a>
+ <ol>
+ <li><a href="#t_aggregate">Aggregate Types</a>
+ <ol>
+ <li><a href="#t_array">Array Type</a></li>
+ <li><a href="#t_struct">Structure Type</a></li>
+ <li><a href="#t_opaque">Opaque Structure Types</a></li>
+ <li><a href="#t_vector">Vector Type</a></li>
+ </ol>
+ </li>
+ <li><a href="#t_function">Function Type</a></li>
+ <li><a href="#t_pointer">Pointer Type</a></li>
+ </ol>
+ </li>
+ </ol>
+ </li>
+ <li><a href="#constants">Constants</a>
+ <ol>
+ <li><a href="#simpleconstants">Simple Constants</a></li>
+ <li><a href="#complexconstants">Complex Constants</a></li>
+ <li><a href="#globalconstants">Global Variable and Function Addresses</a></li>
+ <li><a href="#undefvalues">Undefined Values</a></li>
+ <li><a href="#poisonvalues">Poison Values</a></li>
+ <li><a href="#blockaddress">Addresses of Basic Blocks</a></li>
+ <li><a href="#constantexprs">Constant Expressions</a></li>
+ </ol>
+ </li>
+ <li><a href="#othervalues">Other Values</a>
+ <ol>
+ <li><a href="#inlineasm">Inline Assembler Expressions</a></li>
+ <li><a href="#metadata">Metadata Nodes and Metadata Strings</a>
+ <ol>
+ <li><a href="#tbaa">'<tt>tbaa</tt>' Metadata</a></li>
+ <li><a href="#fpmath">'<tt>fpmath</tt>' Metadata</a></li>
+ <li><a href="#range">'<tt>range</tt>' Metadata</a></li>
+ </ol>
+ </li>
+ </ol>
+ </li>
+ <li><a href="#module_flags">Module Flags Metadata</a>
+ <ol>
+ <li><a href="#objc_gc_flags">Objective-C Garbage Collection Module Flags Metadata</a></li>
+ </ol>
+ </li>
+ <li><a href="#intrinsic_globals">Intrinsic Global Variables</a>
+ <ol>
+ <li><a href="#intg_used">The '<tt>llvm.used</tt>' Global Variable</a></li>
+ <li><a href="#intg_compiler_used">The '<tt>llvm.compiler.used</tt>'
+ Global Variable</a></li>
+ <li><a href="#intg_global_ctors">The '<tt>llvm.global_ctors</tt>'
+ Global Variable</a></li>
+ <li><a href="#intg_global_dtors">The '<tt>llvm.global_dtors</tt>'
+ Global Variable</a></li>
+ </ol>
+ </li>
+ <li><a href="#instref">Instruction Reference</a>
+ <ol>
+ <li><a href="#terminators">Terminator Instructions</a>
+ <ol>
+ <li><a href="#i_ret">'<tt>ret</tt>' Instruction</a></li>
+ <li><a href="#i_br">'<tt>br</tt>' Instruction</a></li>
+ <li><a href="#i_switch">'<tt>switch</tt>' Instruction</a></li>
+ <li><a href="#i_indirectbr">'<tt>indirectbr</tt>' Instruction</a></li>
+ <li><a href="#i_invoke">'<tt>invoke</tt>' Instruction</a></li>
+ <li><a href="#i_resume">'<tt>resume</tt>' Instruction</a></li>
+ <li><a href="#i_unreachable">'<tt>unreachable</tt>' Instruction</a></li>
+ </ol>
+ </li>
+ <li><a href="#binaryops">Binary Operations</a>
+ <ol>
+ <li><a href="#i_add">'<tt>add</tt>' Instruction</a></li>
+ <li><a href="#i_fadd">'<tt>fadd</tt>' Instruction</a></li>
+ <li><a href="#i_sub">'<tt>sub</tt>' Instruction</a></li>
+ <li><a href="#i_fsub">'<tt>fsub</tt>' Instruction</a></li>
+ <li><a href="#i_mul">'<tt>mul</tt>' Instruction</a></li>
+ <li><a href="#i_fmul">'<tt>fmul</tt>' Instruction</a></li>
+ <li><a href="#i_udiv">'<tt>udiv</tt>' Instruction</a></li>
+ <li><a href="#i_sdiv">'<tt>sdiv</tt>' Instruction</a></li>
+ <li><a href="#i_fdiv">'<tt>fdiv</tt>' Instruction</a></li>
+ <li><a href="#i_urem">'<tt>urem</tt>' Instruction</a></li>
+ <li><a href="#i_srem">'<tt>srem</tt>' Instruction</a></li>
+ <li><a href="#i_frem">'<tt>frem</tt>' Instruction</a></li>
+ </ol>
+ </li>
+ <li><a href="#bitwiseops">Bitwise Binary Operations</a>
+ <ol>
+ <li><a href="#i_shl">'<tt>shl</tt>' Instruction</a></li>
+ <li><a href="#i_lshr">'<tt>lshr</tt>' Instruction</a></li>
+ <li><a href="#i_ashr">'<tt>ashr</tt>' Instruction</a></li>
+ <li><a href="#i_and">'<tt>and</tt>' Instruction</a></li>
+ <li><a href="#i_or">'<tt>or</tt>' Instruction</a></li>
+ <li><a href="#i_xor">'<tt>xor</tt>' Instruction</a></li>
+ </ol>
+ </li>
+ <li><a href="#vectorops">Vector Operations</a>
+ <ol>
+ <li><a href="#i_extractelement">'<tt>extractelement</tt>' Instruction</a></li>
+ <li><a href="#i_insertelement">'<tt>insertelement</tt>' Instruction</a></li>
+ <li><a href="#i_shufflevector">'<tt>shufflevector</tt>' Instruction</a></li>
+ </ol>
+ </li>
+ <li><a href="#aggregateops">Aggregate Operations</a>
+ <ol>
+ <li><a href="#i_extractvalue">'<tt>extractvalue</tt>' Instruction</a></li>
+ <li><a href="#i_insertvalue">'<tt>insertvalue</tt>' Instruction</a></li>
+ </ol>
+ </li>
+ <li><a href="#memoryops">Memory Access and Addressing Operations</a>
+ <ol>
+ <li><a href="#i_alloca">'<tt>alloca</tt>' Instruction</a></li>
+ <li><a href="#i_load">'<tt>load</tt>' Instruction</a></li>
+ <li><a href="#i_store">'<tt>store</tt>' Instruction</a></li>
+ <li><a href="#i_fence">'<tt>fence</tt>' Instruction</a></li>
+ <li><a href="#i_cmpxchg">'<tt>cmpxchg</tt>' Instruction</a></li>
+ <li><a href="#i_atomicrmw">'<tt>atomicrmw</tt>' Instruction</a></li>
+ <li><a href="#i_getelementptr">'<tt>getelementptr</tt>' Instruction</a></li>
+ </ol>
+ </li>
+ <li><a href="#convertops">Conversion Operations</a>
+ <ol>
+ <li><a href="#i_trunc">'<tt>trunc .. to</tt>' Instruction</a></li>
+ <li><a href="#i_zext">'<tt>zext .. to</tt>' Instruction</a></li>
+ <li><a href="#i_sext">'<tt>sext .. to</tt>' Instruction</a></li>
+ <li><a href="#i_fptrunc">'<tt>fptrunc .. to</tt>' Instruction</a></li>
+ <li><a href="#i_fpext">'<tt>fpext .. to</tt>' Instruction</a></li>
+ <li><a href="#i_fptoui">'<tt>fptoui .. to</tt>' Instruction</a></li>
+ <li><a href="#i_fptosi">'<tt>fptosi .. to</tt>' Instruction</a></li>
+ <li><a href="#i_uitofp">'<tt>uitofp .. to</tt>' Instruction</a></li>
+ <li><a href="#i_sitofp">'<tt>sitofp .. to</tt>' Instruction</a></li>
+ <li><a href="#i_ptrtoint">'<tt>ptrtoint .. to</tt>' Instruction</a></li>
+ <li><a href="#i_inttoptr">'<tt>inttoptr .. to</tt>' Instruction</a></li>
+ <li><a href="#i_bitcast">'<tt>bitcast .. to</tt>' Instruction</a></li>
+ </ol>
+ </li>
+ <li><a href="#otherops">Other Operations</a>
+ <ol>
+ <li><a href="#i_icmp">'<tt>icmp</tt>' Instruction</a></li>
+ <li><a href="#i_fcmp">'<tt>fcmp</tt>' Instruction</a></li>
+ <li><a href="#i_phi">'<tt>phi</tt>' Instruction</a></li>
+ <li><a href="#i_select">'<tt>select</tt>' Instruction</a></li>
+ <li><a href="#i_call">'<tt>call</tt>' Instruction</a></li>
+ <li><a href="#i_va_arg">'<tt>va_arg</tt>' Instruction</a></li>
+ <li><a href="#i_landingpad">'<tt>landingpad</tt>' Instruction</a></li>
+ </ol>
+ </li>
+ </ol>
+ </li>
+ <li><a href="#intrinsics">Intrinsic Functions</a>
+ <ol>
+ <li><a href="#int_varargs">Variable Argument Handling Intrinsics</a>
+ <ol>
+ <li><a href="#int_va_start">'<tt>llvm.va_start</tt>' Intrinsic</a></li>
+ <li><a href="#int_va_end">'<tt>llvm.va_end</tt>' Intrinsic</a></li>
+ <li><a href="#int_va_copy">'<tt>llvm.va_copy</tt>' Intrinsic</a></li>
+ </ol>
+ </li>
+ <li><a href="#int_gc">Accurate Garbage Collection Intrinsics</a>
+ <ol>
+ <li><a href="#int_gcroot">'<tt>llvm.gcroot</tt>' Intrinsic</a></li>
+ <li><a href="#int_gcread">'<tt>llvm.gcread</tt>' Intrinsic</a></li>
+ <li><a href="#int_gcwrite">'<tt>llvm.gcwrite</tt>' Intrinsic</a></li>
+ </ol>
+ </li>
+ <li><a href="#int_codegen">Code Generator Intrinsics</a>
+ <ol>
+ <li><a href="#int_returnaddress">'<tt>llvm.returnaddress</tt>' Intrinsic</a></li>
+ <li><a href="#int_frameaddress">'<tt>llvm.frameaddress</tt>' Intrinsic</a></li>
+ <li><a href="#int_stacksave">'<tt>llvm.stacksave</tt>' Intrinsic</a></li>
+ <li><a href="#int_stackrestore">'<tt>llvm.stackrestore</tt>' Intrinsic</a></li>
+ <li><a href="#int_prefetch">'<tt>llvm.prefetch</tt>' Intrinsic</a></li>
+ <li><a href="#int_pcmarker">'<tt>llvm.pcmarker</tt>' Intrinsic</a></li>
+ <li><a href="#int_readcyclecounter">'<tt>llvm.readcyclecounter</tt>' Intrinsic</a></li>
+ </ol>
+ </li>
+ <li><a href="#int_libc">Standard C Library Intrinsics</a>
+ <ol>
+ <li><a href="#int_memcpy">'<tt>llvm.memcpy.*</tt>' Intrinsic</a></li>
+ <li><a href="#int_memmove">'<tt>llvm.memmove.*</tt>' Intrinsic</a></li>
+ <li><a href="#int_memset">'<tt>llvm.memset.*</tt>' Intrinsic</a></li>
+ <li><a href="#int_sqrt">'<tt>llvm.sqrt.*</tt>' Intrinsic</a></li>
+ <li><a href="#int_powi">'<tt>llvm.powi.*</tt>' Intrinsic</a></li>
+ <li><a href="#int_sin">'<tt>llvm.sin.*</tt>' Intrinsic</a></li>
+ <li><a href="#int_cos">'<tt>llvm.cos.*</tt>' Intrinsic</a></li>
+ <li><a href="#int_pow">'<tt>llvm.pow.*</tt>' Intrinsic</a></li>
+ <li><a href="#int_exp">'<tt>llvm.exp.*</tt>' Intrinsic</a></li>
+ <li><a href="#int_log">'<tt>llvm.log.*</tt>' Intrinsic</a></li>
+ <li><a href="#int_fma">'<tt>llvm.fma.*</tt>' Intrinsic</a></li>
+ <li><a href="#int_fabs">'<tt>llvm.fabs.*</tt>' Intrinsic</a></li>
+ <li><a href="#int_floor">'<tt>llvm.floor.*</tt>' Intrinsic</a></li>
+ </ol>
+ </li>
+ <li><a href="#int_manip">Bit Manipulation Intrinsics</a>
+ <ol>
+ <li><a href="#int_bswap">'<tt>llvm.bswap.*</tt>' Intrinsics</a></li>
+ <li><a href="#int_ctpop">'<tt>llvm.ctpop.*</tt>' Intrinsic </a></li>
+ <li><a href="#int_ctlz">'<tt>llvm.ctlz.*</tt>' Intrinsic </a></li>
+ <li><a href="#int_cttz">'<tt>llvm.cttz.*</tt>' Intrinsic </a></li>
+ </ol>
+ </li>
+ <li><a href="#int_overflow">Arithmetic with Overflow Intrinsics</a>
+ <ol>
+          <li><a href="#int_sadd_overflow">'<tt>llvm.sadd.with.overflow.*</tt>' Intrinsics</a></li>
+          <li><a href="#int_uadd_overflow">'<tt>llvm.uadd.with.overflow.*</tt>' Intrinsics</a></li>
+          <li><a href="#int_ssub_overflow">'<tt>llvm.ssub.with.overflow.*</tt>' Intrinsics</a></li>
+          <li><a href="#int_usub_overflow">'<tt>llvm.usub.with.overflow.*</tt>' Intrinsics</a></li>
+          <li><a href="#int_smul_overflow">'<tt>llvm.smul.with.overflow.*</tt>' Intrinsics</a></li>
+          <li><a href="#int_umul_overflow">'<tt>llvm.umul.with.overflow.*</tt>' Intrinsics</a></li>
+ </ol>
+ </li>
+ <li><a href="#spec_arithmetic">Specialised Arithmetic Intrinsics</a>
+ <ol>
+          <li><a href="#fmuladd">'<tt>llvm.fmuladd</tt>' Intrinsic</a></li>
+ </ol>
+ </li>
+ <li><a href="#int_fp16">Half Precision Floating Point Intrinsics</a>
+ <ol>
+ <li><a href="#int_convert_to_fp16">'<tt>llvm.convert.to.fp16</tt>' Intrinsic</a></li>
+ <li><a href="#int_convert_from_fp16">'<tt>llvm.convert.from.fp16</tt>' Intrinsic</a></li>
+ </ol>
+ </li>
+ <li><a href="#int_debugger">Debugger intrinsics</a></li>
+ <li><a href="#int_eh">Exception Handling intrinsics</a></li>
+ <li><a href="#int_trampoline">Trampoline Intrinsics</a>
+ <ol>
+ <li><a href="#int_it">'<tt>llvm.init.trampoline</tt>' Intrinsic</a></li>
+ <li><a href="#int_at">'<tt>llvm.adjust.trampoline</tt>' Intrinsic</a></li>
+ </ol>
+ </li>
+ <li><a href="#int_memorymarkers">Memory Use Markers</a>
+ <ol>
+ <li><a href="#int_lifetime_start">'<tt>llvm.lifetime.start</tt>' Intrinsic</a></li>
+ <li><a href="#int_lifetime_end">'<tt>llvm.lifetime.end</tt>' Intrinsic</a></li>
+ <li><a href="#int_invariant_start">'<tt>llvm.invariant.start</tt>' Intrinsic</a></li>
+ <li><a href="#int_invariant_end">'<tt>llvm.invariant.end</tt>' Intrinsic</a></li>
+ </ol>
+ </li>
+ <li><a href="#int_general">General intrinsics</a>
+ <ol>
+ <li><a href="#int_var_annotation">
+ '<tt>llvm.var.annotation</tt>' Intrinsic</a></li>
+ <li><a href="#int_annotation">
+ '<tt>llvm.annotation.*</tt>' Intrinsic</a></li>
+ <li><a href="#int_trap">
+ '<tt>llvm.trap</tt>' Intrinsic</a></li>
+ <li><a href="#int_debugtrap">
+ '<tt>llvm.debugtrap</tt>' Intrinsic</a></li>
+ <li><a href="#int_stackprotector">
+ '<tt>llvm.stackprotector</tt>' Intrinsic</a></li>
+ <li><a href="#int_objectsize">
+ '<tt>llvm.objectsize</tt>' Intrinsic</a></li>
+ <li><a href="#int_expect">
+ '<tt>llvm.expect</tt>' Intrinsic</a></li>
+ <li><a href="#int_donothing">
+ '<tt>llvm.donothing</tt>' Intrinsic</a></li>
+ </ol>
+ </li>
+ </ol>
+ </li>
+</ol>
+
+<div class="doc_author">
+ <p>Written by <a href="mailto:sabre@nondot.org">Chris Lattner</a>
+ and <a href="mailto:vadve@cs.uiuc.edu">Vikram Adve</a></p>
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="abstract">Abstract</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>This document is a reference manual for the LLVM assembly language. LLVM is
+ a Static Single Assignment (SSA) based representation that provides type
+ safety, low-level operations, flexibility, and the capability of representing
+ 'all' high-level languages cleanly. It is the common code representation
+ used throughout all phases of the LLVM compilation strategy.</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="introduction">Introduction</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>The LLVM code representation is designed to be used in three different forms:
+ as an in-memory compiler IR, as an on-disk bitcode representation (suitable
+ for fast loading by a Just-In-Time compiler), and as a human readable
+ assembly language representation. This allows LLVM to provide a powerful
+ intermediate representation for efficient compiler transformations and
+ analysis, while providing a natural means to debug and visualize the
+ transformations. The three different forms of LLVM are all equivalent. This
+ document describes the human readable representation and notation.</p>
+
+<p>The LLVM representation aims to be light-weight and low-level while being
+ expressive, typed, and extensible at the same time. It aims to be a
+ "universal IR" of sorts, by being at a low enough level that high-level ideas
+ may be cleanly mapped to it (similar to how microprocessors are "universal
+ IR's", allowing many source languages to be mapped to them). By providing
+ type information, LLVM can be used as the target of optimizations: for
+ example, through pointer analysis, it can be proven that a C automatic
+ variable is never accessed outside of the current function, allowing it to
+ be promoted to a simple SSA value instead of a memory location.</p>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="wellformed">Well-Formedness</a>
+</h4>
+
+<div>
+
+<p>It is important to note that this document describes 'well formed' LLVM
+ assembly language. There is a difference between what the parser accepts and
+ what is considered 'well formed'. For example, the following instruction is
+ syntactically okay, but not well formed:</p>
+
+<pre class="doc_code">
+%x = <a href="#i_add">add</a> i32 1, %x
+</pre>
+
+<p>because the definition of <tt>%x</tt> does not dominate all of its uses. The
+ LLVM infrastructure provides a verification pass that may be used to verify
+ that an LLVM module is well formed. This pass is automatically run by the
+ parser after parsing input assembly and by the optimizer before it outputs
+ bitcode. The violations pointed out by the verifier pass indicate bugs in
+ transformation passes or input to the parser.</p>
+
+</div>
+
+</div>
+
+<!-- Describe the typesetting conventions here. -->
+
+<!-- *********************************************************************** -->
+<h2><a name="identifiers">Identifiers</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>LLVM identifiers come in two basic types: global and local. Global
+ identifiers (functions, global variables) begin with the <tt>'@'</tt>
+ character. Local identifiers (register names, types) begin with
+ the <tt>'%'</tt> character. Additionally, there are three different formats
+ for identifiers, for different purposes:</p>
+
+<ol>
+ <li>Named values are represented as a string of characters with their prefix.
+ For example, <tt>%foo</tt>, <tt>@DivisionByZero</tt>,
+ <tt>%a.really.long.identifier</tt>. The actual regular expression used is
+ '<tt>[%@][a-zA-Z$._][a-zA-Z$._0-9]*</tt>'. Identifiers which require
+ other characters in their names can be surrounded with quotes. Special
+ characters may be escaped using <tt>"\xx"</tt> where <tt>xx</tt> is the
+ ASCII code for the character in hexadecimal. In this way, any character
+ can be used in a name value, even quotes themselves.</li>
+
+ <li>Unnamed values are represented as an unsigned numeric value with their
+ prefix. For example, <tt>%12</tt>, <tt>@2</tt>, <tt>%44</tt>.</li>
+
+ <li>Constants, which are described in a <a href="#constants">section about
+ constants</a>, below.</li>
+</ol>
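+
+<p>As a small illustrative sketch (all names here are invented), a quoted global
+   identifier containing spaces, a quoted identifier with an escaped character,
+   and an unnamed temporary might be written as:</p>
+
+<pre class="doc_code">
+@"happy new year" = global i32 2012
+@"escaped\2Aname" = global i32 0    <i>; \2A is the hexadecimal ASCII code for '*'</i>
+%3 = <a href="#i_add">add</a> i32 1, 2
+</pre>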
+
+<p>LLVM requires that values start with a prefix for two reasons: Compilers
+ don't need to worry about name clashes with reserved words, and the set of
+ reserved words may be expanded in the future without penalty. Additionally,
+ unnamed identifiers allow a compiler to quickly come up with a temporary
+ variable without having to avoid symbol table conflicts.</p>
+
+<p>Reserved words in LLVM are very similar to reserved words in other
+ languages. There are keywords for different opcodes
+ ('<tt><a href="#i_add">add</a></tt>',
+ '<tt><a href="#i_bitcast">bitcast</a></tt>',
+ '<tt><a href="#i_ret">ret</a></tt>', etc...), for primitive type names
+ ('<tt><a href="#t_void">void</a></tt>',
+ '<tt><a href="#t_primitive">i32</a></tt>', etc...), and others. These
+ reserved words cannot conflict with variable names, because none of them
+ start with a prefix character (<tt>'%'</tt> or <tt>'@'</tt>).</p>
+
+<p>Here is an example of LLVM code to multiply the integer variable
+ '<tt>%X</tt>' by 8:</p>
+
+<p>The easy way:</p>
+
+<pre class="doc_code">
+%result = <a href="#i_mul">mul</a> i32 %X, 8
+</pre>
+
+<p>After strength reduction:</p>
+
+<pre class="doc_code">
+%result = <a href="#i_shl">shl</a> i32 %X, 3
+</pre>
+
+<p>And the hard way:</p>
+
+<pre class="doc_code">
+%0 = <a href="#i_add">add</a> i32 %X, %X <i>; yields {i32}:%0</i>
+%1 = <a href="#i_add">add</a> i32 %0, %0 <i>; yields {i32}:%1</i>
+%result = <a href="#i_add">add</a> i32 %1, %1
+</pre>
+
+<p>This last way of multiplying <tt>%X</tt> by 8 illustrates several important
+ lexical features of LLVM:</p>
+
+<ol>
+ <li>Comments are delimited with a '<tt>;</tt>' and go until the end of
+ line.</li>
+
+ <li>Unnamed temporaries are created when the result of a computation is not
+ assigned to a named value.</li>
+
+  <li>Unnamed temporaries are numbered sequentially.</li>
+</ol>
+
+<p>It also shows a convention that we follow in this document. When
+ demonstrating instructions, we will follow an instruction with a comment that
+   defines the type and name of the value produced. Comments are shown in italic
+ text.</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="highlevel">High Level Structure</a></h2>
+<!-- *********************************************************************** -->
+<div>
+<!-- ======================================================================= -->
+<h3>
+ <a name="modulestructure">Module Structure</a>
+</h3>
+
+<div>
+
+<p>LLVM programs are composed of <tt>Module</tt>s, each of which is a
+ translation unit of the input programs. Each module consists of functions,
+ global variables, and symbol table entries. Modules may be combined together
+ with the LLVM linker, which merges function (and global variable)
+ definitions, resolves forward declarations, and merges symbol table
+ entries. Here is an example of the "hello world" module:</p>
+
+<pre class="doc_code">
+<i>; Declare the string constant as a global constant.</i>&nbsp;
+<a href="#identifiers">@.str</a> = <a href="#linkage_private">private</a>&nbsp;<a href="#globalvars">unnamed_addr</a>&nbsp;<a href="#globalvars">constant</a>&nbsp;<a href="#t_array">[13 x i8]</a> c"hello world\0A\00"&nbsp;
+
+<i>; External declaration of the puts function</i>&nbsp;
+<a href="#functionstructure">declare</a> i32 @puts(i8* <a href="#nocapture">nocapture</a>) <a href="#fnattrs">nounwind</a>&nbsp;
+
+<i>; Definition of main function</i>
+define i32 @main() { <i>; i32()* </i>&nbsp;
+ <i>; Convert [13 x i8]* to i8 *...</i>&nbsp;
+ %cast210 = <a href="#i_getelementptr">getelementptr</a> [13 x i8]* @.str, i64 0, i64 0
+
+ <i>; Call puts function to write out the string to stdout.</i>&nbsp;
+ <a href="#i_call">call</a> i32 @puts(i8* %cast210)
+ <a href="#i_ret">ret</a> i32 0&nbsp;
+}
+
+<i>; Named metadata</i>
+!1 = metadata !{i32 42}
+!foo = !{!1, null}
+</pre>
+
+<p>This example is made up of a <a href="#globalvars">global variable</a> named
+ "<tt>.str</tt>", an external declaration of the "<tt>puts</tt>" function,
+ a <a href="#functionstructure">function definition</a> for
+ "<tt>main</tt>" and <a href="#namedmetadatastructure">named metadata</a>
+ "<tt>foo</tt>".</p>
+
+<p>In general, a module is made up of a list of global values (where both
+ functions and global variables are global values). Global values are
+ represented by a pointer to a memory location (in this case, a pointer to an
+ array of char, and a pointer to a function), and have one of the
+ following <a href="#linkage">linkage types</a>.</p>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="linkage">Linkage Types</a>
+</h3>
+
+<div>
+
+<p>All Global Variables and Functions have one of the following types of
+ linkage:</p>
+
+<dl>
+ <dt><tt><b><a name="linkage_private">private</a></b></tt></dt>
+ <dd>Global values with "<tt>private</tt>" linkage are only directly accessible
+ by objects in the current module. In particular, linking code into a
+      module with a private global value may cause the private to be renamed as
+ necessary to avoid collisions. Because the symbol is private to the
+ module, all references can be updated. This doesn't show up in any symbol
+ table in the object file.</dd>
+
+ <dt><tt><b><a name="linkage_linker_private">linker_private</a></b></tt></dt>
+ <dd>Similar to <tt>private</tt>, but the symbol is passed through the
+ assembler and evaluated by the linker. Unlike normal strong symbols, they
+ are removed by the linker from the final linked image (executable or
+ dynamic library).</dd>
+
+ <dt><tt><b><a name="linkage_linker_private_weak">linker_private_weak</a></b></tt></dt>
+ <dd>Similar to "<tt>linker_private</tt>", but the symbol is weak. Note that
+ <tt>linker_private_weak</tt> symbols are subject to coalescing by the
+ linker. The symbols are removed by the linker from the final linked image
+ (executable or dynamic library).</dd>
+
+ <dt><tt><b><a name="linkage_internal">internal</a></b></tt></dt>
+ <dd>Similar to private, but the value shows as a local symbol
+ (<tt>STB_LOCAL</tt> in the case of ELF) in the object file. This
+ corresponds to the notion of the '<tt>static</tt>' keyword in C.</dd>
+
+ <dt><tt><b><a name="linkage_available_externally">available_externally</a></b></tt></dt>
+ <dd>Globals with "<tt>available_externally</tt>" linkage are never emitted
+ into the object file corresponding to the LLVM module. They exist to
+ allow inlining and other optimizations to take place given knowledge of
+ the definition of the global, which is known to be somewhere outside the
+ module. Globals with <tt>available_externally</tt> linkage are allowed to
+ be discarded at will, and are otherwise the same as <tt>linkonce_odr</tt>.
+ This linkage type is only allowed on definitions, not declarations.</dd>
+
+ <dt><tt><b><a name="linkage_linkonce">linkonce</a></b></tt></dt>
+ <dd>Globals with "<tt>linkonce</tt>" linkage are merged with other globals of
+ the same name when linkage occurs. This can be used to implement
+ some forms of inline functions, templates, or other code which must be
+ generated in each translation unit that uses it, but where the body may
+ be overridden with a more definitive definition later. Unreferenced
+ <tt>linkonce</tt> globals are allowed to be discarded. Note that
+ <tt>linkonce</tt> linkage does not actually allow the optimizer to
+ inline the body of this function into callers because it doesn't know if
+ this definition of the function is the definitive definition within the
+ program or whether it will be overridden by a stronger definition.
+ To enable inlining and other optimizations, use "<tt>linkonce_odr</tt>"
+ linkage.</dd>
+
+ <dt><tt><b><a name="linkage_weak">weak</a></b></tt></dt>
+ <dd>"<tt>weak</tt>" linkage has the same merging semantics as
+ <tt>linkonce</tt> linkage, except that unreferenced globals with
+ <tt>weak</tt> linkage may not be discarded. This is used for globals that
+ are declared "weak" in C source code.</dd>
+
+ <dt><tt><b><a name="linkage_common">common</a></b></tt></dt>
+  <dd>"<tt>common</tt>" linkage is most similar to "<tt>weak</tt>" linkage, but
+      it is used for tentative definitions in C, such as "<tt>int X;</tt>" at
+ global scope.
+ Symbols with "<tt>common</tt>" linkage are merged in the same way as
+ <tt>weak symbols</tt>, and they may not be deleted if unreferenced.
+ <tt>common</tt> symbols may not have an explicit section,
+ must have a zero initializer, and may not be marked '<a
+ href="#globalvars"><tt>constant</tt></a>'. Functions and aliases may not
+ have common linkage.</dd>
+
+
+ <dt><tt><b><a name="linkage_appending">appending</a></b></tt></dt>
+ <dd>"<tt>appending</tt>" linkage may only be applied to global variables of
+ pointer to array type. When two global variables with appending linkage
+ are linked together, the two global arrays are appended together. This is
+ the LLVM, typesafe, equivalent of having the system linker append together
+ "sections" with identical names when .o files are linked.</dd>
+
+ <dt><tt><b><a name="linkage_externweak">extern_weak</a></b></tt></dt>
+ <dd>The semantics of this linkage follow the ELF object file model: the symbol
+      is weak until linked; if not linked, the symbol becomes null instead of
+ being an undefined reference.</dd>
+
+ <dt><tt><b><a name="linkage_linkonce_odr">linkonce_odr</a></b></tt></dt>
+ <dt><tt><b><a name="linkage_weak_odr">weak_odr</a></b></tt></dt>
+ <dd>Some languages allow differing globals to be merged, such as two functions
+ with different semantics. Other languages, such as <tt>C++</tt>, ensure
+ that only equivalent globals are ever merged (the "one definition rule"
+ &mdash; "ODR"). Such languages can use the <tt>linkonce_odr</tt>
+ and <tt>weak_odr</tt> linkage types to indicate that the global will only
+ be merged with equivalent globals. These linkage types are otherwise the
+ same as their non-<tt>odr</tt> versions.</dd>
+
+ <dt><tt><b><a name="linkage_linkonce_odr_auto_hide">linkonce_odr_auto_hide</a></b></tt></dt>
+ <dd>Similar to "<tt>linkonce_odr</tt>", but nothing in the translation unit
+ takes the address of this definition. For instance, functions that had an
+ inline definition, but the compiler decided not to inline it.
+ <tt>linkonce_odr_auto_hide</tt> may have only <tt>default</tt> visibility.
+ The symbols are removed by the linker from the final linked image
+ (executable or dynamic library).</dd>
+
+ <dt><tt><b><a name="linkage_external">external</a></b></tt></dt>
+ <dd>If none of the above identifiers are used, the global is externally
+ visible, meaning that it participates in linkage and can be used to
+ resolve external symbol references.</dd>
+</dl>
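+
+<p>As a brief sketch of how several of these linkage types appear on global
+   variables (the variable names are invented for this example):</p>
+
+<pre class="doc_code">
+@counter = private global i32 0                      <i>; not visible outside this module</i>
+@table = internal constant [2 x i32] [i32 1, i32 2]  <i>; local symbol, like C 'static'</i>
+@tentative = common global i32 0, align 4            <i>; C tentative definition, zero initializer</i>
+@inline_helper = linkonce_odr global i32 7           <i>; may be merged with equivalent definitions</i>
+</pre>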
+
+<p>The next two types of linkage are targeted for Microsoft Windows platform
+ only. They are designed to support importing (exporting) symbols from (to)
+ DLLs (Dynamic Link Libraries).</p>
+
+<dl>
+ <dt><tt><b><a name="linkage_dllimport">dllimport</a></b></tt></dt>
+ <dd>"<tt>dllimport</tt>" linkage causes the compiler to reference a function
+ or variable via a global pointer to a pointer that is set up by the DLL
+ exporting the symbol. On Microsoft Windows targets, the pointer name is
+ formed by combining <code>__imp_</code> and the function or variable
+ name.</dd>
+
+ <dt><tt><b><a name="linkage_dllexport">dllexport</a></b></tt></dt>
+ <dd>"<tt>dllexport</tt>" linkage causes the compiler to provide a global
+ pointer to a pointer in a DLL, so that it can be referenced with the
+ <tt>dllimport</tt> attribute. On Microsoft Windows targets, the pointer
+ name is formed by combining <code>__imp_</code> and the function or
+ variable name.</dd>
+</dl>
+
+<p>For example, since the "<tt>.str</tt>" variable in the module shown earlier is
+   defined with <tt>private</tt> linkage, if another module defined a
+   "<tt>.str</tt>" variable and was linked with this one, one of the two would be
+   renamed, preventing a collision. Since
+ "<tt>main</tt>" and "<tt>puts</tt>" are external (i.e., lacking any linkage
+ declarations), they are accessible outside of the current module.</p>
+
+<p>It is illegal for a function <i>declaration</i> to have any linkage type
+ other than <tt>external</tt>, <tt>dllimport</tt>
+ or <tt>extern_weak</tt>.</p>
+
+<p>Aliases can have only <tt>external</tt>, <tt>internal</tt>, <tt>weak</tt>
+ or <tt>weak_odr</tt> linkages.</p>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="callingconv">Calling Conventions</a>
+</h3>
+
+<div>
+
+<p>LLVM <a href="#functionstructure">functions</a>, <a href="#i_call">calls</a>
+ and <a href="#i_invoke">invokes</a> can all have an optional calling
+ convention specified for the call. The calling convention of any pair of
+ dynamic caller/callee must match, or the behavior of the program is
+ undefined. The following calling conventions are supported by LLVM, and more
+ may be added in the future:</p>
+
+<dl>
+ <dt><b>"<tt>ccc</tt>" - The C calling convention</b>:</dt>
+ <dd>This calling convention (the default if no other calling convention is
+ specified) matches the target C calling conventions. This calling
+ convention supports varargs function calls and tolerates some mismatch in
+ the declared prototype and implemented declaration of the function (as
+ does normal C).</dd>
+
+ <dt><b>"<tt>fastcc</tt>" - The fast calling convention</b>:</dt>
+ <dd>This calling convention attempts to make calls as fast as possible
+ (e.g. by passing things in registers). This calling convention allows the
+ target to use whatever tricks it wants to produce fast code for the
+ target, without having to conform to an externally specified ABI
+ (Application Binary Interface).
+ <a href="CodeGenerator.html#tailcallopt">Tail calls can only be optimized
+ when this or the GHC convention is used.</a> This calling convention
+ does not support varargs and requires the prototype of all callees to
+ exactly match the prototype of the function definition.</dd>
+
+ <dt><b>"<tt>coldcc</tt>" - The cold calling convention</b>:</dt>
+ <dd>This calling convention attempts to make code in the caller as efficient
+ as possible under the assumption that the call is not commonly executed.
+ As such, these calls often preserve all registers so that the call does
+ not break any live ranges in the caller side. This calling convention
+ does not support varargs and requires the prototype of all callees to
+ exactly match the prototype of the function definition.</dd>
+
+ <dt><b>"<tt>cc <em>10</em></tt>" - GHC convention</b>:</dt>
+ <dd>This calling convention has been implemented specifically for use by the
+ <a href="http://www.haskell.org/ghc">Glasgow Haskell Compiler (GHC)</a>.
+ It passes everything in registers, going to extremes to achieve this by
+ disabling callee save registers. This calling convention should not be
+ used lightly but only for specific situations such as an alternative to
+ the <em>register pinning</em> performance technique often used when
+      implementing functional programming languages. At the moment, only X86
+      supports this convention, and it has the following limitations:
+      <ul>
+        <li>On <em>X86-32</em> it only supports up to 4 bit type parameters. No
+            floating point types are supported.</li>
+        <li>On <em>X86-64</em> it only supports up to 10 bit type parameters and
+            6 floating point parameters.</li>
+      </ul>
+      This calling convention supports
+      <a href="CodeGenerator.html#tailcallopt">tail call optimization</a> but
+      requires that both the caller and the callee use it.
+ </dd>
+
+ <dt><b>"<tt>cc &lt;<em>n</em>&gt;</tt>" - Numbered convention</b>:</dt>
+ <dd>Any calling convention may be specified by number, allowing
+ target-specific calling conventions to be used. Target specific calling
+ conventions start at 64.</dd>
+</dl>
+
+<p>More calling conventions can be added/defined on an as-needed basis, to
+ support Pascal conventions or any other well-known target-independent
+ convention.</p>
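+
+<p>For example, a function using the fast calling convention might be declared
+   and called as follows (a minimal sketch; the function names are invented):</p>
+
+<pre class="doc_code">
+declare fastcc i32 @compute(i32)
+
+define i32 @caller(i32 %x) {
+  %r = tail call fastcc i32 @compute(i32 %x)
+  ret i32 %r
+}
+</pre>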
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="visibility">Visibility Styles</a>
+</h3>
+
+<div>
+
+<p>All Global Variables and Functions have one of the following visibility
+ styles:</p>
+
+<dl>
+ <dt><b>"<tt>default</tt>" - Default style</b>:</dt>
+ <dd>On targets that use the ELF object file format, default visibility means
+ that the declaration is visible to other modules and, in shared libraries,
+ means that the declared entity may be overridden. On Darwin, default
+ visibility means that the declaration is visible to other modules. Default
+ visibility corresponds to "external linkage" in the language.</dd>
+
+ <dt><b>"<tt>hidden</tt>" - Hidden style</b>:</dt>
+ <dd>Two declarations of an object with hidden visibility refer to the same
+ object if they are in the same shared object. Usually, hidden visibility
+ indicates that the symbol will not be placed into the dynamic symbol
+ table, so no other module (executable or shared library) can reference it
+ directly.</dd>
+
+ <dt><b>"<tt>protected</tt>" - Protected style</b>:</dt>
+ <dd>On ELF, protected visibility indicates that the symbol will be placed in
+ the dynamic symbol table, but that references within the defining module
+ will bind to the local symbol. That is, the symbol cannot be overridden by
+ another module.</dd>
+</dl>
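+
+<p>A small sketch of how visibility styles are written on globals and functions
+   (the names are invented for this example):</p>
+
+<pre class="doc_code">
+@internal_data = hidden global i32 0
+
+define protected i32 @get() {
+  ret i32 0
+}
+</pre>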
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="namedtypes">Named Types</a>
+</h3>
+
+<div>
+
+<p>LLVM IR allows you to specify name aliases for certain types. This can make
+ it easier to read the IR and make the IR more condensed (particularly when
+ recursive types are involved). An example of a name specification is:</p>
+
+<pre class="doc_code">
+%mytype = type { %mytype*, i32 }
+</pre>
+
+<p>You may give a name to any <a href="#typesystem">type</a> except
+ "<a href="#t_void">void</a>". Type name aliases may be used anywhere a type
+ is expected with the syntax "%mytype".</p>
+
+<p>Note that type names are aliases for the structural type that they indicate,
+ and that you can therefore specify multiple names for the same type. This
+ often leads to confusing behavior when dumping out a .ll file. Since LLVM IR
+ uses structural typing, the name is not part of the type. When printing out
+ LLVM IR, the printer will pick <em>one name</em> to render all types of a
+ particular shape. This means that if you have code where two different
+ source types end up having the same LLVM type, that the dumper will sometimes
+ print the "wrong" or unexpected type. This is an important design point and
+ isn't going to change.</p>
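+
+<p>For instance, in the following sketch the two names (invented for the
+   example) refer to the same structural type, so the printer may use either
+   one when emitting values of that type:</p>
+
+<pre class="doc_code">
+%struct.pair  = type { i32, i32 }
+%struct.point = type { i32, i32 }   <i>; structurally identical to %struct.pair</i>
+</pre>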
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="globalvars">Global Variables</a>
+</h3>
+
+<div>
+
+<p>Global variables define regions of memory allocated at compilation time
+ instead of run-time. Global variables may optionally be initialized, may
+ have an explicit section to be placed in, and may have an optional explicit
+ alignment specified.</p>
+
+<p>A variable may be defined as <tt>thread_local</tt>, which
+ means that it will not be shared by threads (each thread will have a
+   separate copy of the variable). Not all targets support thread-local
+ variables. Optionally, a TLS model may be specified:</p>
+
+<dl>
+ <dt><b><tt>localdynamic</tt></b>:</dt>
+ <dd>For variables that are only used within the current shared library.</dd>
+
+ <dt><b><tt>initialexec</tt></b>:</dt>
+ <dd>For variables in modules that will not be loaded dynamically.</dd>
+
+ <dt><b><tt>localexec</tt></b>:</dt>
+ <dd>For variables defined in the executable and only used within it.</dd>
+</dl>
+
+<p>The models correspond to the ELF TLS models; see
+ <a href="http://people.redhat.com/drepper/tls.pdf">ELF
+ Handling For Thread-Local Storage</a> for more information on under which
+ circumstances the different models may be used. The target may choose a
+ different TLS model if the specified model is not supported, or if a better
+ choice of model can be made.</p>
+
+<p>A variable may be defined as a global
+ "constant," which indicates that the contents of the variable
+ will <b>never</b> be modified (enabling better optimization, allowing the
+ global data to be placed in the read-only section of an executable, etc).
+ Note that variables that need runtime initialization cannot be marked
+ "constant" as there is a store to the variable.</p>
+
+<p>LLVM explicitly allows <em>declarations</em> of global variables to be marked
+ constant, even if the final definition of the global is not. This capability
+ can be used to enable slightly better optimization of the program, but
+ requires the language definition to guarantee that optimizations based on the
+ 'constantness' are valid for the translation units that do not include the
+ definition.</p>
+
+<p>As SSA values, global variables define pointer values that are in scope
+ (i.e. they dominate) all basic blocks in the program. Global variables
+ always define a pointer to their "content" type because they describe a
+ region of memory, and all memory objects in LLVM are accessed through
+ pointers.</p>
+
+<p>Global variables can be marked with <tt>unnamed_addr</tt> which indicates
+ that the address is not significant, only the content. Constants marked
+ like this can be merged with other constants if they have the same
+ initializer. Note that a constant with significant address <em>can</em>
+   be merged with an <tt>unnamed_addr</tt> constant, the result being a
+ constant whose address is significant.</p>
+
+<p>A global variable may be declared to reside in a target-specific numbered
+ address space. For targets that support them, address spaces may affect how
+ optimizations are performed and/or what target instructions are used to
+ access the variable. The default address space is zero. The address space
+ qualifier must precede any other attributes.</p>
+
+<p>LLVM allows an explicit section to be specified for globals. If the target
+ supports it, it will emit globals to the section specified.</p>
+
+<p>An explicit alignment may be specified for a global, which must be a power
+ of 2. If not present, or if the alignment is set to zero, the alignment of
+ the global is set by the target to whatever it feels convenient. If an
+ explicit alignment is specified, the global is forced to have exactly that
+ alignment. Targets and optimizers are not allowed to over-align the global
+ if the global has an assigned section. In this case, the extra alignment
+ could be observable: for example, code could assume that the globals are
+   densely packed in their section and try to iterate over them as an array;
+   alignment padding would break this iteration.</p>
+
+<p>For example, the following defines a global in a numbered address space with
+ an initializer, section, and alignment:</p>
+
+<pre class="doc_code">
+@G = addrspace(5) constant float 1.0, section "foo", align 4
+</pre>
+
+<p>The following example defines a thread-local global with
+ the <tt>initialexec</tt> TLS model:</p>
+
+<pre class="doc_code">
+@G = thread_local(initialexec) global i32 0, align 4
+</pre>
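+
+<p>And the following (a small sketch) defines a private string constant whose
+   address is not significant, so it may be merged with identical constants:</p>
+
+<pre class="doc_code">
+@.str = private unnamed_addr constant [4 x i8] c"foo\00"
+</pre>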
+
+</div>
+
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="functionstructure">Functions</a>
+</h3>
+
+<div>
+
+<p>LLVM function definitions consist of the "<tt>define</tt>" keyword, an
+ optional <a href="#linkage">linkage type</a>, an optional
+ <a href="#visibility">visibility style</a>, an optional
+ <a href="#callingconv">calling convention</a>,
+ an optional <tt>unnamed_addr</tt> attribute, a return type, an optional
+ <a href="#paramattrs">parameter attribute</a> for the return type, a function
+ name, a (possibly empty) argument list (each with optional
+ <a href="#paramattrs">parameter attributes</a>), optional
+ <a href="#fnattrs">function attributes</a>, an optional section, an optional
+ alignment, an optional <a href="#gc">garbage collector name</a>, an opening
+ curly brace, a list of basic blocks, and a closing curly brace.</p>
+
+<p>LLVM function declarations consist of the "<tt>declare</tt>" keyword, an
+ optional <a href="#linkage">linkage type</a>, an optional
+ <a href="#visibility">visibility style</a>, an optional
+ <a href="#callingconv">calling convention</a>,
+ an optional <tt>unnamed_addr</tt> attribute, a return type, an optional
+ <a href="#paramattrs">parameter attribute</a> for the return type, a function
+ name, a possibly empty list of arguments, an optional alignment, and an
+ optional <a href="#gc">garbage collector name</a>.</p>
+
+<p>A function definition contains a list of basic blocks, forming the CFG
+ (Control Flow Graph) for the function. Each basic block may optionally start
+ with a label (giving the basic block a symbol table entry), contains a list
+ of instructions, and ends with a <a href="#terminators">terminator</a>
+ instruction (such as a branch or function return).</p>
+
+<p>The first basic block in a function is special in two ways: it is immediately
+ executed on entrance to the function, and it is not allowed to have
+ predecessor basic blocks (i.e. there can not be any branches to the entry
+ block of a function). Because the block can have no predecessors, it also
+ cannot have any <a href="#i_phi">PHI nodes</a>.</p>
+
+<p>LLVM allows an explicit section to be specified for functions. If the target
+ supports it, it will emit functions to the section specified.</p>
+
+<p>An explicit alignment may be specified for a function. If not present, or if
+ the alignment is set to zero, the alignment of the function is set by the
+ target to whatever it feels convenient. If an explicit alignment is
+ specified, the function is forced to have at least that much alignment. All
+ alignments must be a power of 2.</p>
+
+<p>If the <tt>unnamed_addr</tt> attribute is given, the address is known to not
+   be significant, and two identical functions can be merged.</p>
+
+<h5>Syntax:</h5>
+<pre class="doc_code">
+define [<a href="#linkage">linkage</a>] [<a href="#visibility">visibility</a>]
+ [<a href="#callingconv">cconv</a>] [<a href="#paramattrs">ret attrs</a>]
+ &lt;ResultType&gt; @&lt;FunctionName&gt; ([argument list])
+ [<a href="#fnattrs">fn Attrs</a>] [section "name"] [align N]
+ [<a href="#gc">gc</a>] { ... }
+</pre>
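+
+<p>For example, a minimal function definition following this syntax (the name,
+   linkage, convention, and attributes are chosen only for illustration) might
+   be:</p>
+
+<pre class="doc_code">
+define internal fastcc i32 @square(i32 %x) nounwind {
+entry:
+  %r = mul i32 %x, %x
+  ret i32 %r
+}
+</pre>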
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="aliasstructure">Aliases</a>
+</h3>
+
+<div>
+
+<p>Aliases act as a "second name" for the aliasee value (which can be a
+   function, a global variable, another alias, or a bitcast of a global value). Aliases
+ may have an optional <a href="#linkage">linkage type</a>, and an
+ optional <a href="#visibility">visibility style</a>.</p>
+
+<h5>Syntax:</h5>
+<pre class="doc_code">
+@&lt;Name&gt; = alias [Linkage] [Visibility] &lt;AliaseeTy&gt; @&lt;Aliasee&gt;
+</pre>
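+
+<p>For instance, a sketch of an alias for a function (both names are invented,
+   and <tt>@square</tt> is assumed to be defined elsewhere with type
+   <tt>i32 (i32)</tt>):</p>
+
+<pre class="doc_code">
+@square_alias = alias i32 (i32)* @square
+</pre>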
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="namedmetadatastructure">Named Metadata</a>
+</h3>
+
+<div>
+
+<p>Named metadata is a collection of metadata. <a href="#metadata">Metadata
+   nodes</a> (but not metadata strings) are the only valid operands for
+   named metadata.</p>
+
+<h5>Syntax:</h5>
+<pre class="doc_code">
+; Some unnamed metadata nodes, which are referenced by the named metadata.
+!0 = metadata !{metadata !"zero"}
+!1 = metadata !{metadata !"one"}
+!2 = metadata !{metadata !"two"}
+; A named metadata.
+!name = !{!0, !1, !2}
+</pre>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="paramattrs">Parameter Attributes</a>
+</h3>
+
+<div>
+
+<p>The return type and each parameter of a function type may have a set of
+ <i>parameter attributes</i> associated with them. Parameter attributes are
+ used to communicate additional information about the result or parameters of
+ a function. Parameter attributes are considered to be part of the function,
+ not of the function type, so functions with different parameter attributes
+ can have the same function type.</p>
+
+<p>Parameter attributes are simple keywords that follow the type specified. If
+ multiple parameter attributes are needed, they are space separated. For
+ example:</p>
+
+<pre class="doc_code">
+declare i32 @printf(i8* noalias nocapture, ...)
+declare i32 @atoi(i8 zeroext)
+declare signext i8 @returns_signed_char()
+</pre>
+
+<p>Note that any attributes for the function result (<tt>nounwind</tt>,
+ <tt>readonly</tt>) come immediately after the argument list.</p>
+
+<p>Currently, only the following parameter attributes are defined:</p>
+
+<dl>
+ <dt><tt><b>zeroext</b></tt></dt>
+ <dd>This indicates to the code generator that the parameter or return value
+ should be zero-extended to the extent required by the target's ABI (which
+      is usually 32-bits, but is 8-bits for an i1 on x86-64) by the caller (for a
+ parameter) or the callee (for a return value).</dd>
+
+ <dt><tt><b>signext</b></tt></dt>
+ <dd>This indicates to the code generator that the parameter or return value
+ should be sign-extended to the extent required by the target's ABI (which
+ is usually 32-bits) by the caller (for a parameter) or the callee (for a
+ return value).</dd>
+
+ <dt><tt><b>inreg</b></tt></dt>
+ <dd>This indicates that this parameter or return value should be treated in a
+ special target-dependent fashion while emitting code for a function
+ call or return (usually, by putting it in a register as opposed to memory,
+ though some targets use it to distinguish between two different kinds of
+ registers). Use of this attribute is target-specific.</dd>
+
+ <dt><tt><b><a name="byval">byval</a></b></tt></dt>
+ <dd><p>This indicates that the pointer parameter should really be passed by
+ value to the function. The attribute implies that a hidden copy of the
+ pointee
+ is made between the caller and the callee, so the callee is unable to
+ modify the value in the caller. This attribute is only valid on LLVM
+ pointer arguments. It is generally used to pass structs and arrays by
+ value, but is also valid on pointers to scalars. The copy is considered
+ to belong to the caller, not the callee (for example,
+ <tt><a href="#readonly">readonly</a></tt> functions should not write to
+ <tt>byval</tt> parameters). This is not a valid attribute for return
+ values.</p>
+
+ <p>The byval attribute also supports specifying an alignment with
+ the align attribute. It indicates the alignment of the stack slot formed
+ for the copy and the known alignment of the pointer specified to the call
+ site. If the alignment is not specified, then the code generator makes a
+ target-specific assumption (see the example following this list).</p></dd>
+
+ <dt><tt><b><a name="sret">sret</a></b></tt></dt>
+ <dd>This indicates that the pointer parameter specifies the address of a
+ structure that is the return value of the function in the source program.
+ This pointer must be guaranteed by the caller to be valid: loads and
+ stores to the structure may be assumed by the callee not to trap. This
+ may only be applied to the first parameter. This is not a valid attribute
+ for return values. </dd>
+
+ <dt><tt><b><a name="noalias">noalias</a></b></tt></dt>
+ <dd>This indicates that pointer values
+ <a href="#pointeraliasing"><i>based</i></a> on the argument or return
+ value do not alias pointer values which are not <i>based</i> on it,
+ ignoring certain "irrelevant" dependencies.
+ For a call to the parent function, dependencies between memory
+ references from before or after the call and from those during the call
+ are "irrelevant" to the <tt>noalias</tt> keyword for the arguments and
+ return value used in that call.
+ The caller shares the responsibility with the callee for ensuring that
+ these requirements are met.
+ For further details, please see the discussion of the NoAlias response in
+ <a href="AliasAnalysis.html#MustMayNo">alias analysis</a>.<br>
+<br>
+ Note that this definition of <tt>noalias</tt> is intentionally
+ similar to the definition of <tt>restrict</tt> in C99 for function
+ arguments, though it is slightly weaker.
+<br>
+ For function return values, C99's <tt>restrict</tt> is not meaningful,
+ while LLVM's <tt>noalias</tt> is.
+ </dd>
+
+ <dt><tt><b><a name="nocapture">nocapture</a></b></tt></dt>
+ <dd>This indicates that the callee does not make any copies of the pointer
+ that outlive the callee itself. This is not a valid attribute for return
+ values.</dd>
+
+ <dt><tt><b><a name="nest">nest</a></b></tt></dt>
+ <dd>This indicates that the pointer parameter can be excised using the
+ <a href="#int_trampoline">trampoline intrinsics</a>. This is not a valid
+ attribute for return values.</dd>
+</dl>
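+
+<p>As a hedged illustration of the <tt>byval</tt> and <tt>sret</tt> attributes
+ described above (the type <tt>%struct.Pair</tt> and the function names are
+ hypothetical):</p>
+
+<pre class="doc_code">
+%struct.Pair = type { i32, i32 }
+
+<i>; The struct is copied for the callee; the copy is 8-byte aligned.</i>
+declare void @consume_pair(%struct.Pair* byval align 8)
+
+<i>; The first parameter points at storage for the struct return value.</i>
+declare void @produce_pair(%struct.Pair* sret)
+</pre>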
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="gc">Garbage Collector Names</a>
+</h3>
+
+<div>
+
+<p>Each function may specify a garbage collector name, which is simply a
+ string:</p>
+
+<pre class="doc_code">
+define void @f() gc "name" { ... }
+</pre>
+
+<p>The compiler declares the supported values of <i>name</i>. Specifying a
+ collector will cause the compiler to alter its output in order to
+ support the named garbage collection algorithm.</p>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="fnattrs">Function Attributes</a>
+</h3>
+
+<div>
+
+<p>Function attributes are set to communicate additional information about a
+ function. Function attributes are considered to be part of the function, not
+ of the function type, so functions with different function attributes can
+ have the same function type.</p>
+
+<p>Function attributes are simple keywords that follow the type specified. If
+ multiple attributes are needed, they are space separated. For example:</p>
+
+<pre class="doc_code">
+define void @f() noinline { ... }
+define void @f() alwaysinline { ... }
+define void @f() alwaysinline optsize { ... }
+define void @f() optsize { ... }
+</pre>
+
+<dl>
+ <dt><tt><b>address_safety</b></tt></dt>
+ <dd>This attribute indicates that the address safety analysis
+ is enabled for this function. </dd>
+
+ <dt><tt><b>alignstack(&lt;<em>n</em>&gt;)</b></tt></dt>
+ <dd>This attribute indicates that, when emitting the prologue and epilogue,
+ the backend should forcibly align the stack pointer. Specify the
+ desired alignment, which must be a power of two, in parentheses.</dd>
+
+ <dt><tt><b>alwaysinline</b></tt></dt>
+ <dd>This attribute indicates that the inliner should attempt to inline this
+ function into callers whenever possible, ignoring any active inlining size
+ threshold for this caller.</dd>
+
+ <dt><tt><b>nonlazybind</b></tt></dt>
+ <dd>This attribute suppresses lazy symbol binding for the function. This
+ may make calls to the function faster, at the cost of extra program
+ startup time if the function is not called during program startup.</dd>
+
+ <dt><tt><b>inlinehint</b></tt></dt>
+ <dd>This attribute indicates that the source code contained a hint that inlining
+ this function is desirable (such as the "inline" keyword in C/C++). It
+ is just a hint; it imposes no requirements on the inliner.</dd>
+
+ <dt><tt><b>naked</b></tt></dt>
+ <dd>This attribute disables prologue / epilogue emission for the function.
+ This can have very system-specific consequences.</dd>
+
+ <dt><tt><b>noimplicitfloat</b></tt></dt>
+ <dd>This attribute disables implicit floating-point instructions.</dd>
+
+ <dt><tt><b>noinline</b></tt></dt>
+ <dd>This attribute indicates that the inliner should never inline this
+ function in any situation. This attribute may not be used together with
+ the <tt>alwaysinline</tt> attribute.</dd>
+
+ <dt><tt><b>noredzone</b></tt></dt>
+ <dd>This attribute indicates that the code generator should not use a red
+ zone, even if the target-specific ABI normally permits it.</dd>
+
+ <dt><tt><b>noreturn</b></tt></dt>
+ <dd>This function attribute indicates that the function never returns
+ normally. This produces undefined behavior at runtime if the function
+ ever does dynamically return.</dd>
+
+ <dt><tt><b>nounwind</b></tt></dt>
+ <dd>This function attribute indicates that the function never returns with an
+ unwind or exceptional control flow. If the function does unwind, its
+ runtime behavior is undefined.</dd>
+
+ <dt><tt><b>optsize</b></tt></dt>
+ <dd>This attribute suggests that optimization passes and code generator passes
+ make choices that keep the code size of this function low, and otherwise
+ do optimizations specifically to reduce code size.</dd>
+
+ <dt><tt><b>readnone</b></tt></dt>
+ <dd>This attribute indicates that the function computes its result (or decides
+ to unwind an exception) based strictly on its arguments, without
+ dereferencing any pointer arguments or otherwise accessing any mutable
+ state (e.g. memory, control registers, etc) visible to caller functions.
+ It does not write through any pointer arguments
+ (including <tt><a href="#byval">byval</a></tt> arguments) and never
+ changes any state visible to callers. This means that it cannot unwind
+ exceptions by calling the <tt>C++</tt> exception throwing methods.</dd>
+
+ <dt><tt><b><a name="readonly">readonly</a></b></tt></dt>
+ <dd>This attribute indicates that the function does not write through any
+ pointer arguments (including <tt><a href="#byval">byval</a></tt>
+ arguments) or otherwise modify any state (e.g. memory, control registers,
+ etc) visible to caller functions. It may dereference pointer arguments
+ and read state that may be set in the caller. A readonly function always
+ returns the same value (or unwinds an exception identically) when called
+ with the same set of arguments and global state. It cannot unwind an
+ exception by calling the <tt>C++</tt> exception throwing methods.</dd>
+
+ <dt><tt><b><a name="returns_twice">returns_twice</a></b></tt></dt>
+ <dd>This attribute indicates that this function can return twice. The
+ C <code>setjmp</code> is an example of such a function. The compiler
+ disables some optimizations (like tail calls) in the caller of these
+ functions.</dd>
+
+ <dt><tt><b><a name="ssp">ssp</a></b></tt></dt>
+ <dd>This attribute indicates that the function should emit a stack smashing
+ protector. It is in the form of a "canary"&mdash;a random value placed on
+ the stack before the local variables that's checked upon return from the
+ function to see if it has been overwritten. A heuristic is used to
+ determine if a function needs stack protectors or not.<br>
+<br>
+ If a function that has an <tt>ssp</tt> attribute is inlined into a
+ function that doesn't have an <tt>ssp</tt> attribute, then the resulting
+ function will have an <tt>ssp</tt> attribute.</dd>
+
+ <dt><tt><b>sspreq</b></tt></dt>
+ <dd>This attribute indicates that the function should <em>always</em> emit a
+ stack smashing protector. This overrides
+ the <tt><a href="#ssp">ssp</a></tt> function attribute.<br>
+<br>
+ If a function that has an <tt>sspreq</tt> attribute is inlined into a
+ function that doesn't have an <tt>sspreq</tt> attribute or which has
+ an <tt>ssp</tt> attribute, then the resulting function will have
+ an <tt>sspreq</tt> attribute.</dd>
+
+ <dt><tt><b><a name="uwtable">uwtable</a></b></tt></dt>
+ <dd>This attribute indicates that the ABI being targeted requires that
+ an unwind table entry be produced for this function even if we can
+ show that no exceptions pass by it. This is normally the case for
+ the ELF x86-64 ABI, but it can be disabled for some compilation
+ units.</dd>
+</dl>
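+
+<p>For instance, a pure math routine and a function that only reads memory
+ might be declared as follows (a hedged sketch using the <tt>readnone</tt> and
+ <tt>readonly</tt> attributes described above; the declarations are
+ illustrative):</p>
+
+<pre class="doc_code">
+declare double @cos(double) readnone nounwind
+declare i64 @strlen(i8* nocapture) readonly nounwind
+</pre>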
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="moduleasm">Module-Level Inline Assembly</a>
+</h3>
+
+<div>
+
+<p>Modules may contain "module-level inline asm" blocks, which correspond to
+ the GCC "file scope inline asm" blocks. These blocks are internally
+ concatenated by LLVM and treated as a single unit, but may be separated in
+ the <tt>.ll</tt> file if desired. The syntax is very simple:</p>
+
+<pre class="doc_code">
+module asm "inline asm code goes here"
+module asm "more can go here"
+</pre>
+
+<p>The strings can contain any character by escaping non-printable characters.
+ The escape sequence used is simply "\xx" where "xx" is the two digit hex code
+ for the number.</p>
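+
+<p>For example, two instructions could be placed on separate lines within one
+ string by escaping a newline as <tt>\0A</tt> (a hedged sketch; the actual
+ instructions are placeholders):</p>
+
+<pre class="doc_code">
+module asm "pushq %rax\0Apopq %rax"
+</pre>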
+
+<p>The inline asm code is simply printed to the machine code .s file when
+ assembly code is generated.</p>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="datalayout">Data Layout</a>
+</h3>
+
+<div>
+
+<p>A module may specify a target-specific data layout string that specifies how
+ data is to be laid out in memory. The syntax for the data layout is
+ simply:</p>
+
+<pre class="doc_code">
+target datalayout = "<i>layout specification</i>"
+</pre>
+
+<p>The <i>layout specification</i> consists of a list of specifications
+ separated by the minus sign character ('-'). Each specification starts with
+ a letter and may include other information after the letter to define some
+ aspect of the data layout. The specifications accepted are as follows:</p>
+
+<dl>
+ <dt><tt>E</tt></dt>
+ <dd>Specifies that the target lays out data in big-endian form. That is, the
+ bits with the most significance have the lowest address location.</dd>
+
+ <dt><tt>e</tt></dt>
+ <dd>Specifies that the target lays out data in little-endian form. That is,
+ the bits with the least significance have the lowest address
+ location.</dd>
+
+ <dt><tt>S<i>size</i></tt></dt>
+ <dd>Specifies the natural alignment of the stack in bits. Alignment promotion
+ of stack variables is limited to the natural stack alignment to avoid
+ dynamic stack realignment. The stack alignment must be a multiple of
+ 8-bits. If omitted, the natural stack alignment defaults to "unspecified",
+ which does not prevent any alignment promotions.</dd>
+
+ <dt><tt>p:<i>size</i>:<i>abi</i>:<i>pref</i></tt></dt>
+ <dd>This specifies the <i>size</i> of a pointer and its <i>abi</i> and
+ <i>preferred</i> alignments. All sizes are in bits. Specifying
+ the <i>pref</i> alignment is optional. If omitted, the
+ preceding <tt>:</tt> should be omitted too.</dd>
+
+ <dt><tt>i<i>size</i>:<i>abi</i>:<i>pref</i></tt></dt>
+ <dd>This specifies the alignment for an integer type of a given bit
+ <i>size</i>. The value of <i>size</i> must be in the range [1,2^23).</dd>
+
+ <dt><tt>v<i>size</i>:<i>abi</i>:<i>pref</i></tt></dt>
+ <dd>This specifies the alignment for a vector type of a given bit
+ <i>size</i>.</dd>
+
+ <dt><tt>f<i>size</i>:<i>abi</i>:<i>pref</i></tt></dt>
+ <dd>This specifies the alignment for a floating point type of a given bit
+ <i>size</i>. Only values of <i>size</i> that are supported by the target
+ will work. 32 (float) and 64 (double) are supported on all targets;
+ 80 or 128 (different flavors of long double) are also supported on some
+ targets.</dd>
+
+ <dt><tt>a<i>size</i>:<i>abi</i>:<i>pref</i></tt></dt>
+ <dd>This specifies the alignment for an aggregate type of a given bit
+ <i>size</i>.</dd>
+
+ <dt><tt>s<i>size</i>:<i>abi</i>:<i>pref</i></tt></dt>
+ <dd>This specifies the alignment for a stack object of a given bit
+ <i>size</i>.</dd>
+
+ <dt><tt>n<i>size1</i>:<i>size2</i>:<i>size3</i>...</tt></dt>
+ <dd>This specifies a set of native integer widths for the target CPU
+ in bits. For example, it might contain "n32" for 32-bit PowerPC,
+ "n32:64" for PowerPC 64, or "n8:16:32:64" for X86-64. Elements of
+ this set are considered to support most general arithmetic
+ operations efficiently.</dd>
+</dl>
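+
+<p>For example, a module targeting a hypothetical little-endian machine with
+ 32-bit pointers and native 8-, 16-, and 32-bit integer arithmetic might use a
+ string such as the following (an illustrative sketch built from the
+ specifications above, not a real target's layout):</p>
+
+<pre class="doc_code">
+target datalayout = "e-p:32:32:32-i64:64:64-f64:64:64-n8:16:32"
+</pre>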
+
+<p>When constructing the data layout for a given target, LLVM starts with a
+ default set of specifications which are then (possibly) overridden by the
+ specifications in the <tt>datalayout</tt> keyword. The default specifications
+ are given in this list:</p>
+
+<ul>
+ <li><tt>E</tt> - big endian</li>
+ <li><tt>p:64:64:64</tt> - 64-bit pointers with 64-bit alignment</li>
+ <li><tt>i1:8:8</tt> - i1 is 8-bit (byte) aligned</li>
+ <li><tt>i8:8:8</tt> - i8 is 8-bit (byte) aligned</li>
+ <li><tt>i16:16:16</tt> - i16 is 16-bit aligned</li>
+ <li><tt>i32:32:32</tt> - i32 is 32-bit aligned</li>
+ <li><tt>i64:32:64</tt> - i64 has ABI alignment of 32-bits but preferred
+ alignment of 64-bits</li>
+ <li><tt>f32:32:32</tt> - float is 32-bit aligned</li>
+ <li><tt>f64:64:64</tt> - double is 64-bit aligned</li>
+ <li><tt>v64:64:64</tt> - 64-bit vector is 64-bit aligned</li>
+ <li><tt>v128:128:128</tt> - 128-bit vector is 128-bit aligned</li>
+ <li><tt>a0:0:1</tt> - aggregates are 8-bit aligned</li>
+ <li><tt>s0:64:64</tt> - stack objects are 64-bit aligned</li>
+</ul>
+
+<p>When LLVM is determining the alignment for a given type, it uses the
+ following rules:</p>
+
+<ol>
+ <li>If the type sought is an exact match for one of the specifications, that
+ specification is used.</li>
+
+ <li>If no match is found, and the type sought is an integer type, then the
+ smallest integer type that is larger than the bitwidth of the sought type
+ is used. If none of the specifications are larger than the bitwidth then
+ the largest integer type is used. For example, given the default
+ specifications above, the i7 type will use the alignment of i8 (next
+ largest) while both i65 and i256 will use the alignment of i64 (largest
+ specified).</li>
+
+ <li>If no match is found, and the type sought is a vector type, then the
+ largest vector type that is smaller than the sought vector type will be
+ used as a fall back. This happens because &lt;128 x double&gt; can be
+ implemented in terms of 64 &lt;2 x double&gt;, for example.</li>
+</ol>
+
+<p>The function of the data layout string may not be what you expect. Notably,
+ this is not a specification from the frontend of what alignment the code
+ generator should use.</p>
+
+<p>Instead, if specified, the target data layout is required to match what the
+ ultimate <em>code generator</em> expects. This string is used by the
+ mid-level optimizers to
+ improve code, and this only works if it matches what the ultimate code
+ generator uses. If you would like to generate IR that does not embed this
+ target-specific detail into the IR, then you don't have to specify the
+ string. This will disable some optimizations that require precise layout
+ information, but this also prevents those optimizations from introducing
+ target specificity into the IR.</p>
+
+
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="pointeraliasing">Pointer Aliasing Rules</a>
+</h3>
+
+<div>
+
+<p>Any memory access must be done through a pointer value associated
+with an address range of the memory access, otherwise the behavior
+is undefined. Pointer values are associated with address ranges
+according to the following rules:</p>
+
+<ul>
+ <li>A pointer value is associated with the addresses associated with
+ any value it is <i>based</i> on.</li>
+ <li>An address of a global variable is associated with the address
+ range of the variable's storage.</li>
+ <li>The result value of an allocation instruction is associated with
+ the address range of the allocated storage.</li>
+ <li>A null pointer in the default address-space is associated with
+ no address.</li>
+ <li>An integer constant other than zero or a pointer value returned
+ from a function not defined within LLVM may be associated with address
+ ranges allocated through mechanisms other than those provided by
+ LLVM. Such ranges shall not overlap with any ranges of addresses
+ allocated by mechanisms provided by LLVM.</li>
+</ul>
+
+<p>A pointer value is <i>based</i> on another pointer value according
+ to the following rules:</p>
+
+<ul>
+ <li>A pointer value formed from a
+ <tt><a href="#i_getelementptr">getelementptr</a></tt> operation
+ is <i>based</i> on the first operand of the <tt>getelementptr</tt>.</li>
+ <li>The result value of a
+ <tt><a href="#i_bitcast">bitcast</a></tt> is <i>based</i> on the operand
+ of the <tt>bitcast</tt>.</li>
+ <li>A pointer value formed by an
+ <tt><a href="#i_inttoptr">inttoptr</a></tt> is <i>based</i> on all
+ pointer values that contribute (directly or indirectly) to the
+ computation of the pointer's value.</li>
+ <li>The "<i>based</i> on" relationship is transitive.</li>
+</ul>
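+
+<p>As a hedged illustration of these rules (all value names are hypothetical):</p>
+
+<pre class="doc_code">
+%q = getelementptr i32* %p, i64 4    <i>; %q is based on %p</i>
+%r = bitcast i32* %q to i8*          <i>; %r is based on %q (and, transitively, on %p)</i>
+%s = inttoptr i64 %mix to i8*        <i>; %s is based on every pointer contributing to %mix</i>
+</pre>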
+
+<p>Note that this definition of <i>"based"</i> is intentionally
+ similar to the definition of <i>"based"</i> in C99, though it is
+ slightly weaker.</p>
+
+<p>LLVM IR does not associate types with memory. The result type of a
+<tt><a href="#i_load">load</a></tt> merely indicates the size and
+alignment of the memory from which to load, as well as the
+interpretation of the value. The first operand type of a
+<tt><a href="#i_store">store</a></tt> similarly only indicates the size
+and alignment of the store.</p>
+
+<p>Consequently, type-based alias analysis, aka TBAA, aka
+<tt>-fstrict-aliasing</tt>, is not applicable to general unadorned
+LLVM IR. <a href="#metadata">Metadata</a> may be used to encode
+additional information which specialized optimization passes may use
+to implement type-based alias analysis.</p>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="volatile">Volatile Memory Accesses</a>
+</h3>
+
+<div>
+
+<p>Certain memory accesses, such as <a href="#i_load"><tt>load</tt></a>s, <a
+href="#i_store"><tt>store</tt></a>s, and <a
+href="#int_memcpy"><tt>llvm.memcpy</tt></a>s may be marked <tt>volatile</tt>.
+The optimizers must not change the number of volatile operations or change their
+order of execution relative to other volatile operations. The optimizers
+<i>may</i> change the order of volatile operations relative to non-volatile
+operations. This is not Java's "volatile" and has no cross-thread
+synchronization behavior.</p>
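+
+<p>For example (a minimal sketch; <tt>%device_reg</tt> is a hypothetical
+ pointer to a memory-mapped register):</p>
+
+<pre class="doc_code">
+%old = load volatile i32* %device_reg, align 4
+store volatile i32 0, i32* %device_reg, align 4
+</pre>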
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="memmodel">Memory Model for Concurrent Operations</a>
+</h3>
+
+<div>
+
+<p>The LLVM IR does not define any way to start parallel threads of execution
+or to register signal handlers. Nonetheless, there are platform-specific
+ways to create them, and we define LLVM IR's behavior in their presence. This
+model is inspired by the C++0x memory model.</p>
+
+<p>For a more informal introduction to this model, see the
+<a href="Atomics.html">LLVM Atomic Instructions and Concurrency Guide</a>.
+
+<p>We define a <i>happens-before</i> partial order as the least partial order
+that</p>
+<ul>
+ <li>Is a superset of single-thread program order, and</li>
+ <li>When <tt>a</tt> <i>synchronizes-with</i> <tt>b</tt>, includes an edge from
+ <tt>a</tt> to <tt>b</tt>. <i>Synchronizes-with</i> pairs are introduced
+ by platform-specific techniques, like pthread locks, thread
+ creation, thread joining, etc., and by atomic instructions.
+ (See also <a href="#ordering">Atomic Memory Ordering Constraints</a>).
+ </li>
+</ul>
+
+<p>Note that program order does not introduce <i>happens-before</i> edges
+between a thread and signals executing inside that thread.</p>
+
+<p>Every (defined) read operation (load instructions, memcpy, atomic
+loads/read-modify-writes, etc.) <var>R</var> reads a series of bytes written by
+(defined) write operations (store instructions, atomic
+stores/read-modify-writes, memcpy, etc.). For the purposes of this section,
+initialized globals are considered to have a write of the initializer which is
+atomic and happens before any other read or write of the memory in question.
+For each byte of a read <var>R</var>, <var>R<sub>byte</sub></var> may see
+any write to the same byte, except:</p>
+
+<ul>
+ <li>If <var>write<sub>1</sub></var> happens before
+ <var>write<sub>2</sub></var>, and <var>write<sub>2</sub></var> happens
+ before <var>R<sub>byte</sub></var>, then <var>R<sub>byte</sub></var>
+ does not see <var>write<sub>1</sub></var>.
+ <li>If <var>R<sub>byte</sub></var> happens before
+ <var>write<sub>3</sub></var>, then <var>R<sub>byte</sub></var> does not
+ see <var>write<sub>3</sub></var>.
+</ul>
+
+<p>Given that definition, <var>R<sub>byte</sub></var> is defined as follows:
+<ul>
+ <li>If <var>R</var> is volatile, the result is target-dependent. (Volatile
+ is supposed to give guarantees which can support
+ <code>sig_atomic_t</code> in C/C++, and may be used for accesses to
+ addresses which do not behave like normal memory. It does not generally
+ provide cross-thread synchronization.)
+ <li>Otherwise, if there is no write to the same byte that happens before
+ <var>R<sub>byte</sub></var>, <var>R<sub>byte</sub></var> returns
+ <tt>undef</tt> for that byte.
+ <li>Otherwise, if <var>R<sub>byte</sub></var> may see exactly one write,
+ <var>R<sub>byte</sub></var> returns the value written by that
+ write.</li>
+ <li>Otherwise, if <var>R</var> is atomic, and all the writes
+ <var>R<sub>byte</sub></var> may see are atomic, it chooses one of the
+ values written. See the <a href="#ordering">Atomic Memory Ordering
+ Constraints</a> section for additional constraints on how the choice
+ is made.
+ <li>Otherwise <var>R<sub>byte</sub></var> returns <tt>undef</tt>.</li>
+</ul>
+
+<p><var>R</var> returns the value composed of the series of bytes it read.
+This implies that some bytes within the value may be <tt>undef</tt>
+<b>without</b> the entire value being <tt>undef</tt>. Note that this only
+defines the semantics of the operation; it doesn't mean that targets will
+emit more than one instruction to read the series of bytes.</p>
+
+<p>Note that in cases where none of the atomic intrinsics are used, this model
+places only one restriction on IR transformations on top of what is required
+for single-threaded execution: introducing a store to a byte which might not
+otherwise be stored is not allowed in general. (Specifically, in the case
+where another thread might write to and read from an address, introducing a
+store can change a load that may see exactly one write into a load that may
+see multiple writes.)</p>
+
+<!-- FIXME: This model assumes all targets where concurrency is relevant have
+a byte-size store which doesn't affect adjacent bytes. As far as I can tell,
+none of the backends currently in the tree fall into this category; however,
+there might be targets which care. If there are, we want a paragraph
+like the following:
+
+Targets may specify that stores narrower than a certain width are not
+available; on such a target, for the purposes of this model, treat any
+non-atomic write with an alignment or width less than the minimum width
+as if it writes to the relevant surrounding bytes.
+-->
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="ordering">Atomic Memory Ordering Constraints</a>
+</h3>
+
+<div>
+
+<p>Atomic instructions (<a href="#i_cmpxchg"><code>cmpxchg</code></a>,
+<a href="#i_atomicrmw"><code>atomicrmw</code></a>,
+<a href="#i_fence"><code>fence</code></a>,
+<a href="#i_load"><code>atomic load</code></a>, and
+<a href="#i_store"><code>atomic store</code></a>) take an ordering parameter
+that determines which other atomic instructions on the same address they
+<i>synchronize with</i>. These semantics are borrowed from Java and C++0x,
+but are somewhat more colloquial. If these descriptions aren't precise enough,
+check those specs (see spec references in the
+<a href="Atomics.html#introduction">atomics guide</a>).
+<a href="#i_fence"><code>fence</code></a> instructions
+treat these orderings somewhat differently since they don't take an address.
+See that instruction's documentation for details.</p>
+
+<p>For a simpler introduction to the ordering constraints, see the
+<a href="Atomics.html">LLVM Atomic Instructions and Concurrency Guide</a>.</p>
+
+<dl>
+<dt><code>unordered</code></dt>
+<dd>The set of values that can be read is governed by the happens-before
+partial order. A value cannot be read unless some operation wrote it.
+This is intended to provide a guarantee strong enough to model Java's
+non-volatile shared variables. This ordering cannot be specified for
+read-modify-write operations; it is not strong enough to make them atomic
+in any interesting way.</dd>
+<dt><code>monotonic</code></dt>
+<dd>In addition to the guarantees of <code>unordered</code>, there is a single
+total order for modifications by <code>monotonic</code> operations on each
+address. All modification orders must be compatible with the happens-before
+order. There is no guarantee that the modification orders can be combined to
+a global total order for the whole program (and this often will not be
+possible). The read in an atomic read-modify-write operation
+(<a href="#i_cmpxchg"><code>cmpxchg</code></a> and
+<a href="#i_atomicrmw"><code>atomicrmw</code></a>)
+reads the value in the modification order immediately before the value it
+writes. If one atomic read happens before another atomic read of the same
+address, the later read must see the same value or a later value in the
+address's modification order. This disallows reordering of
+<code>monotonic</code> (or stronger) operations on the same address. If an
+address is written <code>monotonic</code>ally by one thread, and other threads
+<code>monotonic</code>ally read that address repeatedly, the other threads must
+eventually see the write. This corresponds to the C++0x/C1x
+<code>memory_order_relaxed</code>.</dd>
+<dt><code>acquire</code></dt>
+<dd>In addition to the guarantees of <code>monotonic</code>,
+a <i>synchronizes-with</i> edge may be formed with a <code>release</code>
+operation. This is intended to model C++'s <code>memory_order_acquire</code>.</dd>
+<dt><code>release</code></dt>
+<dd>In addition to the guarantees of <code>monotonic</code>, if this operation
+writes a value which is subsequently read by an <code>acquire</code> operation,
+it <i>synchronizes-with</i> that operation. (This isn't a complete
+description; see the C++0x definition of a release sequence.) This corresponds
+to the C++0x/C1x <code>memory_order_release</code>.</dd>
+<dt><code>acq_rel</code> (acquire+release)</dt><dd>Acts as both an
+<code>acquire</code> and <code>release</code> operation on its address.
+This corresponds to the C++0x/C1x <code>memory_order_acq_rel</code>.</dd>
+<dt><code>seq_cst</code> (sequentially consistent)</dt>
+<dd>In addition to the guarantees of <code>acq_rel</code>
+(<code>acquire</code> for an operation which only reads, <code>release</code>
+for an operation which only writes), there is a global total order on all
+sequentially-consistent operations on all addresses, which is consistent with
+the <i>happens-before</i> partial order and with the modification orders of
+all the affected addresses. Each sequentially-consistent read sees the last
+preceding write to the same address in this global order. This corresponds
+to the C++0x/C1x <code>memory_order_seq_cst</code> and Java volatile.</dd>
+</dl>
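+
+<p>As a hedged example of a <code>release</code> store pairing with an
+ <code>acquire</code> load (the global <tt>@flag</tt> is hypothetical):</p>
+
+<pre class="doc_code">
+store atomic i32 1, i32* @flag release, align 4     <i>; in the producing thread</i>
+%ready = load atomic i32* @flag acquire, align 4    <i>; in the consuming thread</i>
+</pre>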
+
+<p id="singlethread">If an atomic operation is marked <code>singlethread</code>,
+it only <i>synchronizes with</i> or participates in modification and seq_cst
+total orderings with other operations running in the same thread (for example,
+in signal handlers).</p>
+
+</div>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="typesystem">Type System</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>The LLVM type system is one of the most important features of the
+ intermediate representation. Being typed enables a number of optimizations
+ to be performed on the intermediate representation directly, without having
+ to do extra analyses on the side before the transformation. A strong type
+ system makes it easier to read the generated code and enables novel analyses
+ and transformations that are not feasible to perform on normal three address
+ code representations.</p>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="t_classifications">Type Classifications</a>
+</h3>
+
+<div>
+
+<p>The types fall into a few useful classifications:</p>
+
+<table border="1" cellspacing="0" cellpadding="4">
+ <tbody>
+ <tr><th>Classification</th><th>Types</th></tr>
+ <tr>
+ <td><a href="#t_integer">integer</a></td>
+ <td><tt>i1, i2, i3, ... i8, ... i16, ... i32, ... i64, ... </tt></td>
+ </tr>
+ <tr>
+ <td><a href="#t_floating">floating point</a></td>
+ <td><tt>half, float, double, x86_fp80, fp128, ppc_fp128</tt></td>
+ </tr>
+ <tr>
+ <td><a name="t_firstclass">first class</a></td>
+ <td><a href="#t_integer">integer</a>,
+ <a href="#t_floating">floating point</a>,
+ <a href="#t_pointer">pointer</a>,
+ <a href="#t_vector">vector</a>,
+ <a href="#t_struct">structure</a>,
+ <a href="#t_array">array</a>,
+ <a href="#t_label">label</a>,
+ <a href="#t_metadata">metadata</a>.
+ </td>
+ </tr>
+ <tr>
+ <td><a href="#t_primitive">primitive</a></td>
+ <td><a href="#t_label">label</a>,
+ <a href="#t_void">void</a>,
+ <a href="#t_integer">integer</a>,
+ <a href="#t_floating">floating point</a>,
+ <a href="#t_x86mmx">x86mmx</a>,
+ <a href="#t_metadata">metadata</a>.</td>
+ </tr>
+ <tr>
+ <td><a href="#t_derived">derived</a></td>
+ <td><a href="#t_array">array</a>,
+ <a href="#t_function">function</a>,
+ <a href="#t_pointer">pointer</a>,
+ <a href="#t_struct">structure</a>,
+ <a href="#t_vector">vector</a>,
+ <a href="#t_opaque">opaque</a>.
+ </td>
+ </tr>
+ </tbody>
+</table>
+
+<p>The <a href="#t_firstclass">first class</a> types are perhaps the most
+ important. Values of these types are the only ones which can be produced by
+ instructions.</p>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="t_primitive">Primitive Types</a>
+</h3>
+
+<div>
+
+<p>The primitive types are the fundamental building blocks of the LLVM
+ system.</p>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="t_integer">Integer Type</a>
+</h4>
+
+<div>
+
+<h5>Overview:</h5>
+<p>The integer type is a simple type that specifies an arbitrary
+ bit width for the integer type desired. Any bit width from 1 bit to
+ 2<sup>23</sup>-1 (about 8 million) can be specified.</p>
+
+<h5>Syntax:</h5>
+<pre>
+ iN
+</pre>
+
+<p>The number of bits the integer will occupy is specified by the <tt>N</tt>
+ value.</p>
+
+<h5>Examples:</h5>
+<table class="layout">
+ <tr class="layout">
+ <td class="left"><tt>i1</tt></td>
+ <td class="left">a single-bit integer.</td>
+ </tr>
+ <tr class="layout">
+ <td class="left"><tt>i32</tt></td>
+ <td class="left">a 32-bit integer.</td>
+ </tr>
+ <tr class="layout">
+ <td class="left"><tt>i1942652</tt></td>
+ <td class="left">a really big integer of over 1 million bits.</td>
+ </tr>
+</table>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="t_floating">Floating Point Types</a>
+</h4>
+
+<div>
+
+<table>
+ <tbody>
+ <tr><th>Type</th><th>Description</th></tr>
+ <tr><td><tt>half</tt></td><td>16-bit floating point value</td></tr>
+ <tr><td><tt>float</tt></td><td>32-bit floating point value</td></tr>
+ <tr><td><tt>double</tt></td><td>64-bit floating point value</td></tr>
+ <tr><td><tt>fp128</tt></td><td>128-bit floating point value (112-bit mantissa)</td></tr>
+ <tr><td><tt>x86_fp80</tt></td><td>80-bit floating point value (X87)</td></tr>
+ <tr><td><tt>ppc_fp128</tt></td><td>128-bit floating point value (two 64-bits)</td></tr>
+ </tbody>
+</table>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="t_x86mmx">X86mmx Type</a>
+</h4>
+
+<div>
+
+<h5>Overview:</h5>
+<p>The x86mmx type represents a value held in an MMX register on an x86 machine. The operations allowed on it are quite limited: parameters and return values, load and store, and bitcast. User-specified MMX instructions are represented as intrinsic or asm calls with arguments and/or results of this type. There are no arrays, vectors or constants of this type.</p>
+
+<h5>Syntax:</h5>
+<pre>
+ x86mmx
+</pre>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="t_void">Void Type</a>
+</h4>
+
+<div>
+
+<h5>Overview:</h5>
+<p>The void type does not represent any value and has no size.</p>
+
+<h5>Syntax:</h5>
+<pre>
+ void
+</pre>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="t_label">Label Type</a>
+</h4>
+
+<div>
+
+<h5>Overview:</h5>
+<p>The label type represents code labels.</p>
+
+<h5>Syntax:</h5>
+<pre>
+ label
+</pre>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="t_metadata">Metadata Type</a>
+</h4>
+
+<div>
+
+<h5>Overview:</h5>
+<p>The metadata type represents embedded metadata. No derived types may be
+ created from metadata except for <a href="#t_function">function</a>
+ arguments.</p>
+
+<h5>Syntax:</h5>
+<pre>
+ metadata
+</pre>
+
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="t_derived">Derived Types</a>
+</h3>
+
+<div>
+
+<p>The real power in LLVM comes from the derived types in the system. This is
+ what allows a programmer to represent arrays, functions, pointers, and other
+ useful types. Each of these types contains one or more element types which
+ may be a primitive type, or another derived type. For example, it is
+ possible to have a two dimensional array, using an array as the element type
+ of another array.</p>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="t_aggregate">Aggregate Types</a>
+</h4>
+
+<div>
+
+<p>Aggregate Types are a subset of derived types that can contain multiple
+ member types. <a href="#t_array">Arrays</a> and
+ <a href="#t_struct">structs</a> are aggregate types.
+ <a href="#t_vector">Vectors</a> are not considered to be aggregate types.</p>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="t_array">Array Type</a>
+</h4>
+
+<div>
+
+<h5>Overview:</h5>
+<p>The array type is a very simple derived type that arranges elements
+ sequentially in memory. The array type requires a size (number of elements)
+ and an underlying data type.</p>
+
+<h5>Syntax:</h5>
+<pre>
+ [&lt;# elements&gt; x &lt;elementtype&gt;]
+</pre>
+
+<p>The number of elements is a constant integer value; <tt>elementtype</tt> may
+ be any type with a size.</p>
+
+<h5>Examples:</h5>
+<table class="layout">
+ <tr class="layout">
+ <td class="left"><tt>[40 x i32]</tt></td>
+ <td class="left">Array of 40 32-bit integer values.</td>
+ </tr>
+ <tr class="layout">
+ <td class="left"><tt>[41 x i32]</tt></td>
+ <td class="left">Array of 41 32-bit integer values.</td>
+ </tr>
+ <tr class="layout">
+ <td class="left"><tt>[4 x i8]</tt></td>
+ <td class="left">Array of 4 8-bit integer values.</td>
+ </tr>
+</table>
+<p>Here are some examples of multidimensional arrays:</p>
+<table class="layout">
+ <tr class="layout">
+ <td class="left"><tt>[3 x [4 x i32]]</tt></td>
+ <td class="left">3x4 array of 32-bit integer values.</td>
+ </tr>
+ <tr class="layout">
+ <td class="left"><tt>[12 x [10 x float]]</tt></td>
+ <td class="left">12x10 array of single precision floating point values.</td>
+ </tr>
+ <tr class="layout">
+ <td class="left"><tt>[2 x [3 x [4 x i16]]]</tt></td>
+ <td class="left">2x3x4 array of 16-bit integer values.</td>
+ </tr>
+</table>
+
+<p>There is no restriction on indexing beyond the end of the array implied by
+ a static type (though there are restrictions on indexing beyond the bounds
+ of an allocated object in some cases). This means that single-dimension
+ 'variable sized array' addressing can be implemented in LLVM with a zero
+ length array type. An implementation of 'pascal style arrays' in LLVM could
+ use the type "<tt>{ i32, [0 x float]}</tt>", for example.</p>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="t_function">Function Type</a>
+</h4>
+
+<div>
+
+<h5>Overview:</h5>
+<p>The function type can be thought of as a function signature. It consists of
+ a return type and a list of formal parameter types. The return type of a
+ function type is a first class type or a void type.</p>
+
+<h5>Syntax:</h5>
+<pre>
+ &lt;returntype&gt; (&lt;parameter list&gt;)
+</pre>
+
+<p>...where '<tt>&lt;parameter list&gt;</tt>' is a comma-separated list of type
+ specifiers. Optionally, the parameter list may include a type <tt>...</tt>,
+ which indicates that the function takes a variable number of arguments.
+ Variable argument functions can access their arguments with
+ the <a href="#int_varargs">variable argument handling intrinsic</a>
+ functions. '<tt>&lt;returntype&gt;</tt>' is any type except
+ <a href="#t_label">label</a>.</p>
+
+<h5>Examples:</h5>
+<table class="layout">
+ <tr class="layout">
+ <td class="left"><tt>i32 (i32)</tt></td>
+ <td class="left">function taking an <tt>i32</tt>, returning an <tt>i32</tt>
+ </td>
+ </tr><tr class="layout">
+ <td class="left"><tt>float&nbsp;(i16,&nbsp;i32&nbsp;*)&nbsp;*
+ </tt></td>
+ <td class="left"><a href="#t_pointer">Pointer</a> to a function that takes
+ an <tt>i16</tt> and a <a href="#t_pointer">pointer</a> to <tt>i32</tt>,
+ returning <tt>float</tt>.
+ </td>
+ </tr><tr class="layout">
+ <td class="left"><tt>i32 (i8*, ...)</tt></td>
+ <td class="left">A vararg function that takes at least one
+ <a href="#t_pointer">pointer</a> to <tt>i8 </tt> (char in C),
+ which returns an integer. This is the signature for <tt>printf</tt> in
+ LLVM.
+ </td>
+ </tr><tr class="layout">
+ <td class="left"><tt>{i32, i32} (i32)</tt></td>
+ <td class="left">A function taking an <tt>i32</tt>, returning a
+ <a href="#t_struct">structure</a> containing two <tt>i32</tt> values
+ </td>
+ </tr>
+</table>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="t_struct">Structure Type</a>
+</h4>
+
+<div>
+
+<h5>Overview:</h5>
+<p>The structure type is used to represent a collection of data members together
+ in memory. The elements of a structure may be any type that has a size.</p>
+
+<p>Structures in memory are accessed using '<tt><a href="#i_load">load</a></tt>'
+ and '<tt><a href="#i_store">store</a></tt>' by getting a pointer to a field
+ with the '<tt><a href="#i_getelementptr">getelementptr</a></tt>' instruction.
+ Structures in registers are accessed using the
+ '<tt><a href="#i_extractvalue">extractvalue</a></tt>' and
+ '<tt><a href="#i_insertvalue">insertvalue</a></tt>' instructions.</p>
+
+<p>Structures may optionally be "packed" structures, which indicate that the
+ alignment of the struct is one byte, and that there is no padding between
+ the elements. In non-packed structs, padding between field types is inserted
+ as defined by the TargetData string in the module, which is required to match
+ what the underlying code generator expects.</p>
+
+<p>Structures can either be "literal" or "identified". A literal structure is
+ defined inline with other types (e.g. <tt>{i32, i32}*</tt>) whereas identified
+ types are always defined at the top level with a name. Literal types are
+ uniqued by their contents and can never be recursive or opaque since there is
+ no way to write one. Identified types can be recursive, can be opaque, and are
+ never uniqued.
+</p>
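+
+<p>For example, a recursive identified type such as a singly linked list node
+ (a hedged sketch; the name <tt>%node</tt> is hypothetical) can only be written
+ as an identified type:</p>
+
+<pre class="doc_code">
+%node = type { i32, %node* }        <i>; recursive, so it must be identified</i>
+</pre>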
+
+<h5>Syntax:</h5>
+<pre>
+ %T1 = type { &lt;type list&gt; } <i>; Identified normal struct type</i>
+ %T2 = type &lt;{ &lt;type list&gt; }&gt; <i>; Identified packed struct type</i>
+</pre>
+
+<h5>Examples:</h5>
+<table class="layout">
+ <tr class="layout">
+ <td class="left"><tt>{ i32, i32, i32 }</tt></td>
+ <td class="left">A triple of three <tt>i32</tt> values</td>
+ </tr>
+ <tr class="layout">
+ <td class="left"><tt>{&nbsp;float,&nbsp;i32&nbsp;(i32)&nbsp;*&nbsp;}</tt></td>
+ <td class="left">A pair, where the first element is a <tt>float</tt> and the
+ second element is a <a href="#t_pointer">pointer</a> to a
+ <a href="#t_function">function</a> that takes an <tt>i32</tt>, returning
+ an <tt>i32</tt>.</td>
+ </tr>
+ <tr class="layout">
+ <td class="left"><tt>&lt;{ i8, i32 }&gt;</tt></td>
+ <td class="left">A packed struct known to be 5 bytes in size.</td>
+ </tr>
+</table>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="t_opaque">Opaque Structure Types</a>
+</h4>
+
+<div>
+
+<h5>Overview:</h5>
+<p>Opaque structure types are used to represent named structure types that do
+ not have a body specified. This corresponds (for example) to the C notion of
+ a forward declared structure.</p>
+
+<h5>Syntax:</h5>
+<pre>
+ %X = type opaque
+ %52 = type opaque
+</pre>
+
+<h5>Examples:</h5>
+<table class="layout">
+ <tr class="layout">
+ <td class="left"><tt>opaque</tt></td>
+ <td class="left">An opaque type.</td>
+ </tr>
+</table>
+
+</div>
+
+
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="t_pointer">Pointer Type</a>
+</h4>
+
+<div>
+
+<h5>Overview:</h5>
+<p>The pointer type is used to specify memory locations.
+ Pointers are commonly used to reference objects in memory.</p>
+
+<p>Pointer types may have an optional address space attribute defining the
+ numbered address space where the pointed-to object resides. The default
+ address space is number zero. The semantics of non-zero address
+ spaces are target-specific.</p>
+
+<p>Note that LLVM does not permit pointers to void (<tt>void*</tt>) nor does it
+ permit pointers to labels (<tt>label*</tt>). Use <tt>i8*</tt> instead.</p>
+
+<h5>Syntax:</h5>
+<pre>
+ &lt;type&gt; *
+</pre>
+
+<h5>Examples:</h5>
+<table class="layout">
+ <tr class="layout">
+ <td class="left"><tt>[4 x i32]*</tt></td>
+ <td class="left">A <a href="#t_pointer">pointer</a> to <a
+ href="#t_array">array</a> of four <tt>i32</tt> values.</td>
+ </tr>
+ <tr class="layout">
+ <td class="left"><tt>i32 (i32*) *</tt></td>
+ <td class="left"> A <a href="#t_pointer">pointer</a> to a <a
+ href="#t_function">function</a> that takes an <tt>i32*</tt>, returning an
+ <tt>i32</tt>.</td>
+ </tr>
+ <tr class="layout">
+ <td class="left"><tt>i32 addrspace(5)*</tt></td>
+ <td class="left">A <a href="#t_pointer">pointer</a> to an <tt>i32</tt> value
+ that resides in address space #5.</td>
+ </tr>
+</table>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="t_vector">Vector Type</a>
+</h4>
+
+<div>
+
+<h5>Overview:</h5>
+<p>A vector type is a simple derived type that represents a vector of elements.
+ Vector types are used when multiple primitive data values are operated on in parallel
+ using a single instruction (SIMD). A vector type requires a size (number of
+ elements) and an underlying primitive data type. Vector types are considered
+ <a href="#t_firstclass">first class</a>.</p>
+
+<h5>Syntax:</h5>
+<pre>
+ &lt; &lt;# elements&gt; x &lt;elementtype&gt; &gt;
+</pre>
+
+<p>The number of elements is a constant integer value larger than 0; elementtype
+ may be any integer or floating point type, or a pointer to these types.
+ Vectors of size zero are not allowed. </p>
+
+<h5>Examples:</h5>
+<table class="layout">
+ <tr class="layout">
+ <td class="left"><tt>&lt;4 x i32&gt;</tt></td>
+ <td class="left">Vector of 4 32-bit integer values.</td>
+ </tr>
+ <tr class="layout">
+ <td class="left"><tt>&lt;8 x float&gt;</tt></td>
+ <td class="left">Vector of 8 32-bit floating-point values.</td>
+ </tr>
+ <tr class="layout">
+ <td class="left"><tt>&lt;2 x i64&gt;</tt></td>
+ <td class="left">Vector of 2 64-bit integer values.</td>
+ </tr>
+ <tr class="layout">
+ <td class="left"><tt>&lt;4 x i64*&gt;</tt></td>
+ <td class="left">Vector of 4 pointers to 64-bit integer values.</td>
+ </tr>
+</table>
+
+</div>
+
+</div>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="constants">Constants</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>LLVM has several different basic types of constants. This section describes
+ them all and their syntax.</p>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="simpleconstants">Simple Constants</a>
+</h3>
+
+<div>
+
+<dl>
+ <dt><b>Boolean constants</b></dt>
+ <dd>The two strings '<tt>true</tt>' and '<tt>false</tt>' are both valid
+ constants of the <tt><a href="#t_integer">i1</a></tt> type.</dd>
+
+ <dt><b>Integer constants</b></dt>
+ <dd>Standard integers (such as '4') are constants of
+ the <a href="#t_integer">integer</a> type. Negative numbers may be used
+ with integer types.</dd>
+
+ <dt><b>Floating point constants</b></dt>
+ <dd>Floating point constants use standard decimal notation (e.g. 123.421),
+ exponential notation (e.g. 1.23421e+2), or a more precise hexadecimal
+ notation (see below). The assembler requires the exact decimal value of a
+ floating-point constant. For example, the assembler accepts 1.25 but
+ rejects 1.3 because 1.3 is a repeating decimal in binary. Floating point
+ constants must have a <a href="#t_floating">floating point</a> type. </dd>
+
+ <dt><b>Null pointer constants</b></dt>
+ <dd>The identifier '<tt>null</tt>' is recognized as a null pointer constant
+ and must be of <a href="#t_pointer">pointer type</a>.</dd>
+</dl>
+
+<p>The one non-intuitive notation for constants is the hexadecimal form of
+ floating point constants. For example, the form '<tt>double
+ 0x432ff973cafa8000</tt>' is equivalent to (but harder to read than)
+ '<tt>double 4.5e+15</tt>'. The only time hexadecimal floating point
+ constants are required (and the only time that they are generated by the
+ disassembler) is when a floating point constant must be emitted but it cannot
+ be represented as a decimal floating point number in a reasonable number of
+ digits. For example, NaN's, infinities, and other special values are
+ represented in their IEEE hexadecimal format so that assembly and disassembly
+ do not cause any bits to change in the constants.</p>
+
+<p>When using the hexadecimal form, constants of types half, float, and double are
+ represented using the 16-digit form shown above (which matches the IEEE754
+ representation for double); half and float values must, however, be exactly
+ representable as IEEE754 half and single precision, respectively.
+ Hexadecimal format is always used
+ for long double, and there are three forms of long double. The 80-bit format
+ used by x86 is represented as <tt>0xK</tt> followed by 20 hexadecimal digits.
+ The 128-bit format used by PowerPC (two adjacent doubles) is represented
+ by <tt>0xM</tt> followed by 32 hexadecimal digits. The IEEE 128-bit format
+ is represented by <tt>0xL</tt> followed by 32 hexadecimal digits; no
+ currently supported target uses this format. Long doubles will only work if
+ they match the long double format on your target. The IEEE 16-bit format
+ (half precision) is represented by <tt>0xH</tt> followed by 4 hexadecimal
+ digits. All hexadecimal formats are big-endian (sign bit at the left).</p>
+
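+<p>For example (a hedged sketch; the global names are hypothetical), special
+ values such as infinities must be written in hexadecimal form:</p>
+
+<pre class="doc_code">
+@dbl_inf  = global double 0x7FF0000000000000    <i>; +infinity as a double</i>
+@half_inf = global half   0xH7C00               <i>; +infinity as a half</i>
+</pre>
+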
+<p>There are no constants of type x86mmx.</p>
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+<a name="aggregateconstants"></a> <!-- old anchor -->
+<a name="complexconstants">Complex Constants</a>
+</h3>
+
+<div>
+
+<p>Complex constants are a (potentially recursive) combination of simple
+ constants and smaller complex constants.</p>
+
+<dl>
+ <dt><b>Structure constants</b></dt>
+ <dd>Structure constants are represented with notation similar to structure
+ type definitions (a comma separated list of elements, surrounded by braces
+ (<tt>{}</tt>)). For example: "<tt>{ i32 4, float 17.0, i32* @G }</tt>",
+ where "<tt>@G</tt>" is declared as "<tt>@G = external global i32</tt>".
+ Structure constants must have <a href="#t_struct">structure type</a>, and
+ the number and types of elements must match those specified by the
+ type.</dd>
+
+ <dt><b>Array constants</b></dt>
+ <dd>Array constants are represented with notation similar to array type
+ definitions (a comma separated list of elements, surrounded by square
+ brackets (<tt>[]</tt>)). For example: "<tt>[ i32 42, i32 11, i32 74
+ ]</tt>". Array constants must have <a href="#t_array">array type</a>, and
+ the number and types of elements must match those specified by the
+ type.</dd>
+
+ <dt><b>Vector constants</b></dt>
+ <dd>Vector constants are represented with notation similar to vector type
+ definitions (a comma separated list of elements, surrounded by
+ less-than/greater-than's (<tt>&lt;&gt;</tt>)). For example: "<tt>&lt; i32
+ 42, i32 11, i32 74, i32 100 &gt;</tt>". Vector constants must
+ have <a href="#t_vector">vector type</a>, and the number and types of
+ elements must match those specified by the type.</dd>
+
+ <dt><b>Zero initialization</b></dt>
+ <dd>The string '<tt>zeroinitializer</tt>' can be used to zero initialize a
+ value of <em>any</em> type, including scalar and
+ <a href="#t_aggregate">aggregate</a> types.
+ This is often used to avoid having to print large zero initializers
+ (e.g. for large arrays) and is always exactly equivalent to using explicit
+ zero initializers.</dd>
+
+ <dt><b>Metadata node</b></dt>
+ <dd>A metadata node is a structure-like constant with
+ <a href="#t_metadata">metadata type</a>. For example: "<tt>metadata !{
+ i32 0, metadata !"test" }</tt>". Unlike other constants that are meant to
+ be interpreted as part of the instruction stream, metadata is a place to
+ attach additional information such as debug info.</dd>
+</dl>
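+
+<p>For instance, a large zero-filled array (a hedged sketch; <tt>@table</tt> is
+ a hypothetical name) can be written compactly with
+ '<tt>zeroinitializer</tt>':</p>
+
+<pre class="doc_code">
+@table = global [4096 x i32] zeroinitializer
+</pre>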
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="globalconstants">Global Variable and Function Addresses</a>
+</h3>
+
+<div>
+
+<p>The addresses of <a href="#globalvars">global variables</a>
+ and <a href="#functionstructure">functions</a> are always implicitly valid
+ (link-time) constants. These constants are explicitly referenced when
+ the <a href="#identifiers">identifier for the global</a> is used and always
+ have <a href="#t_pointer">pointer</a> type. For example, the following is a
+ legal LLVM file:</p>
+
+<pre class="doc_code">
+@X = global i32 17
+@Y = global i32 42
+@Z = global [2 x i32*] [ i32* @X, i32* @Y ]
+</pre>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="undefvalues">Undefined Values</a>
+</h3>
+
+<div>
+
+<p>The string '<tt>undef</tt>' can be used anywhere a constant is expected, and
+ indicates that the user of the value may receive an unspecified bit-pattern.
+ Undefined values may be of any type (other than '<tt>label</tt>'
+ or '<tt>void</tt>') and be used anywhere a constant is permitted.</p>
+
+<p>Undefined values are useful because they indicate to the compiler that the
+ program is well defined no matter what value is used. This gives the
+ compiler more freedom to optimize. Here are some examples of (potentially
+ surprising) transformations that are valid (in pseudo IR):</p>
+
+
+<pre class="doc_code">
+ %A = add %X, undef
+ %B = sub %X, undef
+ %C = xor %X, undef
+Safe:
+ %A = undef
+ %B = undef
+ %C = undef
+</pre>
+
+<p>This is safe because all of the output bits are affected by the undef bits.
+ Any output bit can have a zero or one depending on the input bits.</p>
+
+<pre class="doc_code">
+ %A = or %X, undef
+ %B = and %X, undef
+Safe:
+ %A = -1
+ %B = 0
+Unsafe:
+ %A = undef
+ %B = undef
+</pre>
+
+<p>These logical operations have bits that are not always affected by the input.
+ For example, if <tt>%X</tt> has a zero bit, then the output of the
+ '<tt>and</tt>' operation will always be a zero for that bit, no matter what
+ the corresponding bit from the '<tt>undef</tt>' is. As such, it is unsafe to
+ optimize or assume that the result of the '<tt>and</tt>' is '<tt>undef</tt>'.
+ However, it is safe to assume that all bits of the '<tt>undef</tt>' could be
+ 0, and optimize the '<tt>and</tt>' to 0. Likewise, it is safe to assume that
+ all the bits of the '<tt>undef</tt>' operand to the '<tt>or</tt>' could be
+ set, allowing the '<tt>or</tt>' to be folded to -1.</p>
+
+<pre class="doc_code">
+ %A = select undef, %X, %Y
+ %B = select undef, 42, %Y
+ %C = select %X, %Y, undef
+Safe:
+ %A = %X (or %Y)
+ %B = 42 (or %Y)
+ %C = %Y
+Unsafe:
+ %A = undef
+ %B = undef
+ %C = undef
+</pre>
+
+<p>This set of examples shows that undefined '<tt>select</tt>' (and conditional
+ branch) conditions can go <em>either way</em>, but they have to come from one
+ of the two operands. In the <tt>%A</tt> example, if <tt>%X</tt> and
+ <tt>%Y</tt> were both known to have a clear low bit, then <tt>%A</tt> would
+ have to have a cleared low bit. However, in the <tt>%C</tt> example, the
+ optimizer is allowed to assume that the '<tt>undef</tt>' operand could be the
+ same as <tt>%Y</tt>, allowing the whole '<tt>select</tt>' to be
+ eliminated.</p>
+
+<pre class="doc_code">
+ %A = xor undef, undef
+
+ %B = undef
+ %C = xor %B, %B
+
+ %D = undef
+ %E = icmp lt %D, 4
+ %F = icmp gte %D, 4
+
+Safe:
+ %A = undef
+ %B = undef
+ %C = undef
+ %D = undef
+ %E = undef
+ %F = undef
+</pre>
+
+<p>This example points out that two '<tt>undef</tt>' operands are not
+ necessarily the same. This can be surprising to people (and also matches C
+ semantics), who assume that "<tt>X^X</tt>" is always zero, even
+ if <tt>X</tt> is undefined. This isn't true for a number of reasons, but the
+ short answer is that an '<tt>undef</tt>' "variable" can arbitrarily change
+ its value over its "live range". This is true because the variable doesn't
+ actually <em>have a live range</em>. Instead, the value is logically read
+ from arbitrary registers that happen to be around when needed, so the value
+ is not necessarily consistent over time. In fact, <tt>%A</tt> and <tt>%C</tt>
+ need to have the same semantics or the core LLVM "replace all uses with"
+ concept would not hold.</p>
+
+<pre class="doc_code">
+a: %A = fdiv undef, %X
+b: %B = fdiv %X, undef
+Safe:
+a: %A = undef
+b: unreachable
+</pre>
+
+<p>These examples show the crucial difference between an <em>undefined
+ value</em> and <em>undefined behavior</em>. An undefined value (like
+ '<tt>undef</tt>') is allowed to have an arbitrary bit-pattern. This means that
+ the <tt>%A</tt> operation can be constant folded to '<tt>undef</tt>', because
+ the '<tt>undef</tt>' could be an SNaN, and <tt>fdiv</tt> is not (currently)
+ defined on SNaN's. However, in the second example, we can make a more
+ aggressive assumption: because the <tt>undef</tt> is allowed to be an
+ arbitrary value, we are allowed to assume that it could be zero. Since a
+ divide by zero has <em>undefined behavior</em>, we are allowed to assume that
+ the operation does not execute at all. This allows us to delete the divide and
+ all code after it. Because the undefined operation "can't happen", the
+ optimizer can assume that it occurs in dead code.</p>
+
+<pre class="doc_code">
+a: store undef -> %X
+b: store %X -> undef
+Safe:
+a: &lt;deleted&gt;
+b: unreachable
+</pre>
+
+<p>These examples reiterate the <tt>fdiv</tt> example: a store <em>of</em> an
+ undefined value can be assumed to not have any effect; we can assume that the
+ value is overwritten with bits that happen to match what was already there.
+ However, a store <em>to</em> an undefined location could clobber arbitrary
+ memory, therefore, it has undefined behavior.</p>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="poisonvalues">Poison Values</a>
+</h3>
+
+<div>
+
+<p>Poison values are similar to <a href="#undefvalues">undef values</a>;
+   however, they also represent the fact that an instruction or constant
+   expression which cannot evoke side effects has nevertheless detected a
+   condition which results in undefined behavior.</p>
+
+<p>There is currently no way of representing a poison value in the IR; poison
+   values exist only when produced by operations such as
+   <a href="#i_add"><tt>add</tt></a> with the <tt>nsw</tt> flag.</p>
+
+<p>Poison value behavior is defined in terms of value <i>dependence</i>:</p>
+
+<ul>
+<li>Values other than <a href="#i_phi"><tt>phi</tt></a> nodes depend on
+ their operands.</li>
+
+<li><a href="#i_phi"><tt>Phi</tt></a> nodes depend on the operand corresponding
+ to their dynamic predecessor basic block.</li>
+
+<li>Function arguments depend on the corresponding actual argument values in
+ the dynamic callers of their functions.</li>
+
+<li><a href="#i_call"><tt>Call</tt></a> instructions depend on the
+ <a href="#i_ret"><tt>ret</tt></a> instructions that dynamically transfer
+ control back to them.</li>
+
+<li><a href="#i_invoke"><tt>Invoke</tt></a> instructions depend on the
+ <a href="#i_ret"><tt>ret</tt></a>, <a href="#i_resume"><tt>resume</tt></a>,
+ or exception-throwing call instructions that dynamically transfer control
+ back to them.</li>
+
+<li>Non-volatile loads and stores depend on the most recent stores to all of the
+ referenced memory addresses, following the order in the IR
+ (including loads and stores implied by intrinsics such as
+   <a href="#int_memcpy"><tt>@llvm.memcpy</tt></a>).</li>
+
+<!-- TODO: In the case of multiple threads, this only applies if the store
+ "happens-before" the load or store. -->
+
+<!-- TODO: floating-point exception state -->
+
+<li>An instruction with externally visible side effects depends on the most
+ recent preceding instruction with externally visible side effects, following
+ the order in the IR. (This includes
+ <a href="#volatile">volatile operations</a>.)</li>
+
+<li>An instruction <i>control-depends</i> on a
+ <a href="#terminators">terminator instruction</a>
+ if the terminator instruction has multiple successors and the instruction
+ is always executed when control transfers to one of the successors, and
+ may not be executed when control is transferred to another.</li>
+
+<li>Additionally, an instruction also <i>control-depends</i> on a terminator
+ instruction if the set of instructions it otherwise depends on would be
+ different if the terminator had transferred control to a different
+ successor.</li>
+
+<li>Dependence is transitive.</li>
+
+</ul>
+
+<p>Poison values have the same behavior as <a href="#undefvalues">undef values</a>,
+   with the additional effect that any instruction which has a <i>dependence</i>
+   on a poison value has undefined behavior.</p>
+
+<p>Here are some examples:</p>
+
+<pre class="doc_code">
+entry:
+ %poison = sub nuw i32 0, 1 ; Results in a poison value.
+ %still_poison = and i32 %poison, 0 ; 0, but also poison.
+ %poison_yet_again = getelementptr i32* @h, i32 %still_poison
+ store i32 0, i32* %poison_yet_again ; memory at @h[0] is poisoned
+
+ store i32 %poison, i32* @g ; Poison value stored to memory.
+ %poison2 = load i32* @g ; Poison value loaded back from memory.
+
+ store volatile i32 %poison, i32* @g ; External observation; undefined behavior.
+
+ %narrowaddr = bitcast i32* @g to i16*
+ %wideaddr = bitcast i32* @g to i64*
+ %poison3 = load i16* %narrowaddr ; Returns a poison value.
+ %poison4 = load i64* %wideaddr ; Returns a poison value.
+
+ %cmp = icmp slt i32 %poison, 0 ; Returns a poison value.
+ br i1 %cmp, label %true, label %end ; Branch to either destination.
+
+true:
+ store volatile i32 0, i32* @g ; This is control-dependent on %cmp, so
+ ; it has undefined behavior.
+ br label %end
+
+end:
+ %p = phi i32 [ 0, %entry ], [ 1, %true ]
+ ; Both edges into this PHI are
+ ; control-dependent on %cmp, so this
+ ; always results in a poison value.
+
+ store volatile i32 0, i32* @g ; This would depend on the store in %true
+ ; if %cmp is true, or the store in %entry
+ ; otherwise, so this is undefined behavior.
+
+ br i1 %cmp, label %second_true, label %second_end
+ ; The same branch again, but this time the
+ ; true block doesn't have side effects.
+
+second_true:
+ ; No side effects!
+ ret void
+
+second_end:
+ store volatile i32 0, i32* @g ; This time, the instruction always depends
+ ; on the store in %end. Also, it is
+ ; control-equivalent to %end, so this is
+ ; well-defined (ignoring earlier undefined
+ ; behavior in this example).
+</pre>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="blockaddress">Addresses of Basic Blocks</a>
+</h3>
+
+<div>
+
+<p><b><tt>blockaddress(@function, %block)</tt></b></p>
+
+<p>The '<tt>blockaddress</tt>' constant computes the address of the specified
+ basic block in the specified function, and always has an i8* type. Taking
+ the address of the entry block is illegal.</p>
+
+<p>This value only has defined behavior when used as an operand to the
+ '<a href="#i_indirectbr"><tt>indirectbr</tt></a>' instruction, or for
+   comparisons against null. Pointer equality tests between label addresses
+   result in undefined behavior &mdash; though, again, comparison against null
+   is ok, and no label is equal to the null pointer. The value may be passed
+   around as an opaque, pointer-sized value as long as the bits are not
+   inspected. This
+ allows <tt>ptrtoint</tt> and arithmetic to be performed on these values so
+ long as the original value is reconstituted before the <tt>indirectbr</tt>
+ instruction.</p>
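+
+<p>As an illustrative sketch (the function and label names are assumptions, and
+   <tt>%i</tt> is assumed to be 0 or 1), block addresses are typically stored
+   in a table and consumed by an
+   '<a href="#i_indirectbr"><tt>indirectbr</tt></a>' instruction:</p>
+
+<pre class="doc_code">
+@targets = internal constant [2 x i8*] [ i8* blockaddress(@jump, %bb1),
+                                         i8* blockaddress(@jump, %bb2) ]
+
+define void @jump(i32 %i) {
+entry:
+  %slot = getelementptr [2 x i8*]* @targets, i32 0, i32 %i
+  %addr = load i8** %slot
+  indirectbr i8* %addr, [ label %bb1, label %bb2 ]
+bb1:
+  ret void
+bb2:
+  ret void
+}
+</pre>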
+
+<p>Finally, some targets may provide defined semantics when using the value as
+   an operand to inline assembly, but that is target specific.</p>
+
+</div>
+
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="constantexprs">Constant Expressions</a>
+</h3>
+
+<div>
+
+<p>Constant expressions are used to allow expressions involving other constants
+ to be used as constants. Constant expressions may be of
+ any <a href="#t_firstclass">first class</a> type and may involve any LLVM
+ operation that does not have side effects (e.g. load and call are not
+ supported). The following is the syntax for constant expressions:</p>
+
+<dl>
+ <dt><b><tt>trunc (CST to TYPE)</tt></b></dt>
+ <dd>Truncate a constant to another type. The bit size of CST must be larger
+ than the bit size of TYPE. Both types must be integers.</dd>
+
+ <dt><b><tt>zext (CST to TYPE)</tt></b></dt>
+ <dd>Zero extend a constant to another type. The bit size of CST must be
+ smaller than the bit size of TYPE. Both types must be integers.</dd>
+
+ <dt><b><tt>sext (CST to TYPE)</tt></b></dt>
+ <dd>Sign extend a constant to another type. The bit size of CST must be
+ smaller than the bit size of TYPE. Both types must be integers.</dd>
+
+ <dt><b><tt>fptrunc (CST to TYPE)</tt></b></dt>
+ <dd>Truncate a floating point constant to another floating point type. The
+ size of CST must be larger than the size of TYPE. Both types must be
+ floating point.</dd>
+
+ <dt><b><tt>fpext (CST to TYPE)</tt></b></dt>
+ <dd>Floating point extend a constant to another type. The size of CST must be
+ smaller or equal to the size of TYPE. Both types must be floating
+ point.</dd>
+
+ <dt><b><tt>fptoui (CST to TYPE)</tt></b></dt>
+ <dd>Convert a floating point constant to the corresponding unsigned integer
+ constant. TYPE must be a scalar or vector integer type. CST must be of
+ scalar or vector floating point type. Both CST and TYPE must be scalars,
+ or vectors of the same number of elements. If the value won't fit in the
+ integer type, the results are undefined.</dd>
+
+ <dt><b><tt>fptosi (CST to TYPE)</tt></b></dt>
+ <dd>Convert a floating point constant to the corresponding signed integer
+ constant. TYPE must be a scalar or vector integer type. CST must be of
+ scalar or vector floating point type. Both CST and TYPE must be scalars,
+ or vectors of the same number of elements. If the value won't fit in the
+ integer type, the results are undefined.</dd>
+
+ <dt><b><tt>uitofp (CST to TYPE)</tt></b></dt>
+ <dd>Convert an unsigned integer constant to the corresponding floating point
+ constant. TYPE must be a scalar or vector floating point type. CST must be
+ of scalar or vector integer type. Both CST and TYPE must be scalars, or
+ vectors of the same number of elements. If the value won't fit in the
+ floating point type, the results are undefined.</dd>
+
+ <dt><b><tt>sitofp (CST to TYPE)</tt></b></dt>
+ <dd>Convert a signed integer constant to the corresponding floating point
+ constant. TYPE must be a scalar or vector floating point type. CST must be
+ of scalar or vector integer type. Both CST and TYPE must be scalars, or
+ vectors of the same number of elements. If the value won't fit in the
+ floating point type, the results are undefined.</dd>
+
+ <dt><b><tt>ptrtoint (CST to TYPE)</tt></b></dt>
+  <dd>Convert a pointer typed constant to the corresponding integer constant.
+      <tt>TYPE</tt> must be an integer type. <tt>CST</tt> must be of pointer
+ type. The <tt>CST</tt> value is zero extended, truncated, or unchanged to
+ make it fit in <tt>TYPE</tt>.</dd>
+
+ <dt><b><tt>inttoptr (CST to TYPE)</tt></b></dt>
+ <dd>Convert an integer constant to a pointer constant. TYPE must be a pointer
+ type. CST must be of integer type. The CST value is zero extended,
+ truncated, or unchanged to make it fit in a pointer size. This one is
+ <i>really</i> dangerous!</dd>
+
+ <dt><b><tt>bitcast (CST to TYPE)</tt></b></dt>
+ <dd>Convert a constant, CST, to another TYPE. The constraints of the operands
+ are the same as those for the <a href="#i_bitcast">bitcast
+ instruction</a>.</dd>
+
+ <dt><b><tt>getelementptr (CSTPTR, IDX0, IDX1, ...)</tt></b></dt>
+ <dt><b><tt>getelementptr inbounds (CSTPTR, IDX0, IDX1, ...)</tt></b></dt>
+ <dd>Perform the <a href="#i_getelementptr">getelementptr operation</a> on
+ constants. As with the <a href="#i_getelementptr">getelementptr</a>
+ instruction, the index list may have zero or more indexes, which are
+ required to make sense for the type of "CSTPTR".</dd>
+
+ <dt><b><tt>select (COND, VAL1, VAL2)</tt></b></dt>
+ <dd>Perform the <a href="#i_select">select operation</a> on constants.</dd>
+
+ <dt><b><tt>icmp COND (VAL1, VAL2)</tt></b></dt>
+ <dd>Performs the <a href="#i_icmp">icmp operation</a> on constants.</dd>
+
+ <dt><b><tt>fcmp COND (VAL1, VAL2)</tt></b></dt>
+ <dd>Performs the <a href="#i_fcmp">fcmp operation</a> on constants.</dd>
+
+ <dt><b><tt>extractelement (VAL, IDX)</tt></b></dt>
+ <dd>Perform the <a href="#i_extractelement">extractelement operation</a> on
+ constants.</dd>
+
+ <dt><b><tt>insertelement (VAL, ELT, IDX)</tt></b></dt>
+ <dd>Perform the <a href="#i_insertelement">insertelement operation</a> on
+ constants.</dd>
+
+ <dt><b><tt>shufflevector (VEC1, VEC2, IDXMASK)</tt></b></dt>
+ <dd>Perform the <a href="#i_shufflevector">shufflevector operation</a> on
+ constants.</dd>
+
+ <dt><b><tt>extractvalue (VAL, IDX0, IDX1, ...)</tt></b></dt>
+ <dd>Perform the <a href="#i_extractvalue">extractvalue operation</a> on
+ constants. The index list is interpreted in a similar manner as indices in
+ a '<a href="#i_getelementptr">getelementptr</a>' operation. At least one
+ index value must be specified.</dd>
+
+ <dt><b><tt>insertvalue (VAL, ELT, IDX0, IDX1, ...)</tt></b></dt>
+ <dd>Perform the <a href="#i_insertvalue">insertvalue operation</a> on
+ constants. The index list is interpreted in a similar manner as indices in
+ a '<a href="#i_getelementptr">getelementptr</a>' operation. At least one
+ index value must be specified.</dd>
+
+ <dt><b><tt>OPCODE (LHS, RHS)</tt></b></dt>
+ <dd>Perform the specified operation of the LHS and RHS constants. OPCODE may
+ be any of the <a href="#binaryops">binary</a>
+ or <a href="#bitwiseops">bitwise binary</a> operations. The constraints
+ on operands are the same as those for the corresponding instruction
+ (e.g. no bitwise operations on floating point values are allowed).</dd>
+</dl>
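+
+<p>As an illustrative sketch (the global names are assumptions), constant
+   expressions most commonly appear in global initializers, where an
+   instruction cannot be used:</p>
+
+<pre class="doc_code">
+@arr = global [4 x i32] zeroinitializer
+@p   = global i32* getelementptr ([4 x i32]* @arr, i32 0, i32 2)
+@n   = global i64 ptrtoint (i32* getelementptr ([4 x i32]* @arr, i32 0, i32 1) to i64)
+</pre>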
+
+</div>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="othervalues">Other Values</a></h2>
+<!-- *********************************************************************** -->
+<div>
+<!-- ======================================================================= -->
+<h3>
+<a name="inlineasm">Inline Assembler Expressions</a>
+</h3>
+
+<div>
+
+<p>LLVM supports inline assembler expressions (as opposed
+ to <a href="#moduleasm">Module-Level Inline Assembly</a>) through the use of
+ a special value. This value represents the inline assembler as a string
+ (containing the instructions to emit), a list of operand constraints (stored
+ as a string), a flag that indicates whether or not the inline asm
+ expression has side effects, and a flag indicating whether the function
+ containing the asm needs to align its stack conservatively. An example
+ inline assembler expression is:</p>
+
+<pre class="doc_code">
+i32 (i32) asm "bswap $0", "=r,r"
+</pre>
+
+<p>Inline assembler expressions may <b>only</b> be used as the callee operand of
+ a <a href="#i_call"><tt>call</tt></a> or an
+ <a href="#i_invoke"><tt>invoke</tt></a> instruction.
+ Thus, typically we have:</p>
+
+<pre class="doc_code">
+%X = call i32 asm "<a href="#int_bswap">bswap</a> $0", "=r,r"(i32 %Y)
+</pre>
+
+<p>Inline asms with side effects not visible in the constraint list must be
+ marked as having side effects. This is done through the use of the
+ '<tt>sideeffect</tt>' keyword, like so:</p>
+
+<pre class="doc_code">
+call void asm sideeffect "eieio", ""()
+</pre>
+
+<p>In some cases inline asms will contain code that will not work unless the
+ stack is aligned in some way, such as calls or SSE instructions on x86,
+ yet will not contain code that does that alignment within the asm.
+ The compiler should make conservative assumptions about what the asm might
+ contain and should generate its usual stack alignment code in the prologue
+ if the '<tt>alignstack</tt>' keyword is present:</p>
+
+<pre class="doc_code">
+call void asm alignstack "eieio", ""()
+</pre>
+
+<p>Inline asms also support using non-standard assembly dialects. The assumed
+ dialect is ATT. When the '<tt>inteldialect</tt>' keyword is present, the
+ inline asm is using the Intel dialect. Currently, ATT and Intel are the
+ only supported dialects. An example is:</p>
+
+<pre class="doc_code">
+call void asm inteldialect "eieio", ""()
+</pre>
+
+<p>If multiple keywords appear, the '<tt>sideeffect</tt>' keyword must come
+   first, the '<tt>alignstack</tt>' keyword second, and the
+   '<tt>inteldialect</tt>' keyword last.</p>
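+
+<p>For instance (a minimal sketch combining all three keywords in that order,
+   reusing the illustrative "<tt>eieio</tt>" string from above):</p>
+
+<pre class="doc_code">
+call void asm sideeffect alignstack inteldialect "eieio", ""()
+</pre>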
+
+<!--
+<p>TODO: The format of the asm and constraints string still need to be
+ documented here. Constraints on what can be done (e.g. duplication, moving,
+ etc need to be documented). This is probably best done by reference to
+ another document that covers inline asm from a holistic perspective.</p>
+ -->
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="inlineasm_md">Inline Asm Metadata</a>
+</h4>
+
+<div>
+
+<p>A call instruction that wraps an inline asm node may have a
+   "<tt>!srcloc</tt>" MDNode attached to it that contains a list of constant
+   integers. If present, the code generator will use the integer as the
+   location cookie value when reporting errors through the <tt>LLVMContext</tt>
+ error reporting mechanisms. This allows a front-end to correlate backend
+ errors that occur with inline asm back to the source code that produced it.
+ For example:</p>
+
+<pre class="doc_code">
+call void asm sideeffect "something bad", ""()<b>, !srcloc !42</b>
+...
+!42 = !{ i32 1234567 }
+</pre>
+
+<p>It is up to the front-end to make sense of the magic numbers it places in the
+ IR. If the MDNode contains multiple constants, the code generator will use
+ the one that corresponds to the line of the asm that the error occurs on.</p>
+
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="metadata">Metadata Nodes and Metadata Strings</a>
+</h3>
+
+<div>
+
+<p>LLVM IR allows metadata to be attached to instructions in the program that
+ can convey extra information about the code to the optimizers and code
+ generator. One example application of metadata is source-level debug
+ information. There are two metadata primitives: strings and nodes. All
+ metadata has the <tt>metadata</tt> type and is identified in syntax by a
+ preceding exclamation point ('<tt>!</tt>').</p>
+
+<p>A metadata string is a string surrounded by double quotes. It can contain
+ any character by escaping non-printable characters with "<tt>\xx</tt>" where
+ "<tt>xx</tt>" is the two digit hex code. For example:
+ "<tt>!"test\00"</tt>".</p>
+
+<p>Metadata nodes are represented with notation similar to structure constants
+ (a comma separated list of elements, surrounded by braces and preceded by an
+   exclamation point). Metadata nodes can have any values as their operands. For
+ example:</p>
+
+<div class="doc_code">
+<pre>
+!{ metadata !"test\00", i32 10}
+</pre>
+</div>
+
+<p>A <a href="#namedmetadatastructure">named metadata</a> is a collection of
+ metadata nodes, which can be looked up in the module symbol table. For
+ example:</p>
+
+<div class="doc_code">
+<pre>
+!foo = metadata !{!4, !3}
+</pre>
+</div>
+
+<p>Metadata can be used as function arguments. Here, the <tt>llvm.dbg.value</tt>
+   function is passed two metadata arguments:</p>
+
+<div class="doc_code">
+<pre>
+call void @llvm.dbg.value(metadata !24, i64 0, metadata !25)
+</pre>
+</div>
+
+<p>Metadata can be attached to an instruction. Here, metadata <tt>!21</tt> is
+ attached to the <tt>add</tt> instruction using the <tt>!dbg</tt>
+ identifier:</p>
+
+<div class="doc_code">
+<pre>
+%indvar.next = add i64 %indvar, 1, !dbg !21
+</pre>
+</div>
+
+<p>More information about specific metadata nodes recognized by the optimizers
+ and code generator is found below.</p>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="tbaa">'<tt>tbaa</tt>' Metadata</a>
+</h4>
+
+<div>
+
+<p>In LLVM IR, memory does not have types, so LLVM's own type system is not
+ suitable for doing TBAA. Instead, metadata is added to the IR to describe
+ a type system of a higher level language. This can be used to implement
+ typical C/C++ TBAA, but it can also be used to implement custom alias
+ analysis behavior for other languages.</p>
+
+<p>The current metadata format is very simple. TBAA metadata nodes have up to
+ three fields, e.g.:</p>
+
+<div class="doc_code">
+<pre>
+!0 = metadata !{ metadata !"an example type tree" }
+!1 = metadata !{ metadata !"int", metadata !0 }
+!2 = metadata !{ metadata !"float", metadata !0 }
+!3 = metadata !{ metadata !"const float", metadata !2, i64 1 }
+</pre>
+</div>
+
+<p>The first field is an identity field. It can be any value, usually
+ a metadata string, which uniquely identifies the type. The most important
+ name in the tree is the name of the root node. Two trees with
+ different root node names are entirely disjoint, even if they
+ have leaves with common names.</p>
+
+<p>The second field identifies the type's parent node in the tree, or
+ is null or omitted for a root node. A type is considered to alias
+ all of its descendants and all of its ancestors in the tree. Also,
+ a type is considered to alias all types in other trees, so that
+ bitcode produced from multiple front-ends is handled conservatively.</p>
+
+<p>If the third field is present, it is an integer which, if equal to 1,
+   indicates that the type is "constant" (meaning
+ <tt>pointsToConstantMemory</tt> should return true; see
+ <a href="AliasAnalysis.html#OtherItfs">other useful
+ <tt>AliasAnalysis</tt> methods</a>).</p>
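+
+<p>As an illustrative sketch (reusing the <tt>!1</tt> and <tt>!2</tt> nodes
+   defined above; the value names are assumptions), the metadata is attached to
+   memory-accessing instructions with the <tt>!tbaa</tt> identifier:</p>
+
+<div class="doc_code">
+<pre>
+%val = load i32* %ptr, !tbaa !1          ; access through the "int" type
+store float %f, float* %fptr, !tbaa !2   ; access through the "float" type
+</pre>
+</div>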
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="fpmath">'<tt>fpmath</tt>' Metadata</a>
+</h4>
+
+<div>
+
+<p><tt>fpmath</tt> metadata may be attached to any instruction of floating point
+ type. It can be used to express the maximum acceptable error in the result of
+ that instruction, in ULPs, thus potentially allowing the compiler to use a
+ more efficient but less accurate method of computing it. ULP is defined as
+ follows:</p>
+
+<blockquote>
+
+<p>If <tt>x</tt> is a real number that lies between two finite consecutive
+ floating-point numbers <tt>a</tt> and <tt>b</tt>, without being equal to one
+ of them, then <tt>ulp(x) = |b - a|</tt>, otherwise <tt>ulp(x)</tt> is the
+ distance between the two non-equal finite floating-point numbers nearest
+ <tt>x</tt>. Moreover, <tt>ulp(NaN)</tt> is <tt>NaN</tt>.</p>
+
+</blockquote>
+
+<p>The metadata node shall consist of a single positive floating point number
+   representing the maximum acceptable error in ULPs, for example:</p>
+
+<div class="doc_code">
+<pre>
+!0 = metadata !{ float 2.5 } ; maximum acceptable inaccuracy is 2.5 ULPs
+</pre>
+</div>
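+
+<p>A minimal sketch of how such a node might be attached (the operand names are
+   assumptions; <tt>!0</tt> is the node defined above):</p>
+
+<div class="doc_code">
+<pre>
+%r = fdiv float %x, %y, !fpmath !0   ; result may be up to 2.5 ULPs off
+</pre>
+</div>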
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="range">'<tt>range</tt>' Metadata</a>
+</h4>
+
+<div>
+<p><tt>range</tt> metadata may be attached only to loads of integer types. It
+ expresses the possible ranges the loaded value is in. The ranges are
+ represented with a flattened list of integers. The loaded value is known to
+ be in the union of the ranges defined by each consecutive pair. Each pair
+ has the following properties:</p>
+<ul>
+ <li>The type must match the type loaded by the instruction.</li>
+ <li>The pair <tt>a,b</tt> represents the range <tt>[a,b)</tt>.</li>
+ <li>Both <tt>a</tt> and <tt>b</tt> are constants.</li>
+ <li>The range is allowed to wrap.</li>
+ <li>The range should not represent the full or empty set. That is,
+ <tt>a!=b</tt>. </li>
+</ul>
+<p> In addition, the pairs must be in signed order of the lower bound and
+ they must be non-contiguous.</p>
+
+<p>Examples:</p>
+<div class="doc_code">
+<pre>
+ %a = load i8* %x, align 1, !range !0 ; Can only be 0 or 1
+ %b = load i8* %y, align 1, !range !1 ; Can only be 255 (-1), 0 or 1
+ %c = load i8* %z, align 1, !range !2 ; Can only be 0, 1, 3, 4 or 5
+ %d = load i8* %z, align 1, !range !3 ; Can only be -2, -1, 3, 4 or 5
+...
+!0 = metadata !{ i8 0, i8 2 }
+!1 = metadata !{ i8 255, i8 2 }
+!2 = metadata !{ i8 0, i8 2, i8 3, i8 6 }
+!3 = metadata !{ i8 -2, i8 0, i8 3, i8 6 }
+</pre>
+</div>
+</div>
+</div>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2>
+ <a name="module_flags">Module Flags Metadata</a>
+</h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>Information about the module as a whole is difficult to convey to LLVM's
+ subsystems. The LLVM IR isn't sufficient to transmit this
+ information. The <tt>llvm.module.flags</tt> named metadata exists in order to
+   facilitate this. These flags are in the form of key / value pairs &mdash;
+   much like a dictionary &mdash; making it easy for any subsystem that cares
+   about a flag to look it up.</p>
+
+<p>The <tt>llvm.module.flags</tt> metadata contains a list of metadata
+ triplets. Each triplet has the following form:</p>
+
+<ul>
+  <li>The first element is a <i>behavior</i> flag, which specifies the behavior
+      when two (or more) modules are merged together and two (or more) metadata
+      flags with the same ID are encountered. The supported behaviors are
+      described below.</li>
+
+ <li>The second element is a metadata string that is a unique ID for the
+ metadata. How each ID is interpreted is documented below.</li>
+
+ <li>The third element is the value of the flag.</li>
+</ul>
+
+<p>When two (or more) modules are merged together, the resulting
+ <tt>llvm.module.flags</tt> metadata is the union of the
+   modules' <tt>llvm.module.flags</tt> metadata. The only exception is a flag
+   with the <i>Override</i> behavior, which may override another flag's value
+   (see below).</p>
+
+<p>The following behaviors are supported:</p>
+
+<table border="1" cellspacing="0" cellpadding="4">
+ <tbody>
+ <tr>
+ <th>Value</th>
+ <th>Behavior</th>
+ </tr>
+ <tr>
+ <td>1</td>
+ <td align="left">
+ <dl>
+ <dt><b>Error</b></dt>
+ <dd>Emits an error if two values disagree. It is an error to have an ID
+ with both an Error and a Warning behavior.</dd>
+ </dl>
+ </td>
+ </tr>
+ <tr>
+ <td>2</td>
+ <td align="left">
+ <dl>
+ <dt><b>Warning</b></dt>
+ <dd>Emits a warning if two values disagree.</dd>
+ </dl>
+ </td>
+ </tr>
+ <tr>
+ <td>3</td>
+ <td align="left">
+ <dl>
+ <dt><b>Require</b></dt>
+          <dd>Emits an error when the specified flag is not present or does not
+              have the specified value. It is an error for two (or more)
+ <tt>llvm.module.flags</tt> with the same ID to have the Require
+ behavior but different values. There may be multiple Require flags
+ per ID.</dd>
+ </dl>
+ </td>
+ </tr>
+ <tr>
+ <td>4</td>
+ <td align="left">
+ <dl>
+ <dt><b>Override</b></dt>
+ <dd>Uses the specified value if the two values disagree. It is an
+ error for two (or more) <tt>llvm.module.flags</tt> with the same
+ ID to have the Override behavior but different values.</dd>
+ </dl>
+ </td>
+ </tr>
+ </tbody>
+</table>
+
+<p>An example of module flags:</p>
+
+<pre class="doc_code">
+!0 = metadata !{ i32 1, metadata !"foo", i32 1 }
+!1 = metadata !{ i32 4, metadata !"bar", i32 37 }
+!2 = metadata !{ i32 2, metadata !"qux", i32 42 }
+!3 = metadata !{ i32 3, metadata !"qux",
+ metadata !{
+ metadata !"foo", i32 1
+ }
+}
+!llvm.module.flags = !{ !0, !1, !2, !3 }
+</pre>
+
+<ul>
+ <li><p>Metadata <tt>!0</tt> has the ID <tt>!"foo"</tt> and the value '1'. The
+ behavior if two or more <tt>!"foo"</tt> flags are seen is to emit an
+ error if their values are not equal.</p></li>
+
+ <li><p>Metadata <tt>!1</tt> has the ID <tt>!"bar"</tt> and the value '37'. The
+ behavior if two or more <tt>!"bar"</tt> flags are seen is to use the
+ value '37' if their values are not equal.</p></li>
+
+ <li><p>Metadata <tt>!2</tt> has the ID <tt>!"qux"</tt> and the value '42'. The
+ behavior if two or more <tt>!"qux"</tt> flags are seen is to emit a
+ warning if their values are not equal.</p></li>
+
+ <li><p>Metadata <tt>!3</tt> has the ID <tt>!"qux"</tt> and the value:</p>
+
+<pre class="doc_code">
+metadata !{ metadata !"foo", i32 1 }
+</pre>
+
+ <p>The behavior is to emit an error if the <tt>llvm.module.flags</tt> does
+ not contain a flag with the ID <tt>!"foo"</tt> that has the value
+ '1'. If two or more <tt>!"qux"</tt> flags exist, then they must have
+ the same value or an error will be issued.</p></li>
+</ul>
+
+
+<!-- ======================================================================= -->
+<h3>
+<a name="objc_gc_flags">Objective-C Garbage Collection Module Flags Metadata</a>
+</h3>
+
+<div>
+
+<p>On the Mach-O platform, Objective-C stores metadata about garbage collection
+ in a special section called "image info". The metadata consists of a version
+ number and a bitmask specifying what types of garbage collection are
+   supported (if any) by the file. If two or more modules are linked together,
+ their garbage collection metadata needs to be merged rather than appended
+ together.</p>
+
+<p>The Objective-C garbage collection module flags metadata consists of the
+ following key-value pairs:</p>
+
+<table border="1" cellspacing="0" cellpadding="4">
+ <col width="30%">
+ <tbody>
+ <tr>
+ <th>Key</th>
+ <th>Value</th>
+ </tr>
+ <tr>
+ <td><tt>Objective-C&nbsp;Version</tt></td>
+ <td align="left"><b>[Required]</b> &mdash; The Objective-C ABI
+ version. Valid values are 1 and 2.</td>
+ </tr>
+ <tr>
+ <td><tt>Objective-C&nbsp;Image&nbsp;Info&nbsp;Version</tt></td>
+ <td align="left"><b>[Required]</b> &mdash; The version of the image info
+ section. Currently always 0.</td>
+ </tr>
+ <tr>
+ <td><tt>Objective-C&nbsp;Image&nbsp;Info&nbsp;Section</tt></td>
+ <td align="left"><b>[Required]</b> &mdash; The section to place the
+ metadata. Valid values are <tt>"__OBJC, __image_info, regular"</tt> for
+ Objective-C ABI version 1, and <tt>"__DATA,__objc_imageinfo, regular,
+ no_dead_strip"</tt> for Objective-C ABI version 2.</td>
+ </tr>
+ <tr>
+ <td><tt>Objective-C&nbsp;Garbage&nbsp;Collection</tt></td>
+ <td align="left"><b>[Required]</b> &mdash; Specifies whether garbage
+ collection is supported or not. Valid values are 0, for no garbage
+ collection, and 2, for garbage collection supported.</td>
+ </tr>
+ <tr>
+ <td><tt>Objective-C&nbsp;GC&nbsp;Only</tt></td>
+ <td align="left"><b>[Optional]</b> &mdash; Specifies that only garbage
+ collection is supported. If present, its value must be 6. This flag
+ requires that the <tt>Objective-C Garbage Collection</tt> flag have the
+ value 2.</td>
+ </tr>
+ </tbody>
+</table>
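+
+<p>As an illustrative sketch (the behavior value 1 (Error) and the concrete
+   values are assumptions chosen from the table above, not taken from any
+   particular front-end's output), a module built for Objective-C ABI version 2
+   with garbage collection supported might carry:</p>
+
+<pre class="doc_code">
+!0 = metadata !{ i32 1, metadata !"Objective-C Version", i32 2 }
+!1 = metadata !{ i32 1, metadata !"Objective-C Image Info Version", i32 0 }
+!2 = metadata !{ i32 1, metadata !"Objective-C Image Info Section",
+                 metadata !"__DATA,__objc_imageinfo, regular, no_dead_strip" }
+!3 = metadata !{ i32 1, metadata !"Objective-C Garbage Collection", i32 2 }
+!llvm.module.flags = !{ !0, !1, !2, !3 }
+</pre>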
+
+<p>Some important flag interactions:</p>
+
+<ul>
+ <li>If a module with <tt>Objective-C Garbage Collection</tt> set to 0 is
+ merged with a module with <tt>Objective-C Garbage Collection</tt> set to
+ 2, then the resulting module has the <tt>Objective-C Garbage
+ Collection</tt> flag set to 0.</li>
+
+ <li>A module with <tt>Objective-C Garbage Collection</tt> set to 0 cannot be
+ merged with a module with <tt>Objective-C GC Only</tt> set to 6.</li>
+</ul>
+
+</div>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2>
+ <a name="intrinsic_globals">Intrinsic Global Variables</a>
+</h2>
+<!-- *********************************************************************** -->
+<div>
+<p>LLVM has a number of "magic" global variables that contain data that affect
+code generation or other IR semantics. These are documented here. All globals
+of this sort should have a section specified as "<tt>llvm.metadata</tt>". This
+section and all globals that start with "<tt>llvm.</tt>" are reserved for use
+by LLVM.</p>
+
+<!-- ======================================================================= -->
+<h3>
+<a name="intg_used">The '<tt>llvm.used</tt>' Global Variable</a>
+</h3>
+
+<div>
+
+<p>The <tt>@llvm.used</tt> global is an array with i8* element type which has <a
+href="#linkage_appending">appending linkage</a>. This array contains a list of
+pointers to global variables and functions which may optionally have a pointer
+cast formed of bitcast or getelementptr. For example, a legal use of it is:</p>
+
+<div class="doc_code">
+<pre>
+@X = global i8 4
+@Y = global i32 123
+
+@llvm.used = appending global [2 x i8*] [
+ i8* @X,
+ i8* bitcast (i32* @Y to i8*)
+], section "llvm.metadata"
+</pre>
+</div>
+
+<p>If a global variable appears in the <tt>@llvm.used</tt> list, then the
+ compiler, assembler, and linker are required to treat the symbol as if there
+ is a reference to the global that it cannot see. For example, if a variable
+ has internal linkage and no references other than that from
+ the <tt>@llvm.used</tt> list, it cannot be deleted. This is commonly used to
+ represent references from inline asms and other things the compiler cannot
+ "see", and corresponds to "<tt>attribute((used))</tt>" in GNU C.</p>
+
+<p>On some targets, the code generator must emit a directive to the assembler or
+ object file to prevent the assembler and linker from molesting the
+ symbol.</p>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="intg_compiler_used">
+ The '<tt>llvm.compiler.used</tt>' Global Variable
+ </a>
+</h3>
+
+<div>
+
+<p>The <tt>@llvm.compiler.used</tt> directive is the same as the
+ <tt>@llvm.used</tt> directive, except that it only prevents the compiler from
+ touching the symbol. On targets that support it, this allows an intelligent
+ linker to optimize references to the symbol without being impeded as it would
+ be by <tt>@llvm.used</tt>.</p>
+
+<p>This is a rare construct that should only be used in rare circumstances, and
+ should not be exposed to source languages.</p>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+<a name="intg_global_ctors">The '<tt>llvm.global_ctors</tt>' Global Variable</a>
+</h3>
+
+<div>
+
+<div class="doc_code">
+<pre>
+%0 = type { i32, void ()* }
+@llvm.global_ctors = appending global [1 x %0] [%0 { i32 65535, void ()* @ctor }]
+</pre>
+</div>
+
+<p>The <tt>@llvm.global_ctors</tt> array contains a list of constructor
+ functions and associated priorities. The functions referenced by this array
+ will be called in ascending order of priority (i.e. lowest first) when the
+ module is loaded. The order of functions with the same priority is not
+ defined.</p>
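+
+<p>A minimal sketch of a matching constructor definition (the name
+   <tt>@ctor</tt> follows the example above; the body is an assumption):</p>
+
+<div class="doc_code">
+<pre>
+define internal void @ctor() {
+entry:
+  ; initialization work that must run before main()
+  ret void
+}
+</pre>
+</div>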
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+<a name="intg_global_dtors">The '<tt>llvm.global_dtors</tt>' Global Variable</a>
+</h3>
+
+<div>
+
+<div class="doc_code">
+<pre>
+%0 = type { i32, void ()* }
+@llvm.global_dtors = appending global [1 x %0] [%0 { i32 65535, void ()* @dtor }]
+</pre>
+</div>
+
+<p>The <tt>@llvm.global_dtors</tt> array contains a list of destructor functions
+ and associated priorities. The functions referenced by this array will be
+   called in descending order of priority (i.e. highest first) when the module
+   is unloaded. The order of functions with the same priority is not defined.</p>
+
+</div>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="instref">Instruction Reference</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>The LLVM instruction set consists of several different classifications of
+ instructions: <a href="#terminators">terminator
+ instructions</a>, <a href="#binaryops">binary instructions</a>,
+ <a href="#bitwiseops">bitwise binary instructions</a>,
+ <a href="#memoryops">memory instructions</a>, and
+ <a href="#otherops">other instructions</a>.</p>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="terminators">Terminator Instructions</a>
+</h3>
+
+<div>
+
+<p>As mentioned <a href="#functionstructure">previously</a>, every basic block
+ in a program ends with a "Terminator" instruction, which indicates which
+ block should be executed after the current block is finished. These
+ terminator instructions typically yield a '<tt>void</tt>' value: they produce
+ control flow, not values (the one exception being the
+ '<a href="#i_invoke"><tt>invoke</tt></a>' instruction).</p>
+
+<p>The terminator instructions are:
+ '<a href="#i_ret"><tt>ret</tt></a>',
+ '<a href="#i_br"><tt>br</tt></a>',
+ '<a href="#i_switch"><tt>switch</tt></a>',
+ '<a href="#i_indirectbr"><tt>indirectbr</tt></a>',
+ '<a href="#i_invoke"><tt>invoke</tt></a>',
+ '<a href="#i_resume"><tt>resume</tt></a>', and
+ '<a href="#i_unreachable"><tt>unreachable</tt></a>'.</p>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="i_ret">'<tt>ret</tt>' Instruction</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ ret &lt;type&gt; &lt;value&gt; <i>; Return a value from a non-void function</i>
+ ret void <i>; Return from void function</i>
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>ret</tt>' instruction is used to return control flow (and optionally
+ a value) from a function back to the caller.</p>
+
+<p>There are two forms of the '<tt>ret</tt>' instruction: one that returns a
+ value and then causes control flow, and one that just causes control flow to
+ occur.</p>
+
+<h5>Arguments:</h5>
+<p>The '<tt>ret</tt>' instruction optionally accepts a single argument, the
+ return value. The type of the return value must be a
+ '<a href="#t_firstclass">first class</a>' type.</p>
+
+<p>A function is not <a href="#wellformed">well formed</a> if it has a
+ non-void return type and contains a '<tt>ret</tt>' instruction with no return
+ value or a return value with a type that does not match its type, or if it
+ has a void return type and contains a '<tt>ret</tt>' instruction with a
+ return value.</p>
+
+<h5>Semantics:</h5>
+<p>When the '<tt>ret</tt>' instruction is executed, control flow returns back to
+ the calling function's context. If the caller is a
+ "<a href="#i_call"><tt>call</tt></a>" instruction, execution continues at the
+ instruction after the call. If the caller was an
+ "<a href="#i_invoke"><tt>invoke</tt></a>" instruction, execution continues at
+ the beginning of the "normal" destination block. If the instruction returns
+   a value, that value becomes the return value of the call or invoke
+   instruction.</p>
+
+<h5>Example:</h5>
+<pre>
+ ret i32 5 <i>; Return an integer value of 5</i>
+ ret void <i>; Return from a void function</i>
+ ret { i32, i8 } { i32 4, i8 2 } <i>; Return a struct of values 4 and 2</i>
+</pre>
+
+</div>
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="i_br">'<tt>br</tt>' Instruction</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ br i1 &lt;cond&gt;, label &lt;iftrue&gt;, label &lt;iffalse&gt;
+ br label &lt;dest&gt; <i>; Unconditional branch</i>
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>br</tt>' instruction is used to cause control flow to transfer to a
+ different basic block in the current function. There are two forms of this
+ instruction, corresponding to a conditional branch and an unconditional
+ branch.</p>
+
+<h5>Arguments:</h5>
+<p>The conditional branch form of the '<tt>br</tt>' instruction takes a single
+ '<tt>i1</tt>' value and two '<tt>label</tt>' values. The unconditional form
+ of the '<tt>br</tt>' instruction takes a single '<tt>label</tt>' value as a
+ target.</p>
+
+<h5>Semantics:</h5>
+<p>Upon execution of a conditional '<tt>br</tt>' instruction, the '<tt>i1</tt>'
+ argument is evaluated. If the value is <tt>true</tt>, control flows to the
+   '<tt>iftrue</tt>' <tt>label</tt> argument. If '<tt>cond</tt>' is <tt>false</tt>,
+ control flows to the '<tt>iffalse</tt>' <tt>label</tt> argument.</p>
+
+<h5>Example:</h5>
+<pre>
+Test:
+ %cond = <a href="#i_icmp">icmp</a> eq i32 %a, %b
+ br i1 %cond, label %IfEqual, label %IfUnequal
+IfEqual:
+ <a href="#i_ret">ret</a> i32 1
+IfUnequal:
+ <a href="#i_ret">ret</a> i32 0
+</pre>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="i_switch">'<tt>switch</tt>' Instruction</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ switch &lt;intty&gt; &lt;value&gt;, label &lt;defaultdest&gt; [ &lt;intty&gt; &lt;val&gt;, label &lt;dest&gt; ... ]
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>switch</tt>' instruction is used to transfer control flow to one of
+ several different places. It is a generalization of the '<tt>br</tt>'
+ instruction, allowing a branch to occur to one of many possible
+ destinations.</p>
+
+<h5>Arguments:</h5>
+<p>The '<tt>switch</tt>' instruction uses three parameters: an integer
+ comparison value '<tt>value</tt>', a default '<tt>label</tt>' destination,
+ and an array of pairs of comparison value constants and '<tt>label</tt>'s.
+ The table is not allowed to contain duplicate constant entries.</p>
+
+<h5>Semantics:</h5>
+<p>The <tt>switch</tt> instruction specifies a table of values and
+ destinations. When the '<tt>switch</tt>' instruction is executed, this table
+ is searched for the given value. If the value is found, control flow is
+ transferred to the corresponding destination; otherwise, control flow is
+ transferred to the default destination.</p>
+
+<h5>Implementation:</h5>
+<p>Depending on properties of the target machine and the particular
+ <tt>switch</tt> instruction, this instruction may be code generated in
+ different ways. For example, it could be generated as a series of chained
+ conditional branches or with a lookup table.</p>
+
+<h5>Example:</h5>
+<pre>
+ <i>; Emulate a conditional br instruction</i>
+ %Val = <a href="#i_zext">zext</a> i1 %value to i32
+ switch i32 %Val, label %truedest [ i32 0, label %falsedest ]
+
+ <i>; Emulate an unconditional br instruction</i>
+ switch i32 0, label %dest [ ]
+
+ <i>; Implement a jump table:</i>
+ switch i32 %val, label %otherwise [ i32 0, label %onzero
+ i32 1, label %onone
+ i32 2, label %ontwo ]
+</pre>
+
+</div>
+
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="i_indirectbr">'<tt>indirectbr</tt>' Instruction</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ indirectbr &lt;somety&gt;* &lt;address&gt;, [ label &lt;dest1&gt;, label &lt;dest2&gt;, ... ]
+</pre>
+
+<h5>Overview:</h5>
+
+<p>The '<tt>indirectbr</tt>' instruction implements an indirect branch to a label
+ within the current function, whose address is specified by
+ "<tt>address</tt>". Address must be derived from a <a
+ href="#blockaddress">blockaddress</a> constant.</p>
+
+<h5>Arguments:</h5>
+
+<p>The '<tt>address</tt>' argument is the address of the label to jump to. The
+ rest of the arguments indicate the full set of possible destinations that the
+ address may point to. Blocks are allowed to occur multiple times in the
+ destination list, though this isn't particularly useful.</p>
+
+<p>This destination list is required so that dataflow analysis has an accurate
+ understanding of the CFG.</p>
+
+<h5>Semantics:</h5>
+
+<p>Control transfers to the block specified in the address argument. All
+ possible destination blocks must be listed in the label list, otherwise this
+ instruction has undefined behavior. This implies that jumps to labels
+ defined in other functions have undefined behavior as well.</p>
+
+<h5>Implementation:</h5>
+
+<p>This is typically implemented with a jump through a register.</p>
+
+<h5>Example:</h5>
+<pre>
+ indirectbr i8* %Addr, [ label %bb1, label %bb2, label %bb3 ]
+</pre>
+
+</div>
+
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="i_invoke">'<tt>invoke</tt>' Instruction</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ &lt;result&gt; = invoke [<a href="#callingconv">cconv</a>] [<a href="#paramattrs">ret attrs</a>] &lt;ptr to function ty&gt; &lt;function ptr val&gt;(&lt;function args&gt;) [<a href="#fnattrs">fn attrs</a>]
+ to label &lt;normal label&gt; unwind label &lt;exception label&gt;
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>invoke</tt>' instruction causes control to transfer to a specified
+ function, with the possibility of control flow transfer to either the
+ '<tt>normal</tt>' label or the '<tt>exception</tt>' label. If the callee
+ function returns with the "<tt><a href="#i_ret">ret</a></tt>" instruction,
+ control flow will return to the "normal" label. If the callee (or any
+ indirect callees) returns via the "<a href="#i_resume"><tt>resume</tt></a>"
+ instruction or other exception handling mechanism, control is interrupted and
+ continued at the dynamically nearest "exception" label.</p>
+
+<p>The '<tt>exception</tt>' label is a
+ <i><a href="ExceptionHandling.html#overview">landing pad</a></i> for the
+   exception. As such, the '<tt>exception</tt>' label is required to have the
+   "<a href="#i_landingpad"><tt>landingpad</tt></a>" instruction, which contains
+   the information about the behavior of the program after unwinding
+   happens, as its first non-PHI instruction. The restrictions on the
+   "<tt>landingpad</tt>" instruction tightly couple it to the
+ "<tt>invoke</tt>" instruction, so that the important information contained
+ within the "<tt>landingpad</tt>" instruction can't be lost through normal
+ code motion.</p>
+
+<h5>Arguments:</h5>
+<p>This instruction requires several arguments:</p>
+
+<ol>
+ <li>The optional "cconv" marker indicates which <a href="#callingconv">calling
+ convention</a> the call should use. If none is specified, the call
+ defaults to using C calling conventions.</li>
+
+ <li>The optional <a href="#paramattrs">Parameter Attributes</a> list for
+ return values. Only '<tt>zeroext</tt>', '<tt>signext</tt>', and
+ '<tt>inreg</tt>' attributes are valid here.</li>
+
+ <li>'<tt>ptr to function ty</tt>': shall be the signature of the pointer to
+ function value being invoked. In most cases, this is a direct function
+ invocation, but indirect <tt>invoke</tt>s are just as possible, branching
+ off an arbitrary pointer to function value.</li>
+
+ <li>'<tt>function ptr val</tt>': An LLVM value containing a pointer to a
+ function to be invoked. </li>
+
+ <li>'<tt>function args</tt>': argument list whose types match the function
+ signature argument types and parameter attributes. All arguments must be
+ of <a href="#t_firstclass">first class</a> type. If the function
+ signature indicates the function accepts a variable number of arguments,
+ the extra arguments can be specified.</li>
+
+ <li>'<tt>normal label</tt>': the label reached when the called function
+ executes a '<tt><a href="#i_ret">ret</a></tt>' instruction. </li>
+
+ <li>'<tt>exception label</tt>': the label reached when a callee returns via
+ the <a href="#i_resume"><tt>resume</tt></a> instruction or other exception
+ handling mechanism.</li>
+
+ <li>The optional <a href="#fnattrs">function attributes</a> list. Only
+ '<tt>noreturn</tt>', '<tt>nounwind</tt>', '<tt>readonly</tt>' and
+ '<tt>readnone</tt>' attributes are valid here.</li>
+</ol>
+
+<h5>Semantics:</h5>
+<p>This instruction is designed to operate as a standard
+ '<tt><a href="#i_call">call</a></tt>' instruction in most regards. The
+ primary difference is that it establishes an association with a label, which
+ is used by the runtime library to unwind the stack.</p>
+
+<p>This instruction is used in languages with destructors to ensure that proper
+ cleanup is performed in the case of either a <tt>longjmp</tt> or a thrown
+ exception. Additionally, this is important for implementation of
+ '<tt>catch</tt>' clauses in high-level languages that support them.</p>
+
+<p>For the purposes of the SSA form, the definition of the value returned by the
+ '<tt>invoke</tt>' instruction is deemed to occur on the edge from the current
+ block to the "normal" label. If the callee unwinds then no return value is
+ available.</p>
+
+<h5>Example:</h5>
+<pre>
+ %retval = invoke i32 @Test(i32 15) to label %Continue
+ unwind label %TestCleanup <i>; {i32}:retval set</i>
+ %retval = invoke <a href="#callingconv">coldcc</a> i32 %Testfnptr(i32 15) to label %Continue
+ unwind label %TestCleanup <i>; {i32}:retval set</i>
+</pre>
+
+</div>
+
+ <!-- _______________________________________________________________________ -->
+
+<h4>
+ <a name="i_resume">'<tt>resume</tt>' Instruction</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ resume &lt;type&gt; &lt;value&gt;
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>resume</tt>' instruction is a terminator instruction that has no
+ successors.</p>
+
+<h5>Arguments:</h5>
+<p>The '<tt>resume</tt>' instruction requires one argument, which must have the
+ same type as the result of any '<tt>landingpad</tt>' instruction in the same
+ function.</p>
+
+<h5>Semantics:</h5>
+<p>The '<tt>resume</tt>' instruction resumes propagation of an existing
+ (in-flight) exception whose unwinding was interrupted with
+ a <a href="#i_landingpad"><tt>landingpad</tt></a> instruction.</p>
+
+<h5>Example:</h5>
+<pre>
+ resume { i8*, i32 } %exn
+</pre>
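+
+<p>In context, a sketch of the typical pairing with a
+   '<a href="#i_landingpad"><tt>landingpad</tt></a>' instruction (the
+   personality function <tt>@__gxx_personality_v0</tt> is an assumption and is
+   assumed to be declared elsewhere):</p>
+
+<pre>
+lpad:
+  %exn = landingpad { i8*, i32 }
+            personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
+            cleanup
+  resume { i8*, i32 } %exn
+</pre>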
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+
+<h4>
+ <a name="i_unreachable">'<tt>unreachable</tt>' Instruction</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ unreachable
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>unreachable</tt>' instruction has no defined semantics. This
+ instruction is used to inform the optimizer that a particular portion of the
+ code is not reachable. This can be used to indicate that the code after a
+ no-return function cannot be reached, and other facts.</p>
+
+<h5>Semantics:</h5>
+<p>The '<tt>unreachable</tt>' instruction has no defined semantics.</p>
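+
+<p>A minimal sketch of a common use (assuming <tt>@abort</tt> is declared
+   elsewhere and never returns):</p>
+
+<pre>
+  call void @abort()        <i>; never returns</i>
+  unreachable
+</pre>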
+
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="binaryops">Binary Operations</a>
+</h3>
+
+<div>
+
+<p>Binary operators are used to do most of the computation in a program. They
+ require two operands of the same type, execute an operation on them, and
+ produce a single value. The operands might represent multiple data, as is
+ the case with the <a href="#t_vector">vector</a> data type. The result value
+ has the same type as its operands.</p>
+
+<p>There are several different binary operators:</p>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="i_add">'<tt>add</tt>' Instruction</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ &lt;result&gt; = add &lt;ty&gt; &lt;op1&gt;, &lt;op2&gt; <i>; yields {ty}:result</i>
+ &lt;result&gt; = add nuw &lt;ty&gt; &lt;op1&gt;, &lt;op2&gt; <i>; yields {ty}:result</i>
+ &lt;result&gt; = add nsw &lt;ty&gt; &lt;op1&gt;, &lt;op2&gt; <i>; yields {ty}:result</i>
+ &lt;result&gt; = add nuw nsw &lt;ty&gt; &lt;op1&gt;, &lt;op2&gt; <i>; yields {ty}:result</i>
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>add</tt>' instruction returns the sum of its two operands.</p>
+
+<h5>Arguments:</h5>
+<p>The two arguments to the '<tt>add</tt>' instruction must
+ be <a href="#t_integer">integer</a> or <a href="#t_vector">vector</a> of
+ integer values. Both arguments must have identical types.</p>
+
+<h5>Semantics:</h5>
+<p>The value produced is the integer sum of the two operands.</p>
+
+<p>If the sum has unsigned overflow, the result returned is the mathematical
+ result modulo 2<sup>n</sup>, where n is the bit width of the result.</p>
+
+<p>Because LLVM integers use a two's complement representation, this instruction
+ is appropriate for both signed and unsigned integers.</p>
+
+<p><tt>nuw</tt> and <tt>nsw</tt> stand for &quot;No Unsigned Wrap&quot;
+ and &quot;No Signed Wrap&quot;, respectively. If the <tt>nuw</tt> and/or
+ <tt>nsw</tt> keywords are present, the result value of the <tt>add</tt>
+ is a <a href="#poisonvalues">poison value</a> if unsigned and/or signed overflow,
+ respectively, occurs.</p>
+
+<h5>Example:</h5>
+<pre>
+ &lt;result&gt; = add i32 4, %var <i>; yields {i32}:result = 4 + %var</i>
+</pre>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="i_fadd">'<tt>fadd</tt>' Instruction</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ &lt;result&gt; = fadd &lt;ty&gt; &lt;op1&gt;, &lt;op2&gt; <i>; yields {ty}:result</i>
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>fadd</tt>' instruction returns the sum of its two operands.</p>
+
+<h5>Arguments:</h5>
+<p>The two arguments to the '<tt>fadd</tt>' instruction must be
+ <a href="#t_floating">floating point</a> or <a href="#t_vector">vector</a> of
+ floating point values. Both arguments must have identical types.</p>
+
+<h5>Semantics:</h5>
+<p>The value produced is the floating point sum of the two operands.</p>
+
+<h5>Example:</h5>
+<pre>
+ &lt;result&gt; = fadd float 4.0, %var <i>; yields {float}:result = 4.0 + %var</i>
+</pre>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="i_sub">'<tt>sub</tt>' Instruction</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ &lt;result&gt; = sub &lt;ty&gt; &lt;op1&gt;, &lt;op2&gt; <i>; yields {ty}:result</i>
+ &lt;result&gt; = sub nuw &lt;ty&gt; &lt;op1&gt;, &lt;op2&gt; <i>; yields {ty}:result</i>
+ &lt;result&gt; = sub nsw &lt;ty&gt; &lt;op1&gt;, &lt;op2&gt; <i>; yields {ty}:result</i>
+ &lt;result&gt; = sub nuw nsw &lt;ty&gt; &lt;op1&gt;, &lt;op2&gt; <i>; yields {ty}:result</i>
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>sub</tt>' instruction returns the difference of its two
+ operands.</p>
+
+<p>Note that the '<tt>sub</tt>' instruction is used to represent the
+ '<tt>neg</tt>' instruction present in most other intermediate
+ representations.</p>
+
+<h5>Arguments:</h5>
+<p>The two arguments to the '<tt>sub</tt>' instruction must
+ be <a href="#t_integer">integer</a> or <a href="#t_vector">vector</a> of
+ integer values. Both arguments must have identical types.</p>
+
+<h5>Semantics:</h5>
+<p>The value produced is the integer difference of the two operands.</p>
+
+<p>If the difference has unsigned overflow, the result returned is the
+ mathematical result modulo 2<sup>n</sup>, where n is the bit width of the
+ result.</p>
+
+<p>Because LLVM integers use a two's complement representation, this instruction
+ is appropriate for both signed and unsigned integers.</p>
+
+<p><tt>nuw</tt> and <tt>nsw</tt> stand for &quot;No Unsigned Wrap&quot;
+ and &quot;No Signed Wrap&quot;, respectively. If the <tt>nuw</tt> and/or
+ <tt>nsw</tt> keywords are present, the result value of the <tt>sub</tt>
+ is a <a href="#poisonvalues">poison value</a> if unsigned and/or signed overflow,
+ respectively, occurs.</p>
+
+<h5>Example:</h5>
+<pre>
+ &lt;result&gt; = sub i32 4, %var <i>; yields {i32}:result = 4 - %var</i>
+  &lt;result&gt; = sub i32 0, %val          <i>; yields {i32}:result = -%val</i>
+</pre>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="i_fsub">'<tt>fsub</tt>' Instruction</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ &lt;result&gt; = fsub &lt;ty&gt; &lt;op1&gt;, &lt;op2&gt; <i>; yields {ty}:result</i>
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>fsub</tt>' instruction returns the difference of its two
+ operands.</p>
+
+<p>Note that the '<tt>fsub</tt>' instruction is used to represent the
+ '<tt>fneg</tt>' instruction present in most other intermediate
+ representations.</p>
+
+<h5>Arguments:</h5>
+<p>The two arguments to the '<tt>fsub</tt>' instruction must be
+ <a href="#t_floating">floating point</a> or <a href="#t_vector">vector</a> of
+ floating point values. Both arguments must have identical types.</p>
+
+<h5>Semantics:</h5>
+<p>The value produced is the floating point difference of the two operands.</p>
+
+<h5>Example:</h5>
+<pre>
+ &lt;result&gt; = fsub float 4.0, %var <i>; yields {float}:result = 4.0 - %var</i>
+  &lt;result&gt; = fsub float -0.0, %val          <i>; yields {float}:result = -%val</i>
+</pre>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="i_mul">'<tt>mul</tt>' Instruction</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ &lt;result&gt; = mul &lt;ty&gt; &lt;op1&gt;, &lt;op2&gt; <i>; yields {ty}:result</i>
+ &lt;result&gt; = mul nuw &lt;ty&gt; &lt;op1&gt;, &lt;op2&gt; <i>; yields {ty}:result</i>
+ &lt;result&gt; = mul nsw &lt;ty&gt; &lt;op1&gt;, &lt;op2&gt; <i>; yields {ty}:result</i>
+ &lt;result&gt; = mul nuw nsw &lt;ty&gt; &lt;op1&gt;, &lt;op2&gt; <i>; yields {ty}:result</i>
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>mul</tt>' instruction returns the product of its two operands.</p>
+
+<h5>Arguments:</h5>
+<p>The two arguments to the '<tt>mul</tt>' instruction must
+ be <a href="#t_integer">integer</a> or <a href="#t_vector">vector</a> of
+ integer values. Both arguments must have identical types.</p>
+
+<h5>Semantics:</h5>
+<p>The value produced is the integer product of the two operands.</p>
+
+<p>If the result of the multiplication has unsigned overflow, the result
+ returned is the mathematical result modulo 2<sup>n</sup>, where n is the bit
+ width of the result.</p>
+
+<p>Because LLVM integers use a two's complement representation, and the result
+ is the same width as the operands, this instruction returns the correct
+ result for both signed and unsigned integers. If a full product
+ (e.g. <tt>i32</tt> x <tt>i32</tt> -&gt; <tt>i64</tt>) is needed, the operands should
+ be sign-extended or zero-extended as appropriate to the width of the full
+ product.</p>
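+
+<p>For example, a full <tt>i32</tt> x <tt>i32</tt> -&gt; <tt>i64</tt> signed
+ product can be formed by widening the operands first (a sketch;
+ <tt>%a</tt> and <tt>%b</tt> are hypothetical <tt>i32</tt> values):</p>
+
+<pre>
+ %a64 = sext i32 %a to i64
+ %b64 = sext i32 %b to i64
+ %prod = mul i64 %a64, %b64 <i>; full 64-bit signed product</i>
+</pre>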
+
+<p><tt>nuw</tt> and <tt>nsw</tt> stand for &quot;No Unsigned Wrap&quot;
+ and &quot;No Signed Wrap&quot;, respectively. If the <tt>nuw</tt> and/or
+ <tt>nsw</tt> keywords are present, the result value of the <tt>mul</tt>
+ is a <a href="#poisonvalues">poison value</a> if unsigned and/or signed overflow,
+ respectively, occurs.</p>
+
+<h5>Example:</h5>
+<pre>
+ &lt;result&gt; = mul i32 4, %var <i>; yields {i32}:result = 4 * %var</i>
+</pre>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="i_fmul">'<tt>fmul</tt>' Instruction</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ &lt;result&gt; = fmul &lt;ty&gt; &lt;op1&gt;, &lt;op2&gt; <i>; yields {ty}:result</i>
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>fmul</tt>' instruction returns the product of its two operands.</p>
+
+<h5>Arguments:</h5>
+<p>The two arguments to the '<tt>fmul</tt>' instruction must be
+ <a href="#t_floating">floating point</a> or <a href="#t_vector">vector</a> of
+ floating point values. Both arguments must have identical types.</p>
+
+<h5>Semantics:</h5>
+<p>The value produced is the floating point product of the two operands.</p>
+
+<h5>Example:</h5>
+<pre>
+ &lt;result&gt; = fmul float 4.0, %var <i>; yields {float}:result = 4.0 * %var</i>
+</pre>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="i_udiv">'<tt>udiv</tt>' Instruction</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ &lt;result&gt; = udiv &lt;ty&gt; &lt;op1&gt;, &lt;op2&gt; <i>; yields {ty}:result</i>
+ &lt;result&gt; = udiv exact &lt;ty&gt; &lt;op1&gt;, &lt;op2&gt; <i>; yields {ty}:result</i>
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>udiv</tt>' instruction returns the quotient of its two operands.</p>
+
+<h5>Arguments:</h5>
+<p>The two arguments to the '<tt>udiv</tt>' instruction must be
+ <a href="#t_integer">integer</a> or <a href="#t_vector">vector</a> of integer
+ values. Both arguments must have identical types.</p>
+
+<h5>Semantics:</h5>
+<p>The value produced is the unsigned integer quotient of the two operands.</p>
+
+<p>Note that unsigned integer division and signed integer division are distinct
+ operations; for signed integer division, use '<tt>sdiv</tt>'.</p>
+
+<p>Division by zero leads to undefined behavior.</p>
+
+<p>If the <tt>exact</tt> keyword is present, the result value of the
+ <tt>udiv</tt> is a <a href="#poisonvalues">poison value</a> if %op1 is not a
+ multiple of %op2 (as such, "((a udiv exact b) mul b) == a").</p>
+
+
+<h5>Example:</h5>
+<pre>
+ &lt;result&gt; = udiv i32 4, %var <i>; yields {i32}:result = 4 / %var</i>
+</pre>
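+
+<p>A minimal sketch of the <tt>exact</tt> form (<tt>%x</tt> is a hypothetical
+ <tt>i32</tt> value):</p>
+
+<pre>
+ &lt;result&gt; = udiv exact i32 %x, 16 <i>; poison value unless %x is a multiple of 16</i>
+</pre>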
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="i_sdiv">'<tt>sdiv</tt>' Instruction</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ &lt;result&gt; = sdiv &lt;ty&gt; &lt;op1&gt;, &lt;op2&gt; <i>; yields {ty}:result</i>
+ &lt;result&gt; = sdiv exact &lt;ty&gt; &lt;op1&gt;, &lt;op2&gt; <i>; yields {ty}:result</i>
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>sdiv</tt>' instruction returns the quotient of its two operands.</p>
+
+<h5>Arguments:</h5>
+<p>The two arguments to the '<tt>sdiv</tt>' instruction must be
+ <a href="#t_integer">integer</a> or <a href="#t_vector">vector</a> of integer
+ values. Both arguments must have identical types.</p>
+
+<h5>Semantics:</h5>
+<p>The value produced is the signed integer quotient of the two operands rounded
+ towards zero.</p>
+
+<p>Note that signed integer division and unsigned integer division are distinct
+ operations; for unsigned integer division, use '<tt>udiv</tt>'.</p>
+
+<p>Division by zero leads to undefined behavior. Overflow also leads to
+ undefined behavior; this is a rare case, but can occur, for example, by doing
+ a 32-bit division of -2147483648 by -1.</p>
+
+<p>If the <tt>exact</tt> keyword is present, the result value of the
+ <tt>sdiv</tt> is a <a href="#poisonvalues">poison value</a> if the result would
+ be rounded.</p>
+
+<h5>Example:</h5>
+<pre>
+ &lt;result&gt; = sdiv i32 4, %var <i>; yields {i32}:result = 4 / %var</i>
+</pre>
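+
+<p>Similarly, a sketch with the <tt>exact</tt> keyword (<tt>%x</tt> is a
+ hypothetical <tt>i32</tt> value):</p>
+
+<pre>
+ &lt;result&gt; = sdiv exact i32 %x, 4 <i>; poison value unless %x is an exact multiple of 4</i>
+</pre>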
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="i_fdiv">'<tt>fdiv</tt>' Instruction</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ &lt;result&gt; = fdiv &lt;ty&gt; &lt;op1&gt;, &lt;op2&gt; <i>; yields {ty}:result</i>
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>fdiv</tt>' instruction returns the quotient of its two operands.</p>
+
+<h5>Arguments:</h5>
+<p>The two arguments to the '<tt>fdiv</tt>' instruction must be
+ <a href="#t_floating">floating point</a> or <a href="#t_vector">vector</a> of
+ floating point values. Both arguments must have identical types.</p>
+
+<h5>Semantics:</h5>
+<p>The value produced is the floating point quotient of the two operands.</p>
+
+<h5>Example:</h5>
+<pre>
+ &lt;result&gt; = fdiv float 4.0, %var <i>; yields {float}:result = 4.0 / %var</i>
+</pre>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="i_urem">'<tt>urem</tt>' Instruction</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ &lt;result&gt; = urem &lt;ty&gt; &lt;op1&gt;, &lt;op2&gt; <i>; yields {ty}:result</i>
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>urem</tt>' instruction returns the remainder from the unsigned
+ division of its two arguments.</p>
+
+<h5>Arguments:</h5>
+<p>The two arguments to the '<tt>urem</tt>' instruction must be
+ <a href="#t_integer">integer</a> or <a href="#t_vector">vector</a> of integer
+ values. Both arguments must have identical types.</p>
+
+<h5>Semantics:</h5>
+<p>This instruction returns the unsigned integer <i>remainder</i> of a division.
+ This instruction always performs an unsigned division to get the
+ remainder.</p>
+
+<p>Note that unsigned integer remainder and signed integer remainder are
+ distinct operations; for signed integer remainder, use '<tt>srem</tt>'.</p>
+
+<p>Taking the remainder of a division by zero leads to undefined behavior.</p>
+
+<h5>Example:</h5>
+<pre>
+ &lt;result&gt; = urem i32 4, %var <i>; yields {i32}:result = 4 % %var</i>
+</pre>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="i_srem">'<tt>srem</tt>' Instruction</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ &lt;result&gt; = srem &lt;ty&gt; &lt;op1&gt;, &lt;op2&gt; <i>; yields {ty}:result</i>
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>srem</tt>' instruction returns the remainder from the signed
+ division of its two operands. This instruction can also take
+ <a href="#t_vector">vector</a> versions of the values in which case the
+ elements must be integers.</p>
+
+<h5>Arguments:</h5>
+<p>The two arguments to the '<tt>srem</tt>' instruction must be
+ <a href="#t_integer">integer</a> or <a href="#t_vector">vector</a> of integer
+ values. Both arguments must have identical types.</p>
+
+<h5>Semantics:</h5>
+<p>This instruction returns the <i>remainder</i> of a division (where the result
+ is either zero or has the same sign as the dividend, <tt>op1</tt>), not the
+ <i>modulo</i> operator (where the result is either zero or has the same sign
+ as the divisor, <tt>op2</tt>) of a value.
+ For more information about the difference,
+ see <a href="http://mathforum.org/dr.math/problems/anne.4.28.99.html">The
+ Math Forum</a>. For a table of how this is implemented in various languages,
+ please see <a href="http://en.wikipedia.org/wiki/Modulo_operation">
+ Wikipedia: modulo operation</a>.</p>
+
+<p>Note that signed integer remainder and unsigned integer remainder are
+ distinct operations; for unsigned integer remainder, use '<tt>urem</tt>'.</p>
+
+<p>Taking the remainder of a division by zero leads to undefined behavior.
+ Overflow also leads to undefined behavior; this is a rare case, but can
+ occur, for example, by taking the remainder of a 32-bit division of
+ -2147483648 by -1. (The remainder doesn't actually overflow, but this rule
+ lets srem be implemented using instructions that return both the result of
+ the division and the remainder.)</p>
+
+<h5>Example:</h5>
+<pre>
+ &lt;result&gt; = srem i32 4, %var <i>; yields {i32}:result = 4 % %var</i>
+</pre>
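+
+<p>To illustrate the sign rule described above (constant-folded values shown in
+ the comments):</p>
+
+<pre>
+ &lt;result&gt; = srem i32 -7, 3 <i>; yields {i32}:result = -1 (sign follows the dividend)</i>
+ &lt;result&gt; = srem i32 7, -3 <i>; yields {i32}:result = 1</i>
+</pre>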
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="i_frem">'<tt>frem</tt>' Instruction</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ &lt;result&gt; = frem &lt;ty&gt; &lt;op1&gt;, &lt;op2&gt; <i>; yields {ty}:result</i>
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>frem</tt>' instruction returns the remainder from the division of
+ its two operands.</p>
+
+<h5>Arguments:</h5>
+<p>The two arguments to the '<tt>frem</tt>' instruction must be
+ <a href="#t_floating">floating point</a> or <a href="#t_vector">vector</a> of
+ floating point values. Both arguments must have identical types.</p>
+
+<h5>Semantics:</h5>
+<p>This instruction returns the <i>remainder</i> of a division. The remainder
+ has the same sign as the dividend.</p>
+
+<h5>Example:</h5>
+<pre>
+ &lt;result&gt; = frem float 4.0, %var <i>; yields {float}:result = 4.0 % %var</i>
+</pre>
+
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="bitwiseops">Bitwise Binary Operations</a>
+</h3>
+
+<div>
+
+<p>Bitwise binary operators are used to do various forms of bit-twiddling in a
+ program. They are generally very efficient instructions and can commonly be
+ strength reduced from other instructions. They require two operands of the
+ same type, execute an operation on them, and produce a single value. The
+ resulting value is the same type as its operands.</p>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="i_shl">'<tt>shl</tt>' Instruction</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ &lt;result&gt; = shl &lt;ty&gt; &lt;op1&gt;, &lt;op2&gt; <i>; yields {ty}:result</i>
+ &lt;result&gt; = shl nuw &lt;ty&gt; &lt;op1&gt;, &lt;op2&gt; <i>; yields {ty}:result</i>
+ &lt;result&gt; = shl nsw &lt;ty&gt; &lt;op1&gt;, &lt;op2&gt; <i>; yields {ty}:result</i>
+ &lt;result&gt; = shl nuw nsw &lt;ty&gt; &lt;op1&gt;, &lt;op2&gt; <i>; yields {ty}:result</i>
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>shl</tt>' instruction returns the first operand shifted to the left
+ a specified number of bits.</p>
+
+<h5>Arguments:</h5>
+<p>Both arguments to the '<tt>shl</tt>' instruction must be the
+ same <a href="#t_integer">integer</a> or <a href="#t_vector">vector</a> of
+ integer type. '<tt>op2</tt>' is treated as an unsigned value.</p>
+
+<h5>Semantics:</h5>
+<p>The value produced is <tt>op1</tt> * 2<sup><tt>op2</tt></sup> mod
+ 2<sup>n</sup>, where <tt>n</tt> is the width of the result. If <tt>op2</tt>
+ is (statically or dynamically) negative or equal to or larger than the number
+ of bits in <tt>op1</tt>, the result is undefined. If the arguments are
+ vectors, each vector element of <tt>op1</tt> is shifted by the corresponding
+ shift amount in <tt>op2</tt>.</p>
+
+<p>If the <tt>nuw</tt> keyword is present, then the shift produces a
+ <a href="#poisonvalues">poison value</a> if it shifts out any non-zero bits. If
+ the <tt>nsw</tt> keyword is present, then the shift produces a
+ <a href="#poisonvalues">poison value</a> if it shifts out any bits that disagree
+ with the resultant sign bit. As such, NUW/NSW have the same semantics as
+ they would if the shift were expressed as a mul instruction with the same
+ nsw/nuw bits in (mul %op1, (shl 1, %op2)).</p>
+
+<h5>Example:</h5>
+<pre>
+ &lt;result&gt; = shl i32 4, %var <i>; yields {i32}: 4 &lt;&lt; %var</i>
+ &lt;result&gt; = shl i32 4, 2 <i>; yields {i32}: 16</i>
+ &lt;result&gt; = shl i32 1, 10 <i>; yields {i32}: 1024</i>
+ &lt;result&gt; = shl i32 1, 32 <i>; undefined</i>
+ &lt;result&gt; = shl &lt;2 x i32&gt; &lt; i32 1, i32 1&gt;, &lt; i32 1, i32 2&gt; <i>; yields: result=&lt;2 x i32&gt; &lt; i32 2, i32 4&gt;</i>
+</pre>
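+
+<p>A sketch of the wrap flags on a shift (<tt>%a</tt> is a hypothetical
+ <tt>i32</tt> value):</p>
+
+<pre>
+ &lt;result&gt; = shl nuw i32 %a, 3 <i>; poison value if any of the top three bits of %a are non-zero</i>
+ &lt;result&gt; = shl nsw i32 %a, 1 <i>; poison value if bits 31 and 30 of %a disagree</i>
+</pre>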
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="i_lshr">'<tt>lshr</tt>' Instruction</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ &lt;result&gt; = lshr &lt;ty&gt; &lt;op1&gt;, &lt;op2&gt; <i>; yields {ty}:result</i>
+ &lt;result&gt; = lshr exact &lt;ty&gt; &lt;op1&gt;, &lt;op2&gt; <i>; yields {ty}:result</i>
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>lshr</tt>' instruction (logical shift right) returns the first
+ operand shifted to the right a specified number of bits with zero fill.</p>
+
+<h5>Arguments:</h5>
+<p>Both arguments to the '<tt>lshr</tt>' instruction must be the same
+ <a href="#t_integer">integer</a> or <a href="#t_vector">vector</a> of integer
+ type. '<tt>op2</tt>' is treated as an unsigned value.</p>
+
+<h5>Semantics:</h5>
+<p>This instruction always performs a logical shift right operation. The most
+ significant bits of the result will be filled with zero bits after the shift.
+ If <tt>op2</tt> is (statically or dynamically) equal to or larger than the
+ number of bits in <tt>op1</tt>, the result is undefined. If the arguments are
+ vectors, each vector element of <tt>op1</tt> is shifted by the corresponding
+ shift amount in <tt>op2</tt>.</p>
+
+<p>If the <tt>exact</tt> keyword is present, the result value of the
+ <tt>lshr</tt> is a <a href="#poisonvalues">poison value</a> if any of the bits
+ shifted out are non-zero.</p>
+
+
+<h5>Example:</h5>
+<pre>
+ &lt;result&gt; = lshr i32 4, 1 <i>; yields {i32}:result = 2</i>
+ &lt;result&gt; = lshr i32 4, 2 <i>; yields {i32}:result = 1</i>
+ &lt;result&gt; = lshr i8 4, 3 <i>; yields {i8}:result = 0</i>
+ &lt;result&gt; = lshr i8 -2, 1 <i>; yields {i8}:result = 0x7F </i>
+ &lt;result&gt; = lshr i32 1, 32 <i>; undefined</i>
+ &lt;result&gt; = lshr &lt;2 x i32&gt; &lt; i32 -2, i32 4&gt;, &lt; i32 1, i32 2&gt; <i>; yields: result=&lt;2 x i32&gt; &lt; i32 0x7FFFFFFF, i32 1&gt;</i>
+</pre>
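+
+<p>A sketch with the <tt>exact</tt> keyword (<tt>%x</tt> is a hypothetical
+ <tt>i32</tt> value):</p>
+
+<pre>
+ &lt;result&gt; = lshr exact i32 %x, 2 <i>; poison value unless the low two bits of %x are zero</i>
+</pre>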
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="i_ashr">'<tt>ashr</tt>' Instruction</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ &lt;result&gt; = ashr &lt;ty&gt; &lt;op1&gt;, &lt;op2&gt; <i>; yields {ty}:result</i>
+ &lt;result&gt; = ashr exact &lt;ty&gt; &lt;op1&gt;, &lt;op2&gt; <i>; yields {ty}:result</i>
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>ashr</tt>' instruction (arithmetic shift right) returns the first
+ operand shifted to the right a specified number of bits with sign
+ extension.</p>
+
+<h5>Arguments:</h5>
+<p>Both arguments to the '<tt>ashr</tt>' instruction must be the same
+ <a href="#t_integer">integer</a> or <a href="#t_vector">vector</a> of integer
+ type. '<tt>op2</tt>' is treated as an unsigned value.</p>
+
+<h5>Semantics:</h5>
+<p>This instruction always performs an arithmetic shift right operation. The
+ most significant bits of the result will be filled with the sign bit
+ of <tt>op1</tt>. If <tt>op2</tt> is (statically or dynamically) equal to or
+ larger than the number of bits in <tt>op1</tt>, the result is undefined. If
+ the arguments are vectors, each vector element of <tt>op1</tt> is shifted by
+ the corresponding shift amount in <tt>op2</tt>.</p>
+
+<p>If the <tt>exact</tt> keyword is present, the result value of the
+ <tt>ashr</tt> is a <a href="#poisonvalues">poison value</a> if any of the bits
+ shifted out are non-zero.</p>
+
+<h5>Example:</h5>
+<pre>
+ &lt;result&gt; = ashr i32 4, 1 <i>; yields {i32}:result = 2</i>
+ &lt;result&gt; = ashr i32 4, 2 <i>; yields {i32}:result = 1</i>
+ &lt;result&gt; = ashr i8 4, 3 <i>; yields {i8}:result = 0</i>
+ &lt;result&gt; = ashr i8 -2, 1 <i>; yields {i8}:result = -1</i>
+ &lt;result&gt; = ashr i32 1, 32 <i>; undefined</i>
+ &lt;result&gt; = ashr &lt;2 x i32&gt; &lt; i32 -2, i32 4&gt;, &lt; i32 1, i32 3&gt; <i>; yields: result=&lt;2 x i32&gt; &lt; i32 -1, i32 0&gt;</i>
+</pre>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="i_and">'<tt>and</tt>' Instruction</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ &lt;result&gt; = and &lt;ty&gt; &lt;op1&gt;, &lt;op2&gt; <i>; yields {ty}:result</i>
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>and</tt>' instruction returns the bitwise logical and of its two
+ operands.</p>
+
+<h5>Arguments:</h5>
+<p>The two arguments to the '<tt>and</tt>' instruction must be
+ <a href="#t_integer">integer</a> or <a href="#t_vector">vector</a> of integer
+ values. Both arguments must have identical types.</p>
+
+<h5>Semantics:</h5>
+<p>The truth table used for the '<tt>and</tt>' instruction is:</p>
+
+<table border="1" cellspacing="0" cellpadding="4">
+ <tbody>
+ <tr>
+ <th>In0</th>
+ <th>In1</th>
+ <th>Out</th>
+ </tr>
+ <tr>
+ <td>0</td>
+ <td>0</td>
+ <td>0</td>
+ </tr>
+ <tr>
+ <td>0</td>
+ <td>1</td>
+ <td>0</td>
+ </tr>
+ <tr>
+ <td>1</td>
+ <td>0</td>
+ <td>0</td>
+ </tr>
+ <tr>
+ <td>1</td>
+ <td>1</td>
+ <td>1</td>
+ </tr>
+ </tbody>
+</table>
+
+<h5>Example:</h5>
+<pre>
+ &lt;result&gt; = and i32 4, %var <i>; yields {i32}:result = 4 &amp; %var</i>
+ &lt;result&gt; = and i32 15, 40 <i>; yields {i32}:result = 8</i>
+ &lt;result&gt; = and i32 4, 8 <i>; yields {i32}:result = 0</i>
+</pre>
+</div>
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="i_or">'<tt>or</tt>' Instruction</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ &lt;result&gt; = or &lt;ty&gt; &lt;op1&gt;, &lt;op2&gt; <i>; yields {ty}:result</i>
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>or</tt>' instruction returns the bitwise logical inclusive or of its
+ two operands.</p>
+
+<h5>Arguments:</h5>
+<p>The two arguments to the '<tt>or</tt>' instruction must be
+ <a href="#t_integer">integer</a> or <a href="#t_vector">vector</a> of integer
+ values. Both arguments must have identical types.</p>
+
+<h5>Semantics:</h5>
+<p>The truth table used for the '<tt>or</tt>' instruction is:</p>
+
+<table border="1" cellspacing="0" cellpadding="4">
+ <tbody>
+ <tr>
+ <th>In0</th>
+ <th>In1</th>
+ <th>Out</th>
+ </tr>
+ <tr>
+ <td>0</td>
+ <td>0</td>
+ <td>0</td>
+ </tr>
+ <tr>
+ <td>0</td>
+ <td>1</td>
+ <td>1</td>
+ </tr>
+ <tr>
+ <td>1</td>
+ <td>0</td>
+ <td>1</td>
+ </tr>
+ <tr>
+ <td>1</td>
+ <td>1</td>
+ <td>1</td>
+ </tr>
+ </tbody>
+</table>
+
+<h5>Example:</h5>
+<pre>
+ &lt;result&gt; = or i32 4, %var <i>; yields {i32}:result = 4 | %var</i>
+ &lt;result&gt; = or i32 15, 40 <i>; yields {i32}:result = 47</i>
+ &lt;result&gt; = or i32 4, 8 <i>; yields {i32}:result = 12</i>
+</pre>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="i_xor">'<tt>xor</tt>' Instruction</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ &lt;result&gt; = xor &lt;ty&gt; &lt;op1&gt;, &lt;op2&gt; <i>; yields {ty}:result</i>
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>xor</tt>' instruction returns the bitwise logical exclusive or of
+ its two operands. The <tt>xor</tt> is used to implement the "one's
+ complement" operation, which is the "~" operator in C.</p>
+
+<h5>Arguments:</h5>
+<p>The two arguments to the '<tt>xor</tt>' instruction must be
+ <a href="#t_integer">integer</a> or <a href="#t_vector">vector</a> of integer
+ values. Both arguments must have identical types.</p>
+
+<h5>Semantics:</h5>
+<p>The truth table used for the '<tt>xor</tt>' instruction is:</p>
+
+<table border="1" cellspacing="0" cellpadding="4">
+ <tbody>
+ <tr>
+ <th>In0</th>
+ <th>In1</th>
+ <th>Out</th>
+ </tr>
+ <tr>
+ <td>0</td>
+ <td>0</td>
+ <td>0</td>
+ </tr>
+ <tr>
+ <td>0</td>
+ <td>1</td>
+ <td>1</td>
+ </tr>
+ <tr>
+ <td>1</td>
+ <td>0</td>
+ <td>1</td>
+ </tr>
+ <tr>
+ <td>1</td>
+ <td>1</td>
+ <td>0</td>
+ </tr>
+ </tbody>
+</table>
+
+<h5>Example:</h5>
+<pre>
+ &lt;result&gt; = xor i32 4, %var <i>; yields {i32}:result = 4 ^ %var</i>
+ &lt;result&gt; = xor i32 15, 40 <i>; yields {i32}:result = 39</i>
+ &lt;result&gt; = xor i32 4, 8 <i>; yields {i32}:result = 12</i>
+ &lt;result&gt; = xor i32 %V, -1 <i>; yields {i32}:result = ~%V</i>
+</pre>
+
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="vectorops">Vector Operations</a>
+</h3>
+
+<div>
+
+<p>LLVM supports several instructions to represent vector operations in a
+ target-independent manner. These instructions cover the element-access and
+ vector-specific operations needed to process vectors effectively. While LLVM
+ does directly support these vector operations, many sophisticated algorithms
+ will want to use target-specific intrinsics to take full advantage of a
+ specific target.</p>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="i_extractelement">'<tt>extractelement</tt>' Instruction</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ &lt;result&gt; = extractelement &lt;n x &lt;ty&gt;&gt; &lt;val&gt;, i32 &lt;idx&gt; <i>; yields &lt;ty&gt;</i>
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>extractelement</tt>' instruction extracts a single scalar element
+ from a vector at a specified index.</p>
+
+
+<h5>Arguments:</h5>
+<p>The first operand of an '<tt>extractelement</tt>' instruction is a value
+ of <a href="#t_vector">vector</a> type. The second operand is an index
+ indicating the position from which to extract the element. The index may be
+ a variable.</p>
+
+<h5>Semantics:</h5>
+<p>The result is a scalar of the same type as the element type of
+ <tt>val</tt>. Its value is the value at position <tt>idx</tt> of
+ <tt>val</tt>. If <tt>idx</tt> exceeds the length of <tt>val</tt>, the
+ results are undefined.</p>
+
+<h5>Example:</h5>
+<pre>
+ &lt;result&gt; = extractelement &lt;4 x i32&gt; %vec, i32 0 <i>; yields i32</i>
+</pre>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="i_insertelement">'<tt>insertelement</tt>' Instruction</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ &lt;result&gt; = insertelement &lt;n x &lt;ty&gt;&gt; &lt;val&gt;, &lt;ty&gt; &lt;elt&gt;, i32 &lt;idx&gt; <i>; yields &lt;n x &lt;ty&gt;&gt;</i>
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>insertelement</tt>' instruction inserts a scalar element into a
+ vector at a specified index.</p>
+
+<h5>Arguments:</h5>
+<p>The first operand of an '<tt>insertelement</tt>' instruction is a value
+ of <a href="#t_vector">vector</a> type. The second operand is a scalar value
+ whose type must equal the element type of the first operand. The third
+ operand is an index indicating the position at which to insert the value.
+ The index may be a variable.</p>
+
+<h5>Semantics:</h5>
+<p>The result is a vector of the same type as <tt>val</tt>. Its element values
+ are those of <tt>val</tt> except at position <tt>idx</tt>, where it gets the
+ value <tt>elt</tt>. If <tt>idx</tt> exceeds the length of <tt>val</tt>, the
+ results are undefined.</p>
+
+<h5>Example:</h5>
+<pre>
+ &lt;result&gt; = insertelement &lt;4 x i32&gt; %vec, i32 1, i32 0 <i>; yields &lt;4 x i32&gt;</i>
+</pre>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="i_shufflevector">'<tt>shufflevector</tt>' Instruction</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ &lt;result&gt; = shufflevector &lt;n x &lt;ty&gt;&gt; &lt;v1&gt;, &lt;n x &lt;ty&gt;&gt; &lt;v2&gt;, &lt;m x i32&gt; &lt;mask&gt; <i>; yields &lt;m x &lt;ty&gt;&gt;</i>
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>shufflevector</tt>' instruction constructs a permutation of elements
+ from two input vectors, returning a vector with the same element type as the
+ input vectors and a length equal to that of the shuffle mask.</p>
+
+<h5>Arguments:</h5>
+<p>The first two operands of a '<tt>shufflevector</tt>' instruction are vectors
+ with the same type. The third argument is a shuffle mask whose
+ element type is always 'i32'. The result of the instruction is a vector
+ whose length is the same as the shuffle mask and whose element type is the
+ same as the element type of the first two operands.</p>
+
+<p>The shuffle mask operand is required to be a constant vector with either
+ constant integer or undef values.</p>
+
+<h5>Semantics:</h5>
+<p>The elements of the two input vectors are numbered from left to right across
+ both of the vectors. The shuffle mask operand specifies, for each element of
+ the result vector, which element of the two input vectors the result element
+ gets. The element selector may be undef (meaning "don't care") and the
+ second operand may be undef if performing a shuffle from only one vector.</p>
+
+<h5>Example:</h5>
+<pre>
+ &lt;result&gt; = shufflevector &lt;4 x i32&gt; %v1, &lt;4 x i32&gt; %v2,
+ &lt;4 x i32&gt; &lt;i32 0, i32 4, i32 1, i32 5&gt; <i>; yields &lt;4 x i32&gt;</i>
+ &lt;result&gt; = shufflevector &lt;4 x i32&gt; %v1, &lt;4 x i32&gt; undef,
+ &lt;4 x i32&gt; &lt;i32 0, i32 1, i32 2, i32 3&gt; <i>; yields &lt;4 x i32&gt;</i> - Identity shuffle.
+ &lt;result&gt; = shufflevector &lt;8 x i32&gt; %v1, &lt;8 x i32&gt; undef,
+ &lt;4 x i32&gt; &lt;i32 0, i32 1, i32 2, i32 3&gt; <i>; yields &lt;4 x i32&gt;</i>
+ &lt;result&gt; = shufflevector &lt;4 x i32&gt; %v1, &lt;4 x i32&gt; %v2,
+ &lt;8 x i32&gt; &lt;i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7 &gt; <i>; yields &lt;8 x i32&gt;</i>
+</pre>
+
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="aggregateops">Aggregate Operations</a>
+</h3>
+
+<div>
+
+<p>LLVM supports several instructions for working with
+ <a href="#t_aggregate">aggregate</a> values.</p>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="i_extractvalue">'<tt>extractvalue</tt>' Instruction</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ &lt;result&gt; = extractvalue &lt;aggregate type&gt; &lt;val&gt;, &lt;idx&gt;{, &lt;idx&gt;}*
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>extractvalue</tt>' instruction extracts the value of a member field
+ from an <a href="#t_aggregate">aggregate</a> value.</p>
+
+<h5>Arguments:</h5>
+<p>The first operand of an '<tt>extractvalue</tt>' instruction is a value
+ of <a href="#t_struct">struct</a> or
+ <a href="#t_array">array</a> type. The operands are constant indices to
+ specify which value to extract in a similar manner as indices in a
+ '<tt><a href="#i_getelementptr">getelementptr</a></tt>' instruction.</p>
+ <p>The major differences to <tt>getelementptr</tt> indexing are:</p>
+ <ul>
+ <li>Since the value being indexed is not a pointer, the first index is
+ omitted and assumed to be zero.</li>
+ <li>At least one index must be specified.</li>
+ <li>Not only struct indices but also array indices must be in
+ bounds.</li>
+ </ul>
+
+<h5>Semantics:</h5>
+<p>The result is the value at the position in the aggregate specified by the
+ index operands.</p>
+
+<h5>Example:</h5>
+<pre>
+ &lt;result&gt; = extractvalue {i32, float} %agg, 0 <i>; yields i32</i>
+</pre>
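+
+<p>Multiple indices may be used to reach into nested aggregates, in the same
+ left-to-right order as <tt>getelementptr</tt> (a sketch; <tt>%agg</tt> is a
+ hypothetical value of the type shown):</p>
+
+<pre>
+ &lt;result&gt; = extractvalue {i32, {float, i8}} %agg, 1, 0 <i>; yields float</i>
+</pre>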
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="i_insertvalue">'<tt>insertvalue</tt>' Instruction</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ &lt;result&gt; = insertvalue &lt;aggregate type&gt; &lt;val&gt;, &lt;ty&gt; &lt;elt&gt;, &lt;idx&gt;{, &lt;idx&gt;}* <i>; yields &lt;aggregate type&gt;</i>
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>insertvalue</tt>' instruction inserts a value into a member field
+ in an <a href="#t_aggregate">aggregate</a> value.</p>
+
+<h5>Arguments:</h5>
+<p>The first operand of an '<tt>insertvalue</tt>' instruction is a value
+ of <a href="#t_struct">struct</a> or
+ <a href="#t_array">array</a> type. The second operand is a first-class
+ value to insert. The following operands are constant indices indicating
+ the position at which to insert the value in a similar manner as indices in a
+ '<tt><a href="#i_extractvalue">extractvalue</a></tt>' instruction. The
+ value to insert must have the same type as the value identified by the
+ indices.</p>
+
+<h5>Semantics:</h5>
+<p>The result is an aggregate of the same type as <tt>val</tt>. Its value is
+ that of <tt>val</tt> except that the value at the position specified by the
+ indices is that of <tt>elt</tt>.</p>
+
+<h5>Example:</h5>
+<pre>
+ %agg1 = insertvalue {i32, float} undef, i32 1, 0 <i>; yields {i32 1, float undef}</i>
+ %agg2 = insertvalue {i32, float} %agg1, float %val, 1 <i>; yields {i32 1, float %val}</i>
+ %agg3 = insertvalue {i32, {float}} undef, float %val, 1, 0 <i>; yields {i32 undef, {float %val}}</i>
+</pre>
+
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="memoryops">Memory Access and Addressing Operations</a>
+</h3>
+
+<div>
+
+<p>A key design point of an SSA-based representation is how it represents
+ memory. In LLVM, no memory locations are in SSA form, which makes things
+ very simple. This section describes how to read, write, and allocate
+ memory in LLVM.</p>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="i_alloca">'<tt>alloca</tt>' Instruction</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ &lt;result&gt; = alloca &lt;type&gt;[, &lt;ty&gt; &lt;NumElements&gt;][, align &lt;alignment&gt;] <i>; yields {type*}:result</i>
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>alloca</tt>' instruction allocates memory on the stack frame of the
+ currently executing function, to be automatically released when this function
+ returns to its caller. The object is always allocated in the generic address
+ space (address space zero).</p>
+
+<h5>Arguments:</h5>
+<p>The '<tt>alloca</tt>' instruction
+ allocates <tt>sizeof(&lt;type&gt;)*NumElements</tt> bytes of memory on the
+ runtime stack, returning a pointer of the appropriate type to the program.
+ If "NumElements" is specified, it is the number of elements allocated,
+ otherwise "NumElements" is defaulted to be one. If a constant alignment is
+ specified, the value result of the allocation is guaranteed to be aligned to
+ at least that boundary. If not specified, or if zero, the target can choose
+ to align the allocation on any convenient boundary compatible with the
+ type.</p>
+
+<p>'<tt>type</tt>' may be any sized type.</p>
+
+<h5>Semantics:</h5>
+<p>Memory is allocated; a pointer is returned. The operation is undefined if
+ there is insufficient stack space for the allocation. '<tt>alloca</tt>'d
+ memory is automatically released when the function returns. The
+ '<tt>alloca</tt>' instruction is commonly used to represent automatic
+ variables that must have an address available. When the function returns
+ (either with the <tt><a href="#i_ret">ret</a></tt>
+ or <tt><a href="#i_resume">resume</a></tt> instructions), the memory is
+ reclaimed. Allocating zero bytes is legal, but the result is undefined.
+ The order in which memory is allocated (i.e., which way the stack grows) is
+ not specified.</p>
+
+<h5>Example:</h5>
+<pre>
+ %ptr = alloca i32 <i>; yields {i32*}:ptr</i>
+ %ptr = alloca i32, i32 4 <i>; yields {i32*}:ptr</i>
+ %ptr = alloca i32, i32 4, align 1024 <i>; yields {i32*}:ptr</i>
+ %ptr = alloca i32, align 1024 <i>; yields {i32*}:ptr</i>
+</pre>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="i_load">'<tt>load</tt>' Instruction</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ &lt;result&gt; = load [volatile] &lt;ty&gt;* &lt;pointer&gt;[, align &lt;alignment&gt;][, !nontemporal !&lt;index&gt;][, !invariant.load !&lt;index&gt;]
+ &lt;result&gt; = load atomic [volatile] &lt;ty&gt;* &lt;pointer&gt; [singlethread] &lt;ordering&gt;, align &lt;alignment&gt;
+ !&lt;index&gt; = !{ i32 1 }
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>load</tt>' instruction is used to read from memory.</p>
+
+<h5>Arguments:</h5>
+<p>The argument to the '<tt>load</tt>' instruction specifies the memory address
+ from which to load. The pointer must point to
+ a <a href="#t_firstclass">first class</a> type. If the <tt>load</tt> is
+ marked as <tt>volatile</tt>, then the optimizer is not allowed to modify the
+ number or order of execution of this <tt>load</tt> with other <a
+ href="#volatile">volatile operations</a>.</p>
+
+<p>If the <code>load</code> is marked as <code>atomic</code>, it takes an extra
+ <a href="#ordering">ordering</a> and optional <code>singlethread</code>
+ argument. The <code>release</code> and <code>acq_rel</code> orderings are
+ not valid on <code>load</code> instructions. Atomic loads produce <a
+ href="#memorymodel">defined</a> results when they may see multiple atomic
+ stores. The type of the pointee must be an integer type whose bit width
+ is a power of two greater than or equal to eight and less than or equal
+ to a target-specific size limit. <code>align</code> must be explicitly
+ specified on atomic loads, and the load has undefined behavior if the
+ alignment is not set to a value which is at least the size in bytes of
+ the pointee. <code>!nontemporal</code> does not have any defined semantics
+ for atomic loads.</p>
+
+<p>The optional constant <tt>align</tt> argument specifies the alignment of the
+ operation (that is, the alignment of the memory address). A value of 0 or an
+ omitted <tt>align</tt> argument means that the operation has the preferential
+ alignment for the target. It is the responsibility of the code emitter to
+ ensure that the alignment information is correct. Overestimating the
+ alignment results in undefined behavior. Underestimating the alignment may
+ produce less efficient code. An alignment of 1 is always safe.</p>
+
+<p>The optional <tt>!nontemporal</tt> metadata must reference a single
+ metadata name &lt;index&gt; corresponding to a metadata node with
+ one <tt>i32</tt> entry of value 1. The existence of
+ the <tt>!nontemporal</tt> metadata on the instruction tells the optimizer
+ and code generator that this load is not expected to be reused in the cache.
+ The code generator may select special instructions to save cache bandwidth,
+ such as the <tt>MOVNT</tt> instruction on x86.</p>
+
+<p>The optional <tt>!invariant.load</tt> metadata must reference a single
+ metadata name &lt;index&gt; corresponding to a metadata node with no
+ entries. The existence of the <tt>!invariant.load</tt> metadata on the
+ instruction tells the optimizer and code generator that this load address
+ points to memory which does not change value during program execution.
+ The optimizer may then move this load around, for example, by hoisting it
+ out of loops using loop invariant code motion.</p>
+
+<h5>Semantics:</h5>
+<p>The location of memory pointed to is loaded. If the value being loaded is of
+ scalar type then the number of bytes read does not exceed the minimum number
+ of bytes needed to hold all bits of the type. For example, loading an
+ <tt>i24</tt> reads at most three bytes. When loading a value of a type like
+ <tt>i20</tt> with a size that is not an integral number of bytes, the result
+ is undefined if the value was not originally written using a store of the
+ same type.</p>
+
+<h5>Examples:</h5>
+<pre>
+ %ptr = <a href="#i_alloca">alloca</a> i32 <i>; yields {i32*}:ptr</i>
+ <a href="#i_store">store</a> i32 3, i32* %ptr <i>; yields {void}</i>
+ %val = load i32* %ptr <i>; yields {i32}:val = i32 3</i>
+</pre>
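+
+<p>A sketch of an atomic load following the syntax above (<tt>%ptr</tt> is a
+ hypothetical <tt>i32*</tt> value; note that <tt>align</tt> is mandatory on
+ atomic loads):</p>
+
+<pre>
+ %val = load atomic i32* %ptr acquire, align 4 <i>; yields {i32}:val</i>
+</pre>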
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="i_store">'<tt>store</tt>' Instruction</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ store [volatile] &lt;ty&gt; &lt;value&gt;, &lt;ty&gt;* &lt;pointer&gt;[, align &lt;alignment&gt;][, !nontemporal !&lt;index&gt;] <i>; yields {void}</i>
+ store atomic [volatile] &lt;ty&gt; &lt;value&gt;, &lt;ty&gt;* &lt;pointer&gt; [singlethread] &lt;ordering&gt;, align &lt;alignment&gt; <i>; yields {void}</i>
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>store</tt>' instruction is used to write to memory.</p>
+
+<h5>Arguments:</h5>
+<p>There are two arguments to the '<tt>store</tt>' instruction: a value to store
+ and an address at which to store it. The type of the
+ '<tt>&lt;pointer&gt;</tt>' operand must be a pointer to
+ the <a href="#t_firstclass">first class</a> type of the
+ '<tt>&lt;value&gt;</tt>' operand. If the <tt>store</tt> is marked as
+ <tt>volatile</tt>, then the optimizer is not allowed to modify the number or
+ order of execution of this <tt>store</tt> with other <a
+ href="#volatile">volatile operations</a>.</p>
+
+<p>If the <code>store</code> is marked as <code>atomic</code>, it takes an extra
+ <a href="#ordering">ordering</a> and optional <code>singlethread</code>
+ argument. The <code>acquire</code> and <code>acq_rel</code> orderings aren't
+ valid on <code>store</code> instructions. Atomic loads produce <a
+ href="#memorymodel">defined</a> results when they may see multiple atomic
+ stores. The type of the pointee must be an integer type whose bit width
+ is a power of two greater than or equal to eight and less than or equal
+ to a target-specific size limit. <code>align</code> must be explicitly
+ specified on atomic stores, and the store has undefined behavior if the
+ alignment is not set to a value which is at least the size in bytes of
+ the pointee. <code>!nontemporal</code> does not have any defined semantics
+ for atomic stores.</p>
+
+<p>The optional constant "align" argument specifies the alignment of the
+ operation (that is, the alignment of the memory address). A value of 0 or an
+ omitted "align" argument means that the operation has the preferential
+ alignment for the target. It is the responsibility of the code emitter to
+ ensure that the alignment information is correct. Overestimating the
+ alignment results in undefined behavior. Underestimating the alignment may
+ produce less efficient code. An alignment of 1 is always safe.</p>
+
+<p>The optional !nontemporal metadata must reference a single metadata
+ name &lt;index&gt; corresponding to a metadata node with one i32 entry of
+ value 1. The existence of the !nontemporal metadata on the
+ instruction tells the optimizer and code generator that this store is
+ not expected to be reused in the cache. The code generator may
+ select special instructions to save cache bandwidth, such as the
+ MOVNT instruction on x86.</p>
+
+
+<h5>Semantics:</h5>
+<p>The contents of memory are updated to contain '<tt>&lt;value&gt;</tt>' at the
+ location specified by the '<tt>&lt;pointer&gt;</tt>' operand. If
+ '<tt>&lt;value&gt;</tt>' is of scalar type then the number of bytes written
+ does not exceed the minimum number of bytes needed to hold all bits of the
+ type. For example, storing an <tt>i24</tt> writes at most three bytes. When
+ writing a value of a type like <tt>i20</tt> with a size that is not an
+ integral number of bytes, it is unspecified what happens to the extra bits
+ that do not belong to the type, but they will typically be overwritten.</p>
+
+<h5>Example:</h5>
+<pre>
+ %ptr = <a href="#i_alloca">alloca</a> i32 <i>; yields {i32*}:ptr</i>
+ store i32 3, i32* %ptr <i>; yields {void}</i>
+ %val = <a href="#i_load">load</a> i32* %ptr <i>; yields {i32}:val = i32 3</i>
+</pre>
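+
+<p>A sketch of an atomic store following the syntax above (<tt>%ptr</tt> is a
+ hypothetical <tt>i32*</tt> value; <tt>align</tt> is mandatory on atomic
+ stores):</p>
+
+<pre>
+ store atomic i32 3, i32* %ptr release, align 4 <i>; yields {void}</i>
+</pre>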
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+<a name="i_fence">'<tt>fence</tt>' Instruction</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ fence [singlethread] &lt;ordering&gt; <i>; yields {void}</i>
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>fence</tt>' instruction is used to introduce happens-before edges
+between operations.</p>
+
+<h5>Arguments:</h5> <p>'<code>fence</code>' instructions take an <a
+href="#ordering">ordering</a> argument which defines what
+<i>synchronizes-with</i> edges they add. They can only be given
+<code>acquire</code>, <code>release</code>, <code>acq_rel</code>, and
+<code>seq_cst</code> orderings.</p>
+
+<h5>Semantics:</h5>
+<p>A fence <var>A</var> which has (at least) <code>release</code> ordering
+semantics <i>synchronizes with</i> a fence <var>B</var> with (at least)
+<code>acquire</code> ordering semantics if and only if there exist atomic
+operations <var>X</var> and <var>Y</var>, both operating on some atomic object
+<var>M</var>, such that <var>A</var> is sequenced before <var>X</var>,
+<var>X</var> modifies <var>M</var> (either directly or through some side effect
+of a sequence headed by <var>X</var>), <var>Y</var> is sequenced before
+<var>B</var>, and <var>Y</var> observes <var>M</var>. This provides a
+<i>happens-before</i> dependency between <var>A</var> and <var>B</var>. Rather
+than an explicit <code>fence</code>, one (but not both) of the atomic operations
+<var>X</var> or <var>Y</var> might provide a <code>release</code> or
+<code>acquire</code> (resp.) ordering constraint and still
+<i>synchronize-with</i> the explicit <code>fence</code> and establish the
+<i>happens-before</i> edge.</p>
+
+<p>A <code>fence</code> which has <code>seq_cst</code> ordering, in addition to
+having both <code>acquire</code> and <code>release</code> semantics specified
+above, participates in the global program order of other <code>seq_cst</code>
+operations and/or fences.</p>
+
+<p>The optional "<a href="#singlethread"><code>singlethread</code></a>" argument
+specifies that the fence only synchronizes with other fences in the same
+thread. (This is useful for interacting with signal handlers.)</p>
+
+<h5>Example:</h5>
+<pre>
+ fence acquire <i>; yields {void}</i>
+ fence singlethread seq_cst <i>; yields {void}</i>
+</pre>
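+
+<p>The following sketch shows the publish/consume pattern described above. It
+assumes two hypothetical globals, <tt>@data</tt> and <tt>@flag</tt>; if the
+load of <tt>@flag</tt> in thread 2 observes the store from thread 1, the
+release fence synchronizes with the acquire fence, and the plain load of
+<tt>@data</tt> is guaranteed to see the value 42.</p>
+
+<pre>
+ <i>; thread 1</i>
+ store i32 42, i32* @data
+ fence release
+ store atomic i32 1, i32* @flag monotonic, align 4 <i>; X: modifies @flag</i>
+
+ <i>; thread 2</i>
+ %r = load atomic i32* @flag monotonic, align 4 <i>; Y: observes @flag</i>
+ fence acquire
+ %d = load i32* @data <i>; sees 42 if %r is 1</i>
+</pre>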
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+<a name="i_cmpxchg">'<tt>cmpxchg</tt>' Instruction</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ cmpxchg [volatile] &lt;ty&gt;* &lt;pointer&gt;, &lt;ty&gt; &lt;cmp&gt;, &lt;ty&gt; &lt;new&gt; [singlethread] &lt;ordering&gt; <i>; yields {ty}</i>
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>cmpxchg</tt>' instruction is used to atomically modify memory.
+It loads a value in memory and compares it to a given value. If they are
+equal, it stores a new value into the memory.</p>
+
+<h5>Arguments:</h5>
+<p>There are three arguments to the '<code>cmpxchg</code>' instruction: an
+address to operate on, a value to compare to the value currently at that
+address, and a new value to place at that address if the compared values are
+equal. The type of '<var>&lt;cmp&gt;</var>' must be an integer type whose
+bit width is a power of two greater than or equal to eight and less than
+or equal to a target-specific size limit. '<var>&lt;cmp&gt;</var>' and
+'<var>&lt;new&gt;</var>' must have the same type, and the type of
+'<var>&lt;pointer&gt;</var>' must be a pointer to that type. If the
+<code>cmpxchg</code> is marked as <code>volatile</code>, then the
+optimizer is not allowed to modify the number or order of execution
+of this <code>cmpxchg</code> with other <a href="#volatile">volatile
+operations</a>.</p>
+
+<!-- FIXME: Extend allowed types. -->
+
+<p>The <a href="#ordering"><var>ordering</var></a> argument specifies how this
+<code>cmpxchg</code> synchronizes with other atomic operations.</p>
+
+<p>The optional "<code>singlethread</code>" argument declares that the
+<code>cmpxchg</code> is only atomic with respect to code (usually signal
+handlers) running in the same thread as the <code>cmpxchg</code>. Otherwise the
+cmpxchg is atomic with respect to all other code in the system.</p>
+
+<p>The pointer passed into cmpxchg must have alignment greater than or equal to
+the size in memory of the operand.</p>
+
+<h5>Semantics:</h5>
+<p>The contents of memory at the location specified by the
+'<tt>&lt;pointer&gt;</tt>' operand is read and compared to
+'<tt>&lt;cmp&gt;</tt>'; if the read value is equal,
+'<tt>&lt;new&gt;</tt>' is written. The original value at the location
+is returned.</p>
+
+<p>A successful <code>cmpxchg</code> is a read-modify-write instruction for the
+purpose of identifying <a href="#release_sequence">release sequences</a>. A
+failed <code>cmpxchg</code> is equivalent to an atomic load with an ordering
+parameter determined by dropping any <code>release</code> part of the
+<code>cmpxchg</code>'s ordering.</p>
+
+<!--
+FIXME: Is compare_exchange_weak() necessary? (Consider after we've done
+optimization work on ARM.)
+
+FIXME: Is a weaker ordering constraint on failure helpful in practice?
+-->
+
+<h5>Example:</h5>
+<pre>
+entry:
+ %orig = <a href="#i_load">load</a> atomic i32* %ptr unordered, align 4 <i>; yields {i32}</i>
+ <a href="#i_br">br</a> label %loop
+
+loop:
+ %cmp = <a href="#i_phi">phi</a> i32 [ %orig, %entry ], [%old, %loop]
+ %squared = <a href="#i_mul">mul</a> i32 %cmp, %cmp
+ %old = cmpxchg i32* %ptr, i32 %cmp, i32 %squared acq_rel <i>; yields {i32}</i>
+ %success = <a href="#i_icmp">icmp</a> eq i32 %cmp, %old
+ <a href="#i_br">br</a> i1 %success, label %done, label %loop
+
+done:
+ ...
+</pre>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+<a name="i_atomicrmw">'<tt>atomicrmw</tt>' Instruction</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ atomicrmw [volatile] &lt;operation&gt; &lt;ty&gt;* &lt;pointer&gt;, &lt;ty&gt; &lt;value&gt; [singlethread] &lt;ordering&gt; <i>; yields {ty}</i>
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>atomicrmw</tt>' instruction is used to atomically modify memory.</p>
+
+<h5>Arguments:</h5>
+<p>There are three arguments to the '<code>atomicrmw</code>' instruction: an
+operation to apply, an address whose value to modify, and an argument to the
+operation. The operation must be one of the following keywords:</p>
+<ul>
+ <li>xchg</li>
+ <li>add</li>
+ <li>sub</li>
+ <li>and</li>
+ <li>nand</li>
+ <li>or</li>
+ <li>xor</li>
+ <li>max</li>
+ <li>min</li>
+ <li>umax</li>
+ <li>umin</li>
+</ul>
+
+<p>The type of '<var>&lt;value&gt;</var>' must be an integer type whose
+bit width is a power of two greater than or equal to eight and less than
+or equal to a target-specific size limit. The type of the
+'<code>&lt;pointer&gt;</code>' operand must be a pointer to that type.
+If the <code>atomicrmw</code> is marked as <code>volatile</code>, then the
+optimizer is not allowed to modify the number or order of execution of this
+<code>atomicrmw</code> with other <a href="#volatile">volatile
+ operations</a>.</p>
+
+<!-- FIXME: Extend allowed types. -->
+
+<h5>Semantics:</h5>
+<p>The contents of memory at the location specified by the
+'<tt>&lt;pointer&gt;</tt>' operand are atomically read, modified, and written
+back. The original value at the location is returned. The modification is
+specified by the <var>operation</var> argument:</p>
+
+<ul>
+ <li>xchg: <code>*ptr = val</code></li>
+ <li>add: <code>*ptr = *ptr + val</code></li>
+ <li>sub: <code>*ptr = *ptr - val</code></li>
+ <li>and: <code>*ptr = *ptr &amp; val</code></li>
+ <li>nand: <code>*ptr = ~(*ptr &amp; val)</code></li>
+ <li>or: <code>*ptr = *ptr | val</code></li>
+ <li>xor: <code>*ptr = *ptr ^ val</code></li>
+ <li>max: <code>*ptr = *ptr &gt; val ? *ptr : val</code> (using a signed comparison)</li>
+ <li>min: <code>*ptr = *ptr &lt; val ? *ptr : val</code> (using a signed comparison)</li>
+ <li>umax: <code>*ptr = *ptr &gt; val ? *ptr : val</code> (using an unsigned comparison)</li>
+ <li>umin: <code>*ptr = *ptr &lt; val ? *ptr : val</code> (using an unsigned comparison)</li>
+</ul>
+
+<h5>Example:</h5>
+<pre>
+ %old = atomicrmw add i32* %ptr, i32 1 acquire <i>; yields {i32}</i>
+</pre>
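+
+<p>A couple of additional sketches using other operations and the optional
+<tt>singlethread</tt> argument (<tt>%ptr</tt> and <tt>%x</tt> are hypothetical
+values):</p>
+
+<pre>
+ %old = atomicrmw xchg i32* %ptr, i32 0 seq_cst <i>; yields {i32}</i>
+ %old2 = atomicrmw max i32* %ptr, i32 %x singlethread monotonic <i>; yields {i32}</i>
+</pre>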
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="i_getelementptr">'<tt>getelementptr</tt>' Instruction</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ &lt;result&gt; = getelementptr &lt;pty&gt;* &lt;ptrval&gt;{, &lt;ty&gt; &lt;idx&gt;}*
+ &lt;result&gt; = getelementptr inbounds &lt;pty&gt;* &lt;ptrval&gt;{, &lt;ty&gt; &lt;idx&gt;}*
+ &lt;result&gt; = getelementptr &lt;ptr vector&gt; ptrval, &lt;vector index type&gt; idx
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>getelementptr</tt>' instruction is used to get the address of a
+ subelement of an <a href="#t_aggregate">aggregate</a> data structure.
+ It performs address calculation only and does not access memory.</p>
+
+<h5>Arguments:</h5>
+<p>The first argument is always a pointer or a vector of pointers,
+ and forms the basis of the
+ calculation. The remaining arguments are indices that indicate which of the
+ elements of the aggregate object are indexed. The interpretation of each
+ index is dependent on the type being indexed into. The first index always
+ indexes the pointer value given as the first argument, the second index
+ indexes a value of the type pointed to (not necessarily the value directly
+ pointed to, since the first index can be non-zero), etc. The first type
+ indexed into must be a pointer value, subsequent types can be arrays,
+ vectors, and structs. Note that subsequent types being indexed into
+ can never be pointers, since that would require loading the pointer before
+ continuing calculation.</p>
+
+<p>The type of each index argument depends on the type it is indexing into.
+ When indexing into a (optionally packed) structure, only <tt>i32</tt>
+ integer <b>constants</b> are allowed. When indexing into an array, pointer
+ or vector, integers of any width are allowed, and they are not required to be
+ constant. These integers are treated as signed values where relevant.</p>
+
+<p>For example, let's consider a C code fragment and how it gets compiled to
+ LLVM:</p>
+
+<pre class="doc_code">
+struct RT {
+ char A;
+ int B[10][20];
+ char C;
+};
+struct ST {
+ int X;
+ double Y;
+ struct RT Z;
+};
+
+int *foo(struct ST *s) {
+ return &amp;s[1].Z.B[5][13];
+}
+</pre>
+
+<p>The LLVM code generated by Clang is:</p>
+
+<pre class="doc_code">
+%struct.RT = <a href="#namedtypes">type</a> { i8, [10 x [20 x i32]], i8 }
+%struct.ST = <a href="#namedtypes">type</a> { i32, double, %struct.RT }
+
+define i32* @foo(%struct.ST* %s) nounwind uwtable readnone optsize ssp {
+entry:
+ %arrayidx = getelementptr inbounds %struct.ST* %s, i64 1, i32 2, i32 1, i64 5, i64 13
+ ret i32* %arrayidx
+}
+</pre>
+
+<h5>Semantics:</h5>
+<p>In the example above, the first index is indexing into the
+ '<tt>%struct.ST*</tt>' type, which is a pointer, yielding a
+ '<tt>%struct.ST</tt>' = '<tt>{ i32, double, %struct.RT }</tt>' type, a
+ structure. The second index indexes into the third element of the structure,
+ yielding a '<tt>%struct.RT</tt>' = '<tt>{ i8 , [10 x [20 x i32]], i8 }</tt>'
+ type, another structure. The third index indexes into the second element of
+ the structure, yielding a '<tt>[10 x [20 x i32]]</tt>' type, an array. The
+ two dimensions of the array are subscripted into, yielding an '<tt>i32</tt>'
+ type. The '<tt>getelementptr</tt>' instruction returns a pointer to this
+ element, thus computing a value of '<tt>i32*</tt>' type.</p>
+
+<p>Note that it is perfectly legal to index partially through a structure,
+ returning a pointer to an inner element. Because of this, the LLVM code for
+ the given testcase is equivalent to:</p>
+
+<pre class="doc_code">
+define i32* @foo(%struct.ST* %s) {
+ %t1 = getelementptr %struct.ST* %s, i32 1 <i>; yields %struct.ST*:%t1</i>
+ %t2 = getelementptr %struct.ST* %t1, i32 0, i32 2 <i>; yields %struct.RT*:%t2</i>
+ %t3 = getelementptr %struct.RT* %t2, i32 0, i32 1 <i>; yields [10 x [20 x i32]]*:%t3</i>
+ %t4 = getelementptr [10 x [20 x i32]]* %t3, i32 0, i32 5 <i>; yields [20 x i32]*:%t4</i>
+ %t5 = getelementptr [20 x i32]* %t4, i32 0, i32 13 <i>; yields i32*:%t5</i>
+ ret i32* %t5
+}
+</pre>
+
+<p>If the <tt>inbounds</tt> keyword is present, the result value of the
+ <tt>getelementptr</tt> is a <a href="#poisonvalues">poison value</a> if the
+ base pointer is not an <i>in bounds</i> address of an allocated object,
+ or if any of the addresses that would be formed by successive addition of
+ the offsets implied by the indices to the base address with infinitely
+ precise signed arithmetic are not an <i>in bounds</i> address of that
+ allocated object. The <i>in bounds</i> addresses for an allocated object
+ are all the addresses that point into the object, plus the address one
+ byte past the end.
+ In cases where the base is a vector of pointers the <tt>inbounds</tt> keyword
+ applies to each of the computations element-wise. </p>
+
+<p>If the <tt>inbounds</tt> keyword is not present, the offsets are added to
+ the base address with silently-wrapping two's complement arithmetic. If the
+ offsets have a different width from the pointer, they are sign-extended or
+ truncated to the width of the pointer. The result value of the
+ <tt>getelementptr</tt> may be outside the object pointed to by the base
+ pointer. The result value may not necessarily be used to access memory
+ though, even if it happens to point into allocated storage. See the
+ <a href="#pointeraliasing">Pointer Aliasing Rules</a> section for more
+ information.</p>
+
+<p>The getelementptr instruction is often confusing. For some more insight into
+ how it works, see <a href="GetElementPtr.html">the getelementptr FAQ</a>.</p>
+
+<h5>Example:</h5>
+<pre>
+ <i>; yields [12 x i8]*:aptr</i>
+ %aptr = getelementptr {i32, [12 x i8]}* %saptr, i64 0, i32 1
+ <i>; yields i8*:vptr</i>
+ %vptr = getelementptr {i32, &lt;2 x i8&gt;}* %svptr, i64 0, i32 1, i32 1
+ <i>; yields i8*:eptr</i>
+ %eptr = getelementptr [12 x i8]* %aptr, i64 0, i32 1
+ <i>; yields i32*:iptr</i>
+ %iptr = getelementptr [10 x i32]* @arr, i16 0, i16 0
+</pre>
+
+<p>In cases where the pointer argument is a vector of pointers, only a
+ single index may be used, and it must be a vector with the same number of
+ elements. For example:</p>
+<pre class="doc_code">
+ %A = getelementptr &lt;4 x i8*&gt; %ptrs, &lt;4 x i64&gt; %offsets
+</pre>
+
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="convertops">Conversion Operations</a>
+</h3>
+
+<div>
+
+<p>The instructions in this category are the conversion instructions (casting)
+ which all take a single operand and a type. They perform various bit
+ conversions on the operand.</p>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="i_trunc">'<tt>trunc .. to</tt>' Instruction</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ &lt;result&gt; = trunc &lt;ty&gt; &lt;value&gt; to &lt;ty2&gt; <i>; yields ty2</i>
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>trunc</tt>' instruction truncates its operand to the
+ type <tt>ty2</tt>.</p>
+
+<h5>Arguments:</h5>
+<p>The '<tt>trunc</tt>' instruction takes a value to truncate, and a type to truncate it to.
+ Both types must be of <a href="#t_integer">integer</a> types, or vectors
+ of the same number of integers.
+ The bit size of the <tt>value</tt> must be larger than
+ the bit size of the destination type, <tt>ty2</tt>.
+ Equal sized types are not allowed.</p>
+
+<h5>Semantics:</h5>
+<p>The '<tt>trunc</tt>' instruction truncates the high order bits
+ in <tt>value</tt> and converts the remaining bits to <tt>ty2</tt>. Since the
+ source size must be larger than the destination size, <tt>trunc</tt> cannot
+ be a <i>no-op cast</i>. It will always truncate bits.</p>
+
+<h5>Example:</h5>
+<pre>
+ %X = trunc i32 257 to i8 <i>; yields i8:1</i>
+ %Y = trunc i32 123 to i1 <i>; yields i1:true</i>
+ %Z = trunc i32 122 to i1 <i>; yields i1:false</i>
+ %W = trunc &lt;2 x i16&gt; &lt;i16 8, i16 7&gt; to &lt;2 x i8&gt; <i>; yields &lt;i8 8, i8 7&gt;</i>
+</pre>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="i_zext">'<tt>zext .. to</tt>' Instruction</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ &lt;result&gt; = zext &lt;ty&gt; &lt;value&gt; to &lt;ty2&gt; <i>; yields ty2</i>
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>zext</tt>' instruction zero extends its operand to type
+ <tt>ty2</tt>.</p>
+
+
+<h5>Arguments:</h5>
+<p>The '<tt>zext</tt>' instruction takes a value to cast, and a type to cast it to.
+ Both types must be of <a href="#t_integer">integer</a> types, or vectors
+ of the same number of integers.
+ The bit size of the <tt>value</tt> must be smaller than
+ the bit size of the destination type,
+ <tt>ty2</tt>.</p>
+
+<h5>Semantics:</h5>
+<p>The <tt>zext</tt> fills the high order bits of the <tt>value</tt> with zero
+ bits until it reaches the size of the destination type, <tt>ty2</tt>.</p>
+
+<p>When zero extending from i1, the result will always be either 0 or 1.</p>
+
+<h5>Example:</h5>
+<pre>
+ %X = zext i32 257 to i64 <i>; yields i64:257</i>
+ %Y = zext i1 true to i32 <i>; yields i32:1</i>
+ %Z = zext &lt;2 x i16&gt; &lt;i16 8, i16 7&gt; to &lt;2 x i32&gt; <i>; yields &lt;i32 8, i32 7&gt;</i>
+</pre>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="i_sext">'<tt>sext .. to</tt>' Instruction</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ &lt;result&gt; = sext &lt;ty&gt; &lt;value&gt; to &lt;ty2&gt; <i>; yields ty2</i>
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>sext</tt>' instruction sign extends <tt>value</tt> to the type <tt>ty2</tt>.</p>
+
+<h5>Arguments:</h5>
+<p>The '<tt>sext</tt>' instruction takes a value to cast, and a type to cast
+   it to. Both types must be <a href="#t_integer">integer</a> types, or
+   vectors of integers with the same number of elements.
+ The bit size of the <tt>value</tt> must be smaller than
+ the bit size of the destination type,
+ <tt>ty2</tt>.</p>
+
+<h5>Semantics:</h5>
+<p>The '<tt>sext</tt>' instruction performs a sign extension by copying the sign
+ bit (highest order bit) of the <tt>value</tt> until it reaches the bit size
+ of the type <tt>ty2</tt>.</p>
+
+<p>When sign extending from i1, the extension always results in -1 or 0.</p>
+
+<h5>Example:</h5>
+<pre>
+ %X = sext i8 -1 to i16 <i>; yields i16 :65535</i>
+ %Y = sext i1 true to i32 <i>; yields i32:-1</i>
+ %Z = sext &lt;2 x i16&gt; &lt;i16 8, i16 7&gt; to &lt;2 x i32&gt; <i>; yields &lt;i32 8, i32 7&gt;</i>
+</pre>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="i_fptrunc">'<tt>fptrunc .. to</tt>' Instruction</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ &lt;result&gt; = fptrunc &lt;ty&gt; &lt;value&gt; to &lt;ty2&gt; <i>; yields ty2</i>
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>fptrunc</tt>' instruction truncates <tt>value</tt> to type
+ <tt>ty2</tt>.</p>
+
+<h5>Arguments:</h5>
+<p>The '<tt>fptrunc</tt>' instruction takes a <a href="#t_floating">floating
+ point</a> value to cast and a <a href="#t_floating">floating point</a> type
+ to cast it to. The size of <tt>value</tt> must be larger than the size of
+ <tt>ty2</tt>. This implies that <tt>fptrunc</tt> cannot be used to make a
+ <i>no-op cast</i>.</p>
+
+<h5>Semantics:</h5>
+<p>The '<tt>fptrunc</tt>' instruction truncates a <tt>value</tt> from a larger
+ <a href="#t_floating">floating point</a> type to a smaller
+ <a href="#t_floating">floating point</a> type. If the value cannot fit
+ within the destination type, <tt>ty2</tt>, then the results are
+ undefined.</p>
+
+<h5>Example:</h5>
+<pre>
+ %X = fptrunc double 123.0 to float <i>; yields float:123.0</i>
+ %Y = fptrunc double 1.0E+300 to float <i>; yields undefined</i>
+</pre>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="i_fpext">'<tt>fpext .. to</tt>' Instruction</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ &lt;result&gt; = fpext &lt;ty&gt; &lt;value&gt; to &lt;ty2&gt; <i>; yields ty2</i>
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>fpext</tt>' instruction extends a floating point <tt>value</tt> to
+   a larger floating point type.</p>
+
+<h5>Arguments:</h5>
+<p>The '<tt>fpext</tt>' instruction takes a
+ <a href="#t_floating">floating point</a> <tt>value</tt> to cast, and
+ a <a href="#t_floating">floating point</a> type to cast it to. The source
+ type must be smaller than the destination type.</p>
+
+<h5>Semantics:</h5>
+<p>The '<tt>fpext</tt>' instruction extends the <tt>value</tt> from a smaller
+ <a href="#t_floating">floating point</a> type to a larger
+ <a href="#t_floating">floating point</a> type. The <tt>fpext</tt> cannot be
+ used to make a <i>no-op cast</i> because it always changes bits. Use
+ <tt>bitcast</tt> to make a <i>no-op cast</i> for a floating point cast.</p>
+
+<h5>Example:</h5>
+<pre>
+ %X = fpext float 3.125 to double <i>; yields double:3.125000e+00</i>
+ %Y = fpext double %X to fp128 <i>; yields fp128:0xL00000000000000004000900000000000</i>
+</pre>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="i_fptoui">'<tt>fptoui .. to</tt>' Instruction</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ &lt;result&gt; = fptoui &lt;ty&gt; &lt;value&gt; to &lt;ty2&gt; <i>; yields ty2</i>
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>fptoui</tt>' instruction converts a floating point <tt>value</tt> to its
+ unsigned integer equivalent of type <tt>ty2</tt>.</p>
+
+<h5>Arguments:</h5>
+<p>The '<tt>fptoui</tt>' instruction takes a value to cast, which must be a
+ scalar or vector <a href="#t_floating">floating point</a> value, and a type
+ to cast it to <tt>ty2</tt>, which must be an <a href="#t_integer">integer</a>
+ type. If <tt>ty</tt> is a vector floating point type, <tt>ty2</tt> must be a
+   vector integer type with the same number of elements as <tt>ty</tt>.</p>
+
+<h5>Semantics:</h5>
+<p>The '<tt>fptoui</tt>' instruction converts its
+ <a href="#t_floating">floating point</a> operand into the nearest (rounding
+ towards zero) unsigned integer value. If the value cannot fit
+ in <tt>ty2</tt>, the results are undefined.</p>
+
+<h5>Example:</h5>
+<pre>
+ %X = fptoui double 123.0 to i32 <i>; yields i32:123</i>
+ %Y = fptoui float 1.0E+300 to i1 <i>; yields undefined:1</i>
+ %Z = fptoui float 1.04E+17 to i8 <i>; yields undefined:1</i>
+</pre>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="i_fptosi">'<tt>fptosi .. to</tt>' Instruction</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ &lt;result&gt; = fptosi &lt;ty&gt; &lt;value&gt; to &lt;ty2&gt; <i>; yields ty2</i>
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>fptosi</tt>' instruction converts a
+   <a href="#t_floating">floating point</a> <tt>value</tt> to its signed
+   integer equivalent of type <tt>ty2</tt>.</p>
+
+<h5>Arguments:</h5>
+<p>The '<tt>fptosi</tt>' instruction takes a value to cast, which must be a
+ scalar or vector <a href="#t_floating">floating point</a> value, and a type
+ to cast it to <tt>ty2</tt>, which must be an <a href="#t_integer">integer</a>
+ type. If <tt>ty</tt> is a vector floating point type, <tt>ty2</tt> must be a
+   vector integer type with the same number of elements as <tt>ty</tt>.</p>
+
+<h5>Semantics:</h5>
+<p>The '<tt>fptosi</tt>' instruction converts its
+ <a href="#t_floating">floating point</a> operand into the nearest (rounding
+ towards zero) signed integer value. If the value cannot fit in <tt>ty2</tt>,
+ the results are undefined.</p>
+
+<h5>Example:</h5>
+<pre>
+ %X = fptosi double -123.0 to i32 <i>; yields i32:-123</i>
+ %Y = fptosi float 1.0E-247 to i1 <i>; yields undefined:1</i>
+ %Z = fptosi float 1.04E+17 to i8 <i>; yields undefined:1</i>
+</pre>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="i_uitofp">'<tt>uitofp .. to</tt>' Instruction</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ &lt;result&gt; = uitofp &lt;ty&gt; &lt;value&gt; to &lt;ty2&gt; <i>; yields ty2</i>
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>uitofp</tt>' instruction regards <tt>value</tt> as an unsigned
+ integer and converts that value to the <tt>ty2</tt> type.</p>
+
+<h5>Arguments:</h5>
+<p>The '<tt>uitofp</tt>' instruction takes a value to cast, which must be a
+ scalar or vector <a href="#t_integer">integer</a> value, and a type to cast
+   it to <tt>ty2</tt>, which must be a <a href="#t_floating">floating point</a>
+   type. If <tt>ty</tt> is a vector integer type, <tt>ty2</tt> must be a vector
+   floating point type with the same number of elements as <tt>ty</tt>.</p>
+
+<h5>Semantics:</h5>
+<p>The '<tt>uitofp</tt>' instruction interprets its operand as an unsigned
+ integer quantity and converts it to the corresponding floating point
+ value. If the value cannot fit in the floating point value, the results are
+ undefined.</p>
+
+<h5>Example:</h5>
+<pre>
+ %X = uitofp i32 257 to float <i>; yields float:257.0</i>
+ %Y = uitofp i8 -1 to double <i>; yields double:255.0</i>
+</pre>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="i_sitofp">'<tt>sitofp .. to</tt>' Instruction</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ &lt;result&gt; = sitofp &lt;ty&gt; &lt;value&gt; to &lt;ty2&gt; <i>; yields ty2</i>
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>sitofp</tt>' instruction regards <tt>value</tt> as a signed integer
+ and converts that value to the <tt>ty2</tt> type.</p>
+
+<h5>Arguments:</h5>
+<p>The '<tt>sitofp</tt>' instruction takes a value to cast, which must be a
+ scalar or vector <a href="#t_integer">integer</a> value, and a type to cast
+   it to <tt>ty2</tt>, which must be a <a href="#t_floating">floating point</a>
+   type. If <tt>ty</tt> is a vector integer type, <tt>ty2</tt> must be a vector
+   floating point type with the same number of elements as <tt>ty</tt>.</p>
+
+<h5>Semantics:</h5>
+<p>The '<tt>sitofp</tt>' instruction interprets its operand as a signed integer
+ quantity and converts it to the corresponding floating point value. If the
+ value cannot fit in the floating point value, the results are undefined.</p>
+
+<h5>Example:</h5>
+<pre>
+ %X = sitofp i32 257 to float <i>; yields float:257.0</i>
+ %Y = sitofp i8 -1 to double <i>; yields double:-1.0</i>
+</pre>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="i_ptrtoint">'<tt>ptrtoint .. to</tt>' Instruction</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ &lt;result&gt; = ptrtoint &lt;ty&gt; &lt;value&gt; to &lt;ty2&gt; <i>; yields ty2</i>
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>ptrtoint</tt>' instruction converts the pointer or a vector of
+ pointers <tt>value</tt> to
+ the integer (or vector of integers) type <tt>ty2</tt>.</p>
+
+<h5>Arguments:</h5>
+<p>The '<tt>ptrtoint</tt>' instruction takes a <tt>value</tt> to cast, which
+   must be a value of type <a href="#t_pointer">pointer</a> or a vector of
+ pointers, and a type to cast it to
+ <tt>ty2</tt>, which must be an <a href="#t_integer">integer</a> or a vector
+ of integers type.</p>
+
+<h5>Semantics:</h5>
+<p>The '<tt>ptrtoint</tt>' instruction converts <tt>value</tt> to integer type
+ <tt>ty2</tt> by interpreting the pointer value as an integer and either
+ truncating or zero extending that value to the size of the integer type. If
+ <tt>value</tt> is smaller than <tt>ty2</tt> then a zero extension is done. If
+ <tt>value</tt> is larger than <tt>ty2</tt> then a truncation is done. If they
+ are the same size, then nothing is done (<i>no-op cast</i>) other than a type
+ change.</p>
+
+<h5>Example:</h5>
+<pre>
+ %X = ptrtoint i32* %P to i8 <i>; yields truncation on 32-bit architecture</i>
+ %Y = ptrtoint i32* %P to i64 <i>; yields zero extension on 32-bit architecture</i>
+ %Z = ptrtoint &lt;4 x i32*&gt; %P to &lt;4 x i64&gt;<i>; yields vector zero extension for a vector of addresses on 32-bit architecture</i>
+</pre>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="i_inttoptr">'<tt>inttoptr .. to</tt>' Instruction</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ &lt;result&gt; = inttoptr &lt;ty&gt; &lt;value&gt; to &lt;ty2&gt; <i>; yields ty2</i>
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>inttoptr</tt>' instruction converts an integer <tt>value</tt> to a
+ pointer type, <tt>ty2</tt>.</p>
+
+<h5>Arguments:</h5>
+<p>The '<tt>inttoptr</tt>' instruction takes an <a href="#t_integer">integer</a>
+ value to cast, and a type to cast it to, which must be a
+ <a href="#t_pointer">pointer</a> type.</p>
+
+<h5>Semantics:</h5>
+<p>The '<tt>inttoptr</tt>' instruction converts <tt>value</tt> to type
+ <tt>ty2</tt> by applying either a zero extension or a truncation depending on
+ the size of the integer <tt>value</tt>. If <tt>value</tt> is larger than the
+ size of a pointer then a truncation is done. If <tt>value</tt> is smaller
+ than the size of a pointer then a zero extension is done. If they are the
+ same size, nothing is done (<i>no-op cast</i>).</p>
+
+<h5>Example:</h5>
+<pre>
+ %X = inttoptr i32 255 to i32* <i>; yields zero extension on 64-bit architecture</i>
+ %Y = inttoptr i32 255 to i32* <i>; yields no-op on 32-bit architecture</i>
+ %Z = inttoptr i64 0 to i32* <i>; yields truncation on 32-bit architecture</i>
+ %W = inttoptr &lt;4 x i32&gt; %G to &lt;4 x i8*&gt; <i>; yields four pointers converted from the elements of %G</i>
+</pre>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="i_bitcast">'<tt>bitcast .. to</tt>' Instruction</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ &lt;result&gt; = bitcast &lt;ty&gt; &lt;value&gt; to &lt;ty2&gt; <i>; yields ty2</i>
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>bitcast</tt>' instruction converts <tt>value</tt> to type
+ <tt>ty2</tt> without changing any bits.</p>
+
+<h5>Arguments:</h5>
+<p>The '<tt>bitcast</tt>' instruction takes a value to cast, which must be a
+ non-aggregate first class value, and a type to cast it to, which must also be
+ a non-aggregate <a href="#t_firstclass">first class</a> type. The bit sizes
+ of <tt>value</tt> and the destination type, <tt>ty2</tt>, must be
+ identical. If the source type is a pointer, the destination type must also be
+ a pointer. This instruction supports bitwise conversion of vectors to
+ integers and to vectors of other types (as long as they have the same
+ size).</p>
+
+<h5>Semantics:</h5>
+<p>The '<tt>bitcast</tt>' instruction converts <tt>value</tt> to type
+ <tt>ty2</tt>. It is always a <i>no-op cast</i> because no bits change with
+ this conversion. The conversion is done as if the <tt>value</tt> had been
+ stored to memory and read back as type <tt>ty2</tt>.
+ Pointer (or vector of pointers) types may only be converted to other pointer
+ (or vector of pointers) types with this instruction. To convert
+ pointers to other types, use the <a href="#i_inttoptr">inttoptr</a> or
+ <a href="#i_ptrtoint">ptrtoint</a> instructions first.</p>
+
+<h5>Example:</h5>
+<pre>
+ %X = bitcast i8 255 to i8 <i>; yields i8 :-1</i>
+ %Y = bitcast i32* %x to float*            <i>; yields float*:%x</i>
+ %Z = bitcast &lt;2 x i32&gt; %V to i64          <i>; yields i64: %V</i>
+ %W = bitcast &lt;2 x i32*&gt; %V to &lt;2 x i64*&gt;  <i>; yields &lt;2 x i64*&gt;</i>
+</pre>
+
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="otherops">Other Operations</a>
+</h3>
+
+<div>
+
+<p>The instructions in this category are the "miscellaneous" instructions, which
+ defy better classification.</p>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="i_icmp">'<tt>icmp</tt>' Instruction</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ &lt;result&gt; = icmp &lt;cond&gt; &lt;ty&gt; &lt;op1&gt;, &lt;op2&gt; <i>; yields {i1} or {&lt;N x i1&gt;}:result</i>
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>icmp</tt>' instruction returns a boolean value or a vector of
+ boolean values based on comparison of its two integer, integer vector,
+ pointer, or pointer vector operands.</p>
+
+<h5>Arguments:</h5>
+<p>The '<tt>icmp</tt>' instruction takes three operands. The first operand is
+ the condition code indicating the kind of comparison to perform. It is not a
+   value, just a keyword. The possible condition codes are:</p>
+
+<ol>
+ <li><tt>eq</tt>: equal</li>
+ <li><tt>ne</tt>: not equal </li>
+ <li><tt>ugt</tt>: unsigned greater than</li>
+ <li><tt>uge</tt>: unsigned greater or equal</li>
+ <li><tt>ult</tt>: unsigned less than</li>
+ <li><tt>ule</tt>: unsigned less or equal</li>
+ <li><tt>sgt</tt>: signed greater than</li>
+ <li><tt>sge</tt>: signed greater or equal</li>
+ <li><tt>slt</tt>: signed less than</li>
+ <li><tt>sle</tt>: signed less or equal</li>
+</ol>
+
+<p>The remaining two arguments must be <a href="#t_integer">integer</a> or
+ <a href="#t_pointer">pointer</a> or integer <a href="#t_vector">vector</a>
+ typed. They must also be identical types.</p>
+
+<h5>Semantics:</h5>
+<p>The '<tt>icmp</tt>' compares <tt>op1</tt> and <tt>op2</tt> according to the
+ condition code given as <tt>cond</tt>. The comparison performed always yields
+ either an <a href="#t_integer"><tt>i1</tt></a> or vector of <tt>i1</tt>
+ result, as follows:</p>
+
+<ol>
+ <li><tt>eq</tt>: yields <tt>true</tt> if the operands are equal,
+ <tt>false</tt> otherwise. No sign interpretation is necessary or
+ performed.</li>
+
+ <li><tt>ne</tt>: yields <tt>true</tt> if the operands are unequal,
+ <tt>false</tt> otherwise. No sign interpretation is necessary or
+ performed.</li>
+
+ <li><tt>ugt</tt>: interprets the operands as unsigned values and yields
+ <tt>true</tt> if <tt>op1</tt> is greater than <tt>op2</tt>.</li>
+
+ <li><tt>uge</tt>: interprets the operands as unsigned values and yields
+ <tt>true</tt> if <tt>op1</tt> is greater than or equal
+ to <tt>op2</tt>.</li>
+
+ <li><tt>ult</tt>: interprets the operands as unsigned values and yields
+ <tt>true</tt> if <tt>op1</tt> is less than <tt>op2</tt>.</li>
+
+ <li><tt>ule</tt>: interprets the operands as unsigned values and yields
+ <tt>true</tt> if <tt>op1</tt> is less than or equal to <tt>op2</tt>.</li>
+
+ <li><tt>sgt</tt>: interprets the operands as signed values and yields
+ <tt>true</tt> if <tt>op1</tt> is greater than <tt>op2</tt>.</li>
+
+ <li><tt>sge</tt>: interprets the operands as signed values and yields
+ <tt>true</tt> if <tt>op1</tt> is greater than or equal
+ to <tt>op2</tt>.</li>
+
+ <li><tt>slt</tt>: interprets the operands as signed values and yields
+ <tt>true</tt> if <tt>op1</tt> is less than <tt>op2</tt>.</li>
+
+ <li><tt>sle</tt>: interprets the operands as signed values and yields
+ <tt>true</tt> if <tt>op1</tt> is less than or equal to <tt>op2</tt>.</li>
+</ol>
+
+<p>If the operands are <a href="#t_pointer">pointer</a> typed, the pointer
+ values are compared as if they were integers.</p>
+
+<p>If the operands are integer vectors, then they are compared element by
+ element. The result is an <tt>i1</tt> vector with the same number of elements
+ as the values being compared. Otherwise, the result is an <tt>i1</tt>.</p>
+
+<h5>Example:</h5>
+<pre>
+ &lt;result&gt; = icmp eq i32 4, 5 <i>; yields: result=false</i>
+ &lt;result&gt; = icmp ne float* %X, %X <i>; yields: result=false</i>
+ &lt;result&gt; = icmp ult i16 4, 5 <i>; yields: result=true</i>
+ &lt;result&gt; = icmp sgt i16 4, 5 <i>; yields: result=false</i>
+ &lt;result&gt; = icmp ule i16 -4, 5 <i>; yields: result=false</i>
+ &lt;result&gt; = icmp sge i16 4, 5 <i>; yields: result=false</i>
+</pre>
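+
+<p>As an illustration only (the operand names below are placeholders, not
+   defined elsewhere in this document), a vector comparison producing a
+   vector of <tt>i1</tt> might be written as:</p>
+<pre>
+  %mask = icmp slt &lt;4 x i32&gt; %a, %b    <i>; yields &lt;4 x i1&gt;</i>
+</pre>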
+
+<p>Note that the code generator does not yet support vector types with
+ the <tt>icmp</tt> instruction.</p>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="i_fcmp">'<tt>fcmp</tt>' Instruction</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ &lt;result&gt; = fcmp &lt;cond&gt; &lt;ty&gt; &lt;op1&gt;, &lt;op2&gt; <i>; yields {i1} or {&lt;N x i1&gt;}:result</i>
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>fcmp</tt>' instruction returns a boolean value or vector of boolean
+ values based on comparison of its operands.</p>
+
+<p>If the operands are floating point scalars, then the result type is a boolean
+(<a href="#t_integer"><tt>i1</tt></a>).</p>
+
+<p>If the operands are floating point vectors, then the result type is a vector
+ of boolean with the same number of elements as the operands being
+ compared.</p>
+
+<h5>Arguments:</h5>
+<p>The '<tt>fcmp</tt>' instruction takes three operands. The first operand is
+ the condition code indicating the kind of comparison to perform. It is not a
+   value, just a keyword. The possible condition codes are:</p>
+
+<ol>
+ <li><tt>false</tt>: no comparison, always returns false</li>
+ <li><tt>oeq</tt>: ordered and equal</li>
+ <li><tt>ogt</tt>: ordered and greater than </li>
+ <li><tt>oge</tt>: ordered and greater than or equal</li>
+ <li><tt>olt</tt>: ordered and less than </li>
+ <li><tt>ole</tt>: ordered and less than or equal</li>
+ <li><tt>one</tt>: ordered and not equal</li>
+ <li><tt>ord</tt>: ordered (no nans)</li>
+ <li><tt>ueq</tt>: unordered or equal</li>
+ <li><tt>ugt</tt>: unordered or greater than </li>
+ <li><tt>uge</tt>: unordered or greater than or equal</li>
+ <li><tt>ult</tt>: unordered or less than </li>
+ <li><tt>ule</tt>: unordered or less than or equal</li>
+ <li><tt>une</tt>: unordered or not equal</li>
+ <li><tt>uno</tt>: unordered (either nans)</li>
+ <li><tt>true</tt>: no comparison, always returns true</li>
+</ol>
+
+<p><i>Ordered</i> means that neither operand is a QNAN while
+ <i>unordered</i> means that either operand may be a QNAN.</p>
+
+<p>The <tt>op1</tt> and <tt>op2</tt> arguments must each be either
+ a <a href="#t_floating">floating point</a> type or
+ a <a href="#t_vector">vector</a> of floating point type. They must have
+ identical types.</p>
+
+<h5>Semantics:</h5>
+<p>The '<tt>fcmp</tt>' instruction compares <tt>op1</tt> and <tt>op2</tt>
+ according to the condition code given as <tt>cond</tt>. If the operands are
+ vectors, then the vectors are compared element by element. Each comparison
+ performed always yields an <a href="#t_integer">i1</a> result, as
+ follows:</p>
+
+<ol>
+ <li><tt>false</tt>: always yields <tt>false</tt>, regardless of operands.</li>
+
+ <li><tt>oeq</tt>: yields <tt>true</tt> if both operands are not a QNAN and
+ <tt>op1</tt> is equal to <tt>op2</tt>.</li>
+
+ <li><tt>ogt</tt>: yields <tt>true</tt> if both operands are not a QNAN and
+ <tt>op1</tt> is greater than <tt>op2</tt>.</li>
+
+ <li><tt>oge</tt>: yields <tt>true</tt> if both operands are not a QNAN and
+ <tt>op1</tt> is greater than or equal to <tt>op2</tt>.</li>
+
+ <li><tt>olt</tt>: yields <tt>true</tt> if both operands are not a QNAN and
+ <tt>op1</tt> is less than <tt>op2</tt>.</li>
+
+ <li><tt>ole</tt>: yields <tt>true</tt> if both operands are not a QNAN and
+ <tt>op1</tt> is less than or equal to <tt>op2</tt>.</li>
+
+ <li><tt>one</tt>: yields <tt>true</tt> if both operands are not a QNAN and
+ <tt>op1</tt> is not equal to <tt>op2</tt>.</li>
+
+ <li><tt>ord</tt>: yields <tt>true</tt> if both operands are not a QNAN.</li>
+
+ <li><tt>ueq</tt>: yields <tt>true</tt> if either operand is a QNAN or
+ <tt>op1</tt> is equal to <tt>op2</tt>.</li>
+
+ <li><tt>ugt</tt>: yields <tt>true</tt> if either operand is a QNAN or
+ <tt>op1</tt> is greater than <tt>op2</tt>.</li>
+
+ <li><tt>uge</tt>: yields <tt>true</tt> if either operand is a QNAN or
+ <tt>op1</tt> is greater than or equal to <tt>op2</tt>.</li>
+
+ <li><tt>ult</tt>: yields <tt>true</tt> if either operand is a QNAN or
+ <tt>op1</tt> is less than <tt>op2</tt>.</li>
+
+ <li><tt>ule</tt>: yields <tt>true</tt> if either operand is a QNAN or
+ <tt>op1</tt> is less than or equal to <tt>op2</tt>.</li>
+
+ <li><tt>une</tt>: yields <tt>true</tt> if either operand is a QNAN or
+ <tt>op1</tt> is not equal to <tt>op2</tt>.</li>
+
+ <li><tt>uno</tt>: yields <tt>true</tt> if either operand is a QNAN.</li>
+
+ <li><tt>true</tt>: always yields <tt>true</tt>, regardless of operands.</li>
+</ol>
+
+<h5>Example:</h5>
+<pre>
+ &lt;result&gt; = fcmp oeq float 4.0, 5.0 <i>; yields: result=false</i>
+ &lt;result&gt; = fcmp one float 4.0, 5.0 <i>; yields: result=true</i>
+ &lt;result&gt; = fcmp olt float 4.0, 5.0 <i>; yields: result=true</i>
+ &lt;result&gt; = fcmp ueq double 1.0, 2.0 <i>; yields: result=false</i>
+</pre>
+
+<p>Note that the code generator does not yet support vector types with
+ the <tt>fcmp</tt> instruction.</p>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="i_phi">'<tt>phi</tt>' Instruction</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ &lt;result&gt; = phi &lt;ty&gt; [ &lt;val0&gt;, &lt;label0&gt;], ...
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>phi</tt>' instruction is used to implement the &#966; node in the
+ SSA graph representing the function.</p>
+
+<h5>Arguments:</h5>
+<p>The type of the incoming values is specified with the first type field. After
+ this, the '<tt>phi</tt>' instruction takes a list of pairs as arguments, with
+ one pair for each predecessor basic block of the current block. Only values
+ of <a href="#t_firstclass">first class</a> type may be used as the value
+ arguments to the PHI node. Only labels may be used as the label
+ arguments.</p>
+
+<p>There must be no non-phi instructions between the start of a basic block and
+ the PHI instructions: i.e. PHI instructions must be first in a basic
+ block.</p>
+
+<p>For the purposes of the SSA form, the use of each incoming value is deemed to
+ occur on the edge from the corresponding predecessor block to the current
+ block (but after any definition of an '<tt>invoke</tt>' instruction's return
+ value on the same edge).</p>
+
+<h5>Semantics:</h5>
+<p>At runtime, the '<tt>phi</tt>' instruction logically takes on the value
+ specified by the pair corresponding to the predecessor basic block that
+ executed just prior to the current block.</p>
+
+<h5>Example:</h5>
+<pre>
+Loop: ; Infinite loop that counts from 0 on up...
+ %indvar = phi i32 [ 0, %LoopHeader ], [ %nextindvar, %Loop ]
+ %nextindvar = add i32 %indvar, 1
+ br label %Loop
+</pre>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="i_select">'<tt>select</tt>' Instruction</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ &lt;result&gt; = select <i>selty</i> &lt;cond&gt;, &lt;ty&gt; &lt;val1&gt;, &lt;ty&gt; &lt;val2&gt; <i>; yields ty</i>
+
+ <i>selty</i> is either i1 or {&lt;N x i1&gt;}
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>select</tt>' instruction is used to choose one value based on a
+ condition, without branching.</p>
+
+
+<h5>Arguments:</h5>
+<p>The '<tt>select</tt>' instruction requires an 'i1' value or a vector of 'i1'
+ values indicating the condition, and two values of the
+   same <a href="#t_firstclass">first class</a> type. If <tt>val1</tt> and
+   <tt>val2</tt> are vectors and the condition is a scalar, then entire
+   vectors are selected, not individual elements.</p>
+
+<h5>Semantics:</h5>
+<p>If the condition is an i1 and it evaluates to 1, the instruction returns the
+ first value argument; otherwise, it returns the second value argument.</p>
+
+<p>If the condition is a vector of i1, then the value arguments must be vectors
+ of the same size, and the selection is done element by element.</p>
+
+<h5>Example:</h5>
+<pre>
+ %X = select i1 true, i8 17, i8 42 <i>; yields i8:17</i>
+</pre>
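+
+<p>With a vector condition the selection is performed element by element; a
+   minimal sketch (the vector operands here are placeholders):</p>
+<pre>
+  %W = select &lt;2 x i1&gt; &lt;i1 true, i1 false&gt;, &lt;2 x i8&gt; %a, &lt;2 x i8&gt; %b  <i>; first element from %a, second from %b</i>
+</pre>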
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="i_call">'<tt>call</tt>' Instruction</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ &lt;result&gt; = [tail] call [<a href="#callingconv">cconv</a>] [<a href="#paramattrs">ret attrs</a>] &lt;ty&gt; [&lt;fnty&gt;*] &lt;fnptrval&gt;(&lt;function args&gt;) [<a href="#fnattrs">fn attrs</a>]
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>call</tt>' instruction represents a simple function call.</p>
+
+<h5>Arguments:</h5>
+<p>This instruction requires several arguments:</p>
+
+<ol>
+ <li>The optional "tail" marker indicates that the callee function does not
+ access any allocas or varargs in the caller. Note that calls may be
+ marked "tail" even if they do not occur before
+ a <a href="#i_ret"><tt>ret</tt></a> instruction. If the "tail" marker is
+ present, the function call is eligible for tail call optimization,
+ but <a href="CodeGenerator.html#tailcallopt">might not in fact be
+ optimized into a jump</a>. The code generator may optimize calls marked
+ "tail" with either 1) automatic <a href="CodeGenerator.html#sibcallopt">
+ sibling call optimization</a> when the caller and callee have
+ matching signatures, or 2) forced tail call optimization when the
+ following extra requirements are met:
+ <ul>
+ <li>Caller and callee both have the calling
+ convention <tt>fastcc</tt>.</li>
+ <li>The call is in tail position (ret immediately follows call and ret
+ uses value of call or is void).</li>
+ <li>Option <tt>-tailcallopt</tt> is enabled,
+ or <code>llvm::GuaranteedTailCallOpt</code> is <code>true</code>.</li>
+ <li><a href="CodeGenerator.html#tailcallopt">Platform specific
+ constraints are met.</a></li>
+ </ul>
+ </li>
+
+ <li>The optional "cconv" marker indicates which <a href="#callingconv">calling
+ convention</a> the call should use. If none is specified, the call
+ defaults to using C calling conventions. The calling convention of the
+ call must match the calling convention of the target function, or else the
+ behavior is undefined.</li>
+
+ <li>The optional <a href="#paramattrs">Parameter Attributes</a> list for
+ return values. Only '<tt>zeroext</tt>', '<tt>signext</tt>', and
+ '<tt>inreg</tt>' attributes are valid here.</li>
+
+ <li>'<tt>ty</tt>': the type of the call instruction itself which is also the
+ type of the return value. Functions that return no value are marked
+ <tt><a href="#t_void">void</a></tt>.</li>
+
+ <li>'<tt>fnty</tt>': shall be the signature of the pointer to function value
+ being invoked. The argument types must match the types implied by this
+ signature. This type can be omitted if the function is not varargs and if
+ the function type does not return a pointer to a function.</li>
+
+ <li>'<tt>fnptrval</tt>': An LLVM value containing a pointer to a function to
+ be invoked. In most cases, this is a direct function invocation, but
+ indirect <tt>call</tt>s are just as possible, calling an arbitrary pointer
+ to function value.</li>
+
+ <li>'<tt>function args</tt>': argument list whose types match the function
+ signature argument types and parameter attributes. All arguments must be
+ of <a href="#t_firstclass">first class</a> type. If the function
+ signature indicates the function accepts a variable number of arguments,
+ the extra arguments can be specified.</li>
+
+ <li>The optional <a href="#fnattrs">function attributes</a> list. Only
+ '<tt>noreturn</tt>', '<tt>nounwind</tt>', '<tt>readonly</tt>' and
+ '<tt>readnone</tt>' attributes are valid here.</li>
+</ol>
+
+<h5>Semantics:</h5>
+<p>The '<tt>call</tt>' instruction is used to cause control flow to transfer to
+ a specified function, with its incoming arguments bound to the specified
+ values. Upon a '<tt><a href="#i_ret">ret</a></tt>' instruction in the called
+ function, control flow continues with the instruction after the function
+ call, and the return value of the function is bound to the result
+ argument.</p>
+
+<h5>Example:</h5>
+<pre>
+ %retval = call i32 @test(i32 %argc)
+ call i32 (i8*, ...)* @printf(i8* %msg, i32 12, i8 42) <i>; yields i32</i>
+ %X = tail call i32 @foo() <i>; yields i32</i>
+ %Y = tail call <a href="#callingconv">fastcc</a> i32 @foo() <i>; yields i32</i>
+ call void %foo(i8 97 signext)
+
+ %struct.A = type { i32, i8 }
+  %r = call %struct.A @foo()                        <i>; yields { i32, i8 }</i>
+ %gr = extractvalue %struct.A %r, 0 <i>; yields i32</i>
+ %gr1 = extractvalue %struct.A %r, 1 <i>; yields i8</i>
+ %Z = call void @foo() noreturn <i>; indicates that %foo never returns normally</i>
+  %ZZ = call zeroext i32 @bar()                     <i>; Return value is zero extended</i>
+</pre>
+
+<p>LLVM treats calls to some functions with names and arguments that match the
+standard C99 library as being the C99 library functions, and may perform
+optimizations or generate code for them under that assumption. This is
+something we'd like to change in the future to provide better support for
+freestanding environments and non-C-based languages.</p>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="i_va_arg">'<tt>va_arg</tt>' Instruction</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ &lt;resultval&gt; = va_arg &lt;va_list*&gt; &lt;arglist&gt;, &lt;argty&gt;
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>va_arg</tt>' instruction is used to access arguments passed through
+ the "variable argument" area of a function call. It is used to implement the
+ <tt>va_arg</tt> macro in C.</p>
+
+<h5>Arguments:</h5>
+<p>This instruction takes a <tt>va_list*</tt> value and the type of the
+ argument. It returns a value of the specified argument type and increments
+ the <tt>va_list</tt> to point to the next argument. The actual type
+ of <tt>va_list</tt> is target specific.</p>
+
+<h5>Semantics:</h5>
+<p>The '<tt>va_arg</tt>' instruction loads an argument of the specified type
+ from the specified <tt>va_list</tt> and causes the <tt>va_list</tt> to point
+ to the next argument. For more information, see the variable argument
+ handling <a href="#int_varargs">Intrinsic Functions</a>.</p>
+
+<p>It is legal for this instruction to be called in a function which does not
+ take a variable number of arguments, for example, the <tt>vfprintf</tt>
+ function.</p>
+
+<p><tt>va_arg</tt> is an LLVM instruction instead of
+ an <a href="#intrinsics">intrinsic function</a> because it takes a type as an
+ argument.</p>
+
+<h5>Example:</h5>
+<p>See the <a href="#int_varargs">variable argument processing</a> section.</p>
+
+<p>Note that the code generator does not yet fully support va_arg on many
+ targets. Also, it does not currently support va_arg with aggregate types on
+ any target.</p>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="i_landingpad">'<tt>landingpad</tt>' Instruction</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ &lt;resultval&gt; = landingpad &lt;resultty&gt; personality &lt;type&gt; &lt;pers_fn&gt; &lt;clause&gt;+
+ &lt;resultval&gt; = landingpad &lt;resultty&gt; personality &lt;type&gt; &lt;pers_fn&gt; cleanup &lt;clause&gt;*
+
+ &lt;clause&gt; := catch &lt;type&gt; &lt;value&gt;
+ &lt;clause&gt; := filter &lt;array constant type&gt; &lt;array constant&gt;
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>landingpad</tt>' instruction is used by
+ <a href="ExceptionHandling.html#overview">LLVM's exception handling
+ system</a> to specify that a basic block is a landing pad &mdash; one where
+ the exception lands, and corresponds to the code found in the
+ <i><tt>catch</tt></i> portion of a <i><tt>try/catch</tt></i> sequence. It
+ defines values supplied by the personality function (<tt>pers_fn</tt>) upon
+ re-entry to the function. The <tt>resultval</tt> has the
+ type <tt>resultty</tt>.</p>
+
+<h5>Arguments:</h5>
+<p>This instruction takes a <tt>pers_fn</tt> value. This is the personality
+ function associated with the unwinding mechanism. The optional
+ <tt>cleanup</tt> flag indicates that the landing pad block is a cleanup.</p>
+
+<p>A <tt>clause</tt> begins with the clause type &mdash; <tt>catch</tt>
+ or <tt>filter</tt> &mdash; and contains the global variable representing the
+ "type" that may be caught or filtered respectively. Unlike the
+ <tt>catch</tt> clause, the <tt>filter</tt> clause takes an array constant as
+ its argument. Use "<tt>[0 x i8**] undef</tt>" for a filter which cannot
+ throw. The '<tt>landingpad</tt>' instruction must contain <em>at least</em>
+ one <tt>clause</tt> or the <tt>cleanup</tt> flag.</p>
+
+<h5>Semantics:</h5>
+<p>The '<tt>landingpad</tt>' instruction defines the values which are set by the
+ personality function (<tt>pers_fn</tt>) upon re-entry to the function, and
+ therefore the "result type" of the <tt>landingpad</tt> instruction. As with
+ calling conventions, how the personality function results are represented in
+ LLVM IR is target specific.</p>
+
+<p>The clauses are applied in order from top to bottom. If two
+ <tt>landingpad</tt> instructions are merged together through inlining, the
+ clauses from the calling function are appended to the list of clauses.
+ When the call stack is being unwound due to an exception being thrown, the
+ exception is compared against each <tt>clause</tt> in turn. If it doesn't
+ match any of the clauses, and the <tt>cleanup</tt> flag is not set, then
+ unwinding continues further up the call stack.</p>
+
+<p>The <tt>landingpad</tt> instruction has several restrictions:</p>
+
+<ul>
+ <li>A landing pad block is a basic block which is the unwind destination of an
+ '<tt>invoke</tt>' instruction.</li>
+ <li>A landing pad block must have a '<tt>landingpad</tt>' instruction as its
+ first non-PHI instruction.</li>
+ <li>There can be only one '<tt>landingpad</tt>' instruction within the landing
+ pad block.</li>
+ <li>A basic block that is not a landing pad block may not include a
+ '<tt>landingpad</tt>' instruction.</li>
+ <li>All '<tt>landingpad</tt>' instructions in a function must have the same
+ personality function.</li>
+</ul>
+
+<h5>Example:</h5>
+<pre>
+ ;; A landing pad which can catch an integer.
+ %res = landingpad { i8*, i32 } personality i32 (...)* @__gxx_personality_v0
+ catch i8** @_ZTIi
+ ;; A landing pad that is a cleanup.
+ %res = landingpad { i8*, i32 } personality i32 (...)* @__gxx_personality_v0
+ cleanup
+ ;; A landing pad which can catch an integer and can only throw a double.
+ %res = landingpad { i8*, i32 } personality i32 (...)* @__gxx_personality_v0
+ catch i8** @_ZTIi
+ filter [1 x i8**] [@_ZTId]
+</pre>
+
+</div>
+
+</div>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="intrinsics">Intrinsic Functions</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>LLVM supports the notion of an "intrinsic function". These functions have
+ well known names and semantics and are required to follow certain
+ restrictions. Overall, these intrinsics represent an extension mechanism for
+ the LLVM language that does not require changing all of the transformations
+ in LLVM when adding to the language (or the bitcode reader/writer, the
+ parser, etc...).</p>
+
+<p>Intrinsic function names must all start with an "<tt>llvm.</tt>" prefix. This
+ prefix is reserved in LLVM for intrinsic names; thus, function names may not
+ begin with this prefix. Intrinsic functions must always be external
+ functions: you cannot define the body of intrinsic functions. Intrinsic
+ functions may only be used in call or invoke instructions: it is illegal to
+ take the address of an intrinsic function. Additionally, because intrinsic
+ functions are part of the LLVM language, it is required if any are added that
+ they be documented here.</p>
+
+<p>Some intrinsic functions can be overloaded, i.e., the intrinsic represents a
+ family of functions that perform the same operation but on different data
+ types. Because LLVM can represent over 8 million different integer types,
+ overloading is used commonly to allow an intrinsic function to operate on any
+ integer type. One or more of the argument types or the result type can be
+ overloaded to accept any integer type. Argument types may also be defined as
+ exactly matching a previous argument's type or the result type. This allows
+ an intrinsic function which accepts multiple arguments, but needs all of them
+ to be of the same type, to only be overloaded with respect to a single
+ argument or the result.</p>
+
+<p>Overloaded intrinsics have the names of their overloaded argument types
+   encoded into their function names, each preceded by a period. Only those types
+ which are overloaded result in a name suffix. Arguments whose type is matched
+ against another type do not. For example, the <tt>llvm.ctpop</tt> function
+ can take an integer of any width and returns an integer of exactly the same
+ integer width. This leads to a family of functions such as
+ <tt>i8 @llvm.ctpop.i8(i8 %val)</tt> and <tt>i29 @llvm.ctpop.i29(i29
+ %val)</tt>. Only one type, the return type, is overloaded, and only one type
+ suffix is required. Because the argument's type is matched against the return
+ type, it does not require its own name suffix.</p>
+
+<p>To learn how to add an intrinsic function, please see the
+ <a href="ExtendingLLVM.html">Extending LLVM Guide</a>.</p>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="int_varargs">Variable Argument Handling Intrinsics</a>
+</h3>
+
+<div>
+
+<p>Variable argument support is defined in LLVM with
+ the <a href="#i_va_arg"><tt>va_arg</tt></a> instruction and these three
+ intrinsic functions. These functions are related to the similarly named
+ macros defined in the <tt>&lt;stdarg.h&gt;</tt> header file.</p>
+
+<p>All of these functions operate on arguments that use a target-specific value
+ type "<tt>va_list</tt>". The LLVM assembly language reference manual does
+ not define what this type is, so all transformations should be prepared to
+ handle these functions regardless of the type used.</p>
+
+<p>This example shows how the <a href="#i_va_arg"><tt>va_arg</tt></a>
+ instruction and the variable argument handling intrinsic functions are
+ used.</p>
+
+<pre class="doc_code">
+define i32 @test(i32 %X, ...) {
+ ; Initialize variable argument processing
+ %ap = alloca i8*
+ %ap2 = bitcast i8** %ap to i8*
+ call void @llvm.va_start(i8* %ap2)
+
+ ; Read a single integer argument
+ %tmp = va_arg i8** %ap, i32
+
+ ; Demonstrate usage of llvm.va_copy and llvm.va_end
+ %aq = alloca i8*
+ %aq2 = bitcast i8** %aq to i8*
+ call void @llvm.va_copy(i8* %aq2, i8* %ap2)
+ call void @llvm.va_end(i8* %aq2)
+
+ ; Stop processing of arguments.
+ call void @llvm.va_end(i8* %ap2)
+ ret i32 %tmp
+}
+
+declare void @llvm.va_start(i8*)
+declare void @llvm.va_copy(i8*, i8*)
+declare void @llvm.va_end(i8*)
+</pre>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="int_va_start">'<tt>llvm.va_start</tt>' Intrinsic</a>
+</h4>
+
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+  declare void @llvm.va_start(i8* &lt;arglist&gt;)
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>llvm.va_start</tt>' intrinsic initializes <tt>*&lt;arglist&gt;</tt>
+ for subsequent use by <tt><a href="#i_va_arg">va_arg</a></tt>.</p>
+
+<h5>Arguments:</h5>
+<p>The argument is a pointer to a <tt>va_list</tt> element to initialize.</p>
+
+<h5>Semantics:</h5>
+<p>The '<tt>llvm.va_start</tt>' intrinsic works just like the <tt>va_start</tt>
+ macro available in C. In a target-dependent way, it initializes
+ the <tt>va_list</tt> element to which the argument points, so that the next
+ call to <tt>va_arg</tt> will produce the first variable argument passed to
+ the function. Unlike the C <tt>va_start</tt> macro, this intrinsic does not
+ need to know the last argument of the function as the compiler can figure
+ that out.</p>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="int_va_end">'<tt>llvm.va_end</tt>' Intrinsic</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ declare void @llvm.va_end(i8* &lt;arglist&gt;)
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>llvm.va_end</tt>' intrinsic destroys <tt>*&lt;arglist&gt;</tt>,
+ which has been initialized previously
+ with <tt><a href="#int_va_start">llvm.va_start</a></tt>
+   or <tt><a href="#int_va_copy">llvm.va_copy</a></tt>.</p>
+
+<h5>Arguments:</h5>
+<p>The argument is a pointer to a <tt>va_list</tt> to destroy.</p>
+
+<h5>Semantics:</h5>
+<p>The '<tt>llvm.va_end</tt>' intrinsic works just like the <tt>va_end</tt>
+ macro available in C. In a target-dependent way, it destroys
+ the <tt>va_list</tt> element to which the argument points. Calls
+ to <a href="#int_va_start"><tt>llvm.va_start</tt></a>
+ and <a href="#int_va_copy"> <tt>llvm.va_copy</tt></a> must be matched exactly
+ with calls to <tt>llvm.va_end</tt>.</p>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="int_va_copy">'<tt>llvm.va_copy</tt>' Intrinsic</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ declare void @llvm.va_copy(i8* &lt;destarglist&gt;, i8* &lt;srcarglist&gt;)
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>llvm.va_copy</tt>' intrinsic copies the current argument position
+ from the source argument list to the destination argument list.</p>
+
+<h5>Arguments:</h5>
+<p>The first argument is a pointer to a <tt>va_list</tt> element to initialize.
+ The second argument is a pointer to a <tt>va_list</tt> element to copy
+ from.</p>
+
+<h5>Semantics:</h5>
+<p>The '<tt>llvm.va_copy</tt>' intrinsic works just like the <tt>va_copy</tt>
+ macro available in C. In a target-dependent way, it copies the
+ source <tt>va_list</tt> element into the destination <tt>va_list</tt>
+ element. This intrinsic is necessary because
+ the <tt><a href="#int_va_start"> llvm.va_start</a></tt> intrinsic may be
+ arbitrarily complex and require, for example, memory allocation.</p>
+
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="int_gc">Accurate Garbage Collection Intrinsics</a>
+</h3>
+
+<div>
+
+<p>LLVM support for <a href="GarbageCollection.html">Accurate Garbage
+Collection</a> (GC) requires the implementation and generation of these
+intrinsics. These intrinsics allow identification of <a href="#int_gcroot">GC
+roots on the stack</a>, as well as garbage collector implementations that
+require <a href="#int_gcread">read</a> and <a href="#int_gcwrite">write</a>
+barriers. Front-ends for type-safe garbage collected languages should generate
+these intrinsics to make use of the LLVM garbage collectors. For more details,
+see <a href="GarbageCollection.html">Accurate Garbage Collection with
+LLVM</a>.</p>
+
+<p>The garbage collection intrinsics only operate on objects in the generic
+ address space (address space zero).</p>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="int_gcroot">'<tt>llvm.gcroot</tt>' Intrinsic</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ declare void @llvm.gcroot(i8** %ptrloc, i8* %metadata)
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>llvm.gcroot</tt>' intrinsic declares the existence of a GC root to
+ the code generator, and allows some metadata to be associated with it.</p>
+
+<h5>Arguments:</h5>
+<p>The first argument specifies the address of a stack object that contains the
+ root pointer. The second pointer (which must be either a constant or a
+ global value address) contains the meta-data to be associated with the
+ root.</p>
+
+<h5>Semantics:</h5>
+<p>At runtime, a call to this intrinsic stores a null pointer into the "ptrloc"
+ location. At compile-time, the code generator generates information to allow
+ the runtime to find the pointer at GC safe points. The '<tt>llvm.gcroot</tt>'
+ intrinsic may only be used in a function which <a href="#gc">specifies a GC
+ algorithm</a>.</p>
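+
+<p>For illustration, a root might be declared as in the following sketch; the
+   function and the collector name "mygc" are hypothetical:</p>
+<pre class="doc_code">
+declare void @llvm.gcroot(i8**, i8*)
+
+define void @f() gc "mygc" {          ; "mygc" names a hypothetical collector
+entry:
+  %root = alloca i8*                            ; stack slot holding the root pointer
+  call void @llvm.gcroot(i8** %root, i8* null)  ; register it, with no metadata
+  ret void
+}
+</pre>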
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="int_gcread">'<tt>llvm.gcread</tt>' Intrinsic</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ declare i8* @llvm.gcread(i8* %ObjPtr, i8** %Ptr)
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>llvm.gcread</tt>' intrinsic identifies reads of references from heap
+ locations, allowing garbage collector implementations that require read
+ barriers.</p>
+
+<h5>Arguments:</h5>
+<p>The second argument is the address to read from, which should be an address
+ allocated from the garbage collector. The first object is a pointer to the
+ start of the referenced object, if needed by the language runtime (otherwise
+ null).</p>
+
+<h5>Semantics:</h5>
+<p>The '<tt>llvm.gcread</tt>' intrinsic has the same semantics as a load
+ instruction, but may be replaced with substantially more complex code by the
+ garbage collector runtime, as needed. The '<tt>llvm.gcread</tt>' intrinsic
+ may only be used in a function which <a href="#gc">specifies a GC
+ algorithm</a>.</p>
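+
+<p>A minimal sketch of a read barrier (the object and field pointers are
+   placeholders):</p>
+<pre>
+  %fieldval = call i8* @llvm.gcread(i8* %obj, i8** %fieldptr)    <i>; barrier-aware load of %fieldptr</i>
+</pre>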
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="int_gcwrite">'<tt>llvm.gcwrite</tt>' Intrinsic</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ declare void @llvm.gcwrite(i8* %P1, i8* %Obj, i8** %P2)
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>llvm.gcwrite</tt>' intrinsic identifies writes of references to heap
+ locations, allowing garbage collector implementations that require write
+ barriers (such as generational or reference counting collectors).</p>
+
+<h5>Arguments:</h5>
+<p>The first argument is the reference to store, the second is the start of the
+ object to store it to, and the third is the address of the field of Obj to
+ store to. If the runtime does not require a pointer to the object, Obj may
+ be null.</p>
+
+<h5>Semantics:</h5>
+<p>The '<tt>llvm.gcwrite</tt>' intrinsic has the same semantics as a store
+ instruction, but may be replaced with substantially more complex code by the
+ garbage collector runtime, as needed. The '<tt>llvm.gcwrite</tt>' intrinsic
+ may only be used in a function which <a href="#gc">specifies a GC
+ algorithm</a>.</p>
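+
+<p>A minimal sketch of the corresponding write barrier (again with placeholder
+   operands):</p>
+<pre>
+  call void @llvm.gcwrite(i8* %newval, i8* %obj, i8** %fieldptr)  <i>; barrier-aware store to %fieldptr</i>
+</pre>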
+
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="int_codegen">Code Generator Intrinsics</a>
+</h3>
+
+<div>
+
+<p>These intrinsics are provided by LLVM to expose special features that may
+ only be implemented with code generator support.</p>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="int_returnaddress">'<tt>llvm.returnaddress</tt>' Intrinsic</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+  declare i8* @llvm.returnaddress(i32 &lt;level&gt;)
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>llvm.returnaddress</tt>' intrinsic attempts to compute a
+ target-specific value indicating the return address of the current function
+ or one of its callers.</p>
+
+<h5>Arguments:</h5>
+<p>The argument to this intrinsic indicates which function to return the address
+ for. Zero indicates the calling function, one indicates its caller, etc.
+ The argument is <b>required</b> to be a constant integer value.</p>
+
+<h5>Semantics:</h5>
+<p>The '<tt>llvm.returnaddress</tt>' intrinsic either returns a pointer
+ indicating the return address of the specified call frame, or zero if it
+ cannot be identified. The value returned by this intrinsic is likely to be
+ incorrect or 0 for arguments other than zero, so it should only be used for
+ debugging purposes.</p>
+
+<p>Note that calling this intrinsic does not prevent function inlining or other
+ aggressive transformations, so the value returned may not be that of the
+ obvious source-language caller.</p>
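+
+<p>For example, the return address of the current frame would typically be
+   retrieved like this (a minimal sketch):</p>
+<pre>
+  %ra = call i8* @llvm.returnaddress(i32 0)    <i>; return address of the current frame</i>
+</pre>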
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="int_frameaddress">'<tt>llvm.frameaddress</tt>' Intrinsic</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ declare i8* @llvm.frameaddress(i32 &lt;level&gt;)
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>llvm.frameaddress</tt>' intrinsic attempts to return the
+ target-specific frame pointer value for the specified stack frame.</p>
+
+<h5>Arguments:</h5>
+<p>The argument to this intrinsic indicates which function to return the frame
+ pointer for. Zero indicates the calling function, one indicates its caller,
+ etc. The argument is <b>required</b> to be a constant integer value.</p>
+
+<h5>Semantics:</h5>
+<p>The '<tt>llvm.frameaddress</tt>' intrinsic either returns a pointer
+ indicating the frame address of the specified call frame, or zero if it
+ cannot be identified. The value returned by this intrinsic is likely to be
+ incorrect or 0 for arguments other than zero, so it should only be used for
+ debugging purposes.</p>
+
+<p>Note that calling this intrinsic does not prevent function inlining or other
+ aggressive transformations, so the value returned may not be that of the
+ obvious source-language caller.</p>
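+
+<p>Analogously, a sketch of querying the current frame pointer:</p>
+<pre>
+  %fp = call i8* @llvm.frameaddress(i32 0)     <i>; frame address of the current frame</i>
+</pre>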
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="int_stacksave">'<tt>llvm.stacksave</tt>' Intrinsic</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ declare i8* @llvm.stacksave()
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>llvm.stacksave</tt>' intrinsic is used to remember the current state
+ of the function stack, for use
+ with <a href="#int_stackrestore"> <tt>llvm.stackrestore</tt></a>. This is
+ useful for implementing language features like scoped automatic variable
+ sized arrays in C99.</p>
+
+<h5>Semantics:</h5>
+<p>This intrinsic returns an opaque pointer value that can be passed
+ to <a href="#int_stackrestore"><tt>llvm.stackrestore</tt></a>. When
+ an <tt>llvm.stackrestore</tt> intrinsic is executed with a value saved
+ from <tt>llvm.stacksave</tt>, it effectively restores the state of the stack
+ to the state it was in when the <tt>llvm.stacksave</tt> intrinsic executed.
+ In practice, this pops any <a href="#i_alloca">alloca</a> blocks from the
+ stack that were allocated after the <tt>llvm.stacksave</tt> was executed.</p>
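+
+<p>For illustration, a scoped dynamic allocation might look like the following
+   sketch, where <tt>%n</tt> is assumed to hold the desired byte count:</p>
+<pre>
+  %sp  = call i8* @llvm.stacksave()            <i>; remember the stack state</i>
+  %buf = alloca i8, i32 %n                     <i>; dynamically sized stack buffer</i>
+  ; ... use %buf ...
+  call void @llvm.stackrestore(i8* %sp)        <i>; deallocates %buf</i>
+</pre>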
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="int_stackrestore">'<tt>llvm.stackrestore</tt>' Intrinsic</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ declare void @llvm.stackrestore(i8* %ptr)
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>llvm.stackrestore</tt>' intrinsic is used to restore the state of
+ the function stack to the state it was in when the
+ corresponding <a href="#int_stacksave"><tt>llvm.stacksave</tt></a> intrinsic
+ executed. This is useful for implementing language features like scoped
+ automatic variable sized arrays in C99.</p>
+
+<h5>Semantics:</h5>
+<p>See the description
+ for <a href="#int_stacksave"><tt>llvm.stacksave</tt></a>.</p>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="int_prefetch">'<tt>llvm.prefetch</tt>' Intrinsic</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ declare void @llvm.prefetch(i8* &lt;address&gt;, i32 &lt;rw&gt;, i32 &lt;locality&gt;, i32 &lt;cache type&gt;)
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>llvm.prefetch</tt>' intrinsic is a hint to the code generator to
+   insert a prefetch instruction if supported; otherwise, it is a no-op.
+ Prefetches have no effect on the behavior of the program but can change its
+ performance characteristics.</p>
+
+<h5>Arguments:</h5>
+<p><tt>address</tt> is the address to be prefetched, <tt>rw</tt> is the
+ specifier determining if the fetch should be for a read (0) or write (1),
+   and <tt>locality</tt> is a temporal locality specifier, ranging from 0 (no
+   locality) to 3 (extremely local; keep in cache). The <tt>cache type</tt>
+ specifies whether the prefetch is performed on the data (1) or instruction (0)
+ cache. The <tt>rw</tt>, <tt>locality</tt> and <tt>cache type</tt> arguments
+ must be constant integers.</p>
+
+<h5>Semantics:</h5>
+<p>This intrinsic does not modify the behavior of the program. In particular,
+ prefetches cannot trap and do not produce a value. On targets that support
+ this intrinsic, the prefetch can provide hints to the processor cache for
+ better performance.</p>
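+
+<p>For example, a read prefetch of a placeholder pointer <tt>%ptr</tt> into
+   the data cache with maximum temporal locality could be written as:</p>
+<pre>
+  call void @llvm.prefetch(i8* %ptr, i32 0, i32 3, i32 1)   <i>; read, locality 3, data cache</i>
+</pre>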
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="int_pcmarker">'<tt>llvm.pcmarker</tt>' Intrinsic</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ declare void @llvm.pcmarker(i32 &lt;id&gt;)
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>llvm.pcmarker</tt>' intrinsic is a method to export a Program
+ Counter (PC) in a region of code to simulators and other tools. The method
+ is target specific, but it is expected that the marker will use exported
+ symbols to transmit the PC of the marker. The marker makes no guarantees
+ that it will remain with any specific instruction after optimizations. It is
+ possible that the presence of a marker will inhibit optimizations. The
+ intended use is to be inserted after optimizations to allow correlations of
+ simulation runs.</p>
+
+<h5>Arguments:</h5>
+<p><tt>id</tt> is a numerical id identifying the marker.</p>
+
+<h5>Semantics:</h5>
+<p>This intrinsic does not modify the behavior of the program. Backends that do
+ not support this intrinsic may ignore it.</p>
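+
+<p>A minimal use, exporting marker number 1 (the id is arbitrary):</p>
+<pre>
+  call void @llvm.pcmarker(i32 1)
+</pre>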
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="int_readcyclecounter">'<tt>llvm.readcyclecounter</tt>' Intrinsic</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ declare i64 @llvm.readcyclecounter()
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>llvm.readcyclecounter</tt>' intrinsic provides access to the cycle
+ counter register (or similar low latency, high accuracy clocks) on those
+ targets that support it. On X86, it should map to RDTSC. On Alpha, it
+ should map to RPCC. As the backing counters overflow quickly (on the order
+   of 9 seconds on Alpha), this should only be used for small timings.</p>
+
+<h5>Semantics:</h5>
+<p>When directly supported, reading the cycle counter should not modify any
+   memory. Implementations are allowed to return either an application-specific
+   value or a system-wide value. On backends without support, this is lowered
+ to a constant 0.</p>
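+
+<h5>Examples:</h5>
+<p>A minimal sketch of timing a short region of code; the region between the
+ two reads is hypothetical, and the result is only meaningful on targets that
+ support the intrinsic:</p>
+
+<pre>
+  %start = call i64 @llvm.readcyclecounter()
+  ; ... short region being timed ...
+  %end = call i64 @llvm.readcyclecounter()
+  %elapsed = sub i64 %end, %start
+</pre>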
+
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="int_libc">Standard C Library Intrinsics</a>
+</h3>
+
+<div>
+
+<p>LLVM provides intrinsics for a few important standard C library functions.
+ These intrinsics allow source-language front-ends to pass information about
+ the alignment of the pointer arguments to the code generator, providing
+ opportunity for more efficient code generation.</p>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="int_memcpy">'<tt>llvm.memcpy</tt>' Intrinsic</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<p>This is an overloaded intrinsic. You can use <tt>llvm.memcpy</tt> on any
+ integer bit width and for different address spaces. Not all targets support
+ all bit widths however.</p>
+
+<pre>
+ declare void @llvm.memcpy.p0i8.p0i8.i32(i8* &lt;dest&gt;, i8* &lt;src&gt;,
+ i32 &lt;len&gt;, i32 &lt;align&gt;, i1 &lt;isvolatile&gt;)
+ declare void @llvm.memcpy.p0i8.p0i8.i64(i8* &lt;dest&gt;, i8* &lt;src&gt;,
+ i64 &lt;len&gt;, i32 &lt;align&gt;, i1 &lt;isvolatile&gt;)
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>llvm.memcpy.*</tt>' intrinsics copy a block of memory from the
+ source location to the destination location.</p>
+
+<p>Note that, unlike the standard libc function, the <tt>llvm.memcpy.*</tt>
+ intrinsics do not return a value, take extra alignment and isvolatile
+ arguments, and allow the pointers to be in specified address spaces.</p>
+
+<h5>Arguments:</h5>
+
+<p>The first argument is a pointer to the destination, the second is a pointer
+ to the source. The third argument is an integer argument specifying the
+ number of bytes to copy, the fourth argument is the alignment of the
+ source and destination locations, and the fifth is a boolean indicating a
+ volatile access.</p>
+
+<p>If the call to this intrinsic has an alignment value that is not 0 or 1,
+ then the caller guarantees that both the source and destination pointers are
+ aligned to that boundary.</p>
+
+<p>If the <tt>isvolatile</tt> parameter is <tt>true</tt>, the
+ <tt>llvm.memcpy</tt> call is a <a href="#volatile">volatile operation</a>.
+ The detailed access behavior is not very cleanly specified and it is unwise
+ to depend on it.</p>
+
+<h5>Semantics:</h5>
+
+<p>The '<tt>llvm.memcpy.*</tt>' intrinsics copy a block of memory from the
+ source location to the destination location, which are not allowed to
+ overlap. It copies "len" bytes of memory over. If the argument is known to
+ be aligned to some boundary, this can be specified as the fourth argument,
+ otherwise it should be set to 0 or 1.</p>
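+
+<h5>Examples:</h5>
+<p>A sketch of a non-volatile copy of 16 bytes, assuming <tt>%dst</tt> and
+ <tt>%src</tt> are non-overlapping <tt>i8*</tt> values that the front end knows
+ to be 4-byte aligned:</p>
+
+<pre>
+  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %src, i32 16, i32 4, i1 false)
+</pre>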
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="int_memmove">'<tt>llvm.memmove</tt>' Intrinsic</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<p>This is an overloaded intrinsic. You can use <tt>llvm.memmove</tt> on any
+ integer bit width and for different address spaces. Not all targets support
+ all bit widths however.</p>
+
+<pre>
+ declare void @llvm.memmove.p0i8.p0i8.i32(i8* &lt;dest&gt;, i8* &lt;src&gt;,
+ i32 &lt;len&gt;, i32 &lt;align&gt;, i1 &lt;isvolatile&gt;)
+ declare void @llvm.memmove.p0i8.p0i8.i64(i8* &lt;dest&gt;, i8* &lt;src&gt;,
+ i64 &lt;len&gt;, i32 &lt;align&gt;, i1 &lt;isvolatile&gt;)
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>llvm.memmove.*</tt>' intrinsics move a block of memory from the
+ source location to the destination location. It is similar to the
+ '<tt>llvm.memcpy</tt>' intrinsic but allows the two memory locations to
+ overlap.</p>
+
+<p>Note that, unlike the standard libc function, the <tt>llvm.memmove.*</tt>
+ intrinsics do not return a value, take extra alignment and isvolatile
+ arguments, and allow the pointers to be in specified address spaces.</p>
+
+<h5>Arguments:</h5>
+
+<p>The first argument is a pointer to the destination, the second is a pointer
+ to the source. The third argument is an integer argument specifying the
+ number of bytes to copy, the fourth argument is the alignment of the
+ source and destination locations, and the fifth is a boolean indicating a
+ volatile access.</p>
+
+<p>If the call to this intrinsic has an alignment value that is not 0 or 1,
+ then the caller guarantees that the source and destination pointers are
+ aligned to that boundary.</p>
+
+<p>If the <tt>isvolatile</tt> parameter is <tt>true</tt>, the
+ <tt>llvm.memmove</tt> call is a <a href="#volatile">volatile operation</a>.
+ The detailed access behavior is not very cleanly specified and it is unwise
+ to depend on it.</p>
+
+<h5>Semantics:</h5>
+
+<p>The '<tt>llvm.memmove.*</tt>' intrinsics copy a block of memory from the
+ source location to the destination location, which may overlap. It copies
+ "len" bytes of memory over. If the argument is known to be aligned to some
+ boundary, this can be specified as the fourth argument, otherwise it should
+ be set to 0 or 1.</p>
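+
+<h5>Examples:</h5>
+<p>A sketch of shifting 12 bytes forward by 4 within a single buffer
+ <tt>%buf</tt> (assumed defined elsewhere); the source and destination ranges
+ overlap, which is why <tt>llvm.memmove</tt> rather than <tt>llvm.memcpy</tt>
+ must be used:</p>
+
+<pre>
+  %dst = getelementptr i8* %buf, i32 4
+  call void @llvm.memmove.p0i8.p0i8.i32(i8* %dst, i8* %buf, i32 12, i32 1, i1 false)
+</pre>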
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="int_memset">'<tt>llvm.memset.*</tt>' Intrinsics</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<p>This is an overloaded intrinsic. You can use llvm.memset on any integer bit
+ width and for different address spaces. However, not all targets support all
+ bit widths.</p>
+
+<pre>
+ declare void @llvm.memset.p0i8.i32(i8* &lt;dest&gt;, i8 &lt;val&gt;,
+ i32 &lt;len&gt;, i32 &lt;align&gt;, i1 &lt;isvolatile&gt;)
+ declare void @llvm.memset.p0i8.i64(i8* &lt;dest&gt;, i8 &lt;val&gt;,
+ i64 &lt;len&gt;, i32 &lt;align&gt;, i1 &lt;isvolatile&gt;)
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>llvm.memset.*</tt>' intrinsics fill a block of memory with a
+ particular byte value.</p>
+
+<p>Note that, unlike the standard libc function, the <tt>llvm.memset</tt>
+ intrinsic does not return a value and takes extra alignment/volatile
+ arguments. Also, the destination can be in an arbitrary address space.</p>
+
+<h5>Arguments:</h5>
+<p>The first argument is a pointer to the destination to fill, the second is the
+ byte value with which to fill it, the third argument is an integer argument
+ specifying the number of bytes to fill, and the fourth argument is the known
+ alignment of the destination location.</p>
+
+<p>If the call to this intrinsic has an alignment value that is not 0 or 1,
+ then the caller guarantees that the destination pointer is aligned to that
+ boundary.</p>
+
+<p>If the <tt>isvolatile</tt> parameter is <tt>true</tt>, the
+ <tt>llvm.memset</tt> call is a <a href="#volatile">volatile operation</a>.
+ The detailed access behavior is not very cleanly specified and it is unwise
+ to depend on it.</p>
+
+<h5>Semantics:</h5>
+<p>The '<tt>llvm.memset.*</tt>' intrinsics fill "len" bytes of memory starting
+ at the destination location. If the argument is known to be aligned to some
+ boundary, this can be specified as the fourth argument, otherwise it should
+ be set to 0 or 1.</p>
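+
+<h5>Examples:</h5>
+<p>A sketch of zeroing a 64-byte buffer <tt>%buf</tt> (assumed defined
+ elsewhere) that is known to be 8-byte aligned:</p>
+
+<pre>
+  call void @llvm.memset.p0i8.i64(i8* %buf, i8 0, i64 64, i32 8, i1 false)
+</pre>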
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="int_sqrt">'<tt>llvm.sqrt.*</tt>' Intrinsic</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<p>This is an overloaded intrinsic. You can use <tt>llvm.sqrt</tt> on any
+ floating point or vector of floating point type. Not all targets support all
+ types however.</p>
+
+<pre>
+ declare float @llvm.sqrt.f32(float %Val)
+ declare double @llvm.sqrt.f64(double %Val)
+ declare x86_fp80 @llvm.sqrt.f80(x86_fp80 %Val)
+ declare fp128 @llvm.sqrt.f128(fp128 %Val)
+ declare ppc_fp128 @llvm.sqrt.ppcf128(ppc_fp128 %Val)
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>llvm.sqrt</tt>' intrinsics return the sqrt of the specified operand,
+ returning the same value as the libm '<tt>sqrt</tt>' functions would.
+ Unlike <tt>sqrt</tt> in libm, however, <tt>llvm.sqrt</tt> has undefined
+ behavior for negative numbers other than -0.0 (which allows for better
+ optimization, because there is no need to worry about errno being
+ set). <tt>llvm.sqrt(-0.0)</tt> is defined to return -0.0 like IEEE sqrt.</p>
+
+<h5>Arguments:</h5>
+<p>The argument and return value are floating point numbers of the same
+ type.</p>
+
+<h5>Semantics:</h5>
+<p>This function returns the sqrt of the specified operand if it is a
+ nonnegative floating point number.</p>
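+
+<h5>Examples:</h5>
+<p>Scalar and vector sketches, assuming <tt>%x</tt> and <tt>%v</tt> are defined
+ elsewhere; the vector form is only available on targets that support it:</p>
+
+<pre>
+  %r  = call double @llvm.sqrt.f64(double %x)
+  %rv = call &lt;4 x float&gt; @llvm.sqrt.v4f32(&lt;4 x float&gt; %v)   ; element-wise square root
+</pre>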
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="int_powi">'<tt>llvm.powi.*</tt>' Intrinsic</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<p>This is an overloaded intrinsic. You can use <tt>llvm.powi</tt> on any
+ floating point or vector of floating point type. Not all targets support all
+ types however.</p>
+
+<pre>
+ declare float @llvm.powi.f32(float %Val, i32 %power)
+ declare double @llvm.powi.f64(double %Val, i32 %power)
+ declare x86_fp80 @llvm.powi.f80(x86_fp80 %Val, i32 %power)
+ declare fp128 @llvm.powi.f128(fp128 %Val, i32 %power)
+ declare ppc_fp128 @llvm.powi.ppcf128(ppc_fp128 %Val, i32 %power)
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>llvm.powi.*</tt>' intrinsics return the first operand raised to the
+ specified (positive or negative) power. The order of evaluation of
+ multiplications is not defined. When a vector of floating point type is
+ used, the second argument remains a scalar integer value.</p>
+
+<h5>Arguments:</h5>
+<p>The second argument is an integer power, and the first is a value to raise to
+ that power.</p>
+
+<h5>Semantics:</h5>
+<p>This function returns the first value raised to the second power with an
+ unspecified sequence of rounding operations.</p>
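+
+<h5>Examples:</h5>
+<p>A sketch using hypothetical values <tt>%x</tt> and <tt>%y</tt>; the exact
+ rounding of the intermediate multiplications is unspecified:</p>
+
+<pre>
+  %cube = call float @llvm.powi.f32(float %x, i32 3)     ; %x raised to the power 3
+  %inv  = call double @llvm.powi.f64(double %y, i32 -2)  ; %y raised to the power -2
+</pre>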
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="int_sin">'<tt>llvm.sin.*</tt>' Intrinsic</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<p>This is an overloaded intrinsic. You can use <tt>llvm.sin</tt> on any
+ floating point or vector of floating point type. Not all targets support all
+ types however.</p>
+
+<pre>
+ declare float @llvm.sin.f32(float %Val)
+ declare double @llvm.sin.f64(double %Val)
+ declare x86_fp80 @llvm.sin.f80(x86_fp80 %Val)
+ declare fp128 @llvm.sin.f128(fp128 %Val)
+ declare ppc_fp128 @llvm.sin.ppcf128(ppc_fp128 %Val)
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>llvm.sin.*</tt>' intrinsics return the sine of the operand.</p>
+
+<h5>Arguments:</h5>
+<p>The argument and return value are floating point numbers of the same
+ type.</p>
+
+<h5>Semantics:</h5>
+<p>This function returns the sine of the specified operand, returning the same
+ values as the libm <tt>sin</tt> functions would, and handles error conditions
+ in the same way.</p>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="int_cos">'<tt>llvm.cos.*</tt>' Intrinsic</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<p>This is an overloaded intrinsic. You can use <tt>llvm.cos</tt> on any
+ floating point or vector of floating point type. Not all targets support all
+ types however.</p>
+
+<pre>
+ declare float @llvm.cos.f32(float %Val)
+ declare double @llvm.cos.f64(double %Val)
+ declare x86_fp80 @llvm.cos.f80(x86_fp80 %Val)
+ declare fp128 @llvm.cos.f128(fp128 %Val)
+ declare ppc_fp128 @llvm.cos.ppcf128(ppc_fp128 %Val)
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>llvm.cos.*</tt>' intrinsics return the cosine of the operand.</p>
+
+<h5>Arguments:</h5>
+<p>The argument and return value are floating point numbers of the same
+ type.</p>
+
+<h5>Semantics:</h5>
+<p>This function returns the cosine of the specified operand, returning the same
+ values as the libm <tt>cos</tt> functions would, and handles error conditions
+ in the same way.</p>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="int_pow">'<tt>llvm.pow.*</tt>' Intrinsic</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<p>This is an overloaded intrinsic. You can use <tt>llvm.pow</tt> on any
+ floating point or vector of floating point type. Not all targets support all
+ types however.</p>
+
+<pre>
+ declare float @llvm.pow.f32(float %Val, float %Power)
+ declare double @llvm.pow.f64(double %Val, double %Power)
+ declare x86_fp80 @llvm.pow.f80(x86_fp80 %Val, x86_fp80 %Power)
+ declare fp128 @llvm.pow.f128(fp128 %Val, fp128 %Power)
+  declare ppc_fp128 @llvm.pow.ppcf128(ppc_fp128 %Val, ppc_fp128 %Power)
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>llvm.pow.*</tt>' intrinsics return the first operand raised to the
+ specified (positive or negative) power.</p>
+
+<h5>Arguments:</h5>
+<p>The second argument is a floating point power, and the first is a value to
+ raise to that power.</p>
+
+<h5>Semantics:</h5>
+<p>This function returns the first value raised to the second power, returning
+ the same values as the libm <tt>pow</tt> functions would, and handles error
+ conditions in the same way.</p>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="int_exp">'<tt>llvm.exp.*</tt>' Intrinsic</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<p>This is an overloaded intrinsic. You can use <tt>llvm.exp</tt> on any
+ floating point or vector of floating point type. Not all targets support all
+ types however.</p>
+
+<pre>
+ declare float @llvm.exp.f32(float %Val)
+ declare double @llvm.exp.f64(double %Val)
+ declare x86_fp80 @llvm.exp.f80(x86_fp80 %Val)
+ declare fp128 @llvm.exp.f128(fp128 %Val)
+ declare ppc_fp128 @llvm.exp.ppcf128(ppc_fp128 %Val)
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>llvm.exp.*</tt>' intrinsics perform the exp function.</p>
+
+<h5>Arguments:</h5>
+<p>The argument and return value are floating point numbers of the same
+ type.</p>
+
+<h5>Semantics:</h5>
+<p>This function returns the same values as the libm <tt>exp</tt> functions
+ would, and handles error conditions in the same way.</p>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="int_log">'<tt>llvm.log.*</tt>' Intrinsic</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<p>This is an overloaded intrinsic. You can use <tt>llvm.log</tt> on any
+ floating point or vector of floating point type. Not all targets support all
+ types however.</p>
+
+<pre>
+ declare float @llvm.log.f32(float %Val)
+ declare double @llvm.log.f64(double %Val)
+ declare x86_fp80 @llvm.log.f80(x86_fp80 %Val)
+ declare fp128 @llvm.log.f128(fp128 %Val)
+ declare ppc_fp128 @llvm.log.ppcf128(ppc_fp128 %Val)
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>llvm.log.*</tt>' intrinsics perform the log function.</p>
+
+<h5>Arguments:</h5>
+<p>The argument and return value are floating point numbers of the same
+ type.</p>
+
+<h5>Semantics:</h5>
+<p>This function returns the same values as the libm <tt>log</tt> functions
+ would, and handles error conditions in the same way.</p>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="int_fma">'<tt>llvm.fma.*</tt>' Intrinsic</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<p>This is an overloaded intrinsic. You can use <tt>llvm.fma</tt> on any
+ floating point or vector of floating point type. Not all targets support all
+ types however.</p>
+
+<pre>
+ declare float @llvm.fma.f32(float %a, float %b, float %c)
+ declare double @llvm.fma.f64(double %a, double %b, double %c)
+ declare x86_fp80 @llvm.fma.f80(x86_fp80 %a, x86_fp80 %b, x86_fp80 %c)
+ declare fp128 @llvm.fma.f128(fp128 %a, fp128 %b, fp128 %c)
+ declare ppc_fp128 @llvm.fma.ppcf128(ppc_fp128 %a, ppc_fp128 %b, ppc_fp128 %c)
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>llvm.fma.*</tt>' intrinsics perform the fused multiply-add
+ operation.</p>
+
+<h5>Arguments:</h5>
+<p>The argument and return value are floating point numbers of the same
+ type.</p>
+
+<h5>Semantics:</h5>
+<p>This function returns the same values as the libm <tt>fma</tt> functions
+ would.</p>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="int_fabs">'<tt>llvm.fabs.*</tt>' Intrinsic</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<p>This is an overloaded intrinsic. You can use <tt>llvm.fabs</tt> on any
+ floating point or vector of floating point type. Not all targets support all
+ types however.</p>
+
+<pre>
+ declare float @llvm.fabs.f32(float %Val)
+ declare double @llvm.fabs.f64(double %Val)
+ declare x86_fp80 @llvm.fabs.f80(x86_fp80 %Val)
+ declare fp128 @llvm.fabs.f128(fp128 %Val)
+ declare ppc_fp128 @llvm.fabs.ppcf128(ppc_fp128 %Val)
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>llvm.fabs.*</tt>' intrinsics return the absolute value of
+ the operand.</p>
+
+<h5>Arguments:</h5>
+<p>The argument and return value are floating point numbers of the same
+ type.</p>
+
+<h5>Semantics:</h5>
+<p>This function returns the same values as the libm <tt>fabs</tt> functions
+ would, and handles error conditions in the same way.</p>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="int_floor">'<tt>llvm.floor.*</tt>' Intrinsic</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<p>This is an overloaded intrinsic. You can use <tt>llvm.floor</tt> on any
+ floating point or vector of floating point type. Not all targets support all
+ types however.</p>
+
+<pre>
+ declare float @llvm.floor.f32(float %Val)
+ declare double @llvm.floor.f64(double %Val)
+ declare x86_fp80 @llvm.floor.f80(x86_fp80 %Val)
+ declare fp128 @llvm.floor.f128(fp128 %Val)
+ declare ppc_fp128 @llvm.floor.ppcf128(ppc_fp128 %Val)
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>llvm.floor.*</tt>' intrinsics return the floor of
+ the operand.</p>
+
+<h5>Arguments:</h5>
+<p>The argument and return value are floating point numbers of the same
+ type.</p>
+
+<h5>Semantics:</h5>
+<p>This function returns the same values as the libm <tt>floor</tt> functions
+ would, and handles error conditions in the same way.</p>
+
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="int_manip">Bit Manipulation Intrinsics</a>
+</h3>
+
+<div>
+
+<p>LLVM provides intrinsics for a few important bit manipulation operations.
+ These allow efficient code generation for some algorithms.</p>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="int_bswap">'<tt>llvm.bswap.*</tt>' Intrinsics</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<p>This is an overloaded intrinsic function. You can use bswap on any integer
+ type that is an even number of bytes (i.e. BitWidth % 16 == 0).</p>
+
+<pre>
+ declare i16 @llvm.bswap.i16(i16 &lt;id&gt;)
+ declare i32 @llvm.bswap.i32(i32 &lt;id&gt;)
+ declare i64 @llvm.bswap.i64(i64 &lt;id&gt;)
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>llvm.bswap</tt>' family of intrinsics is used to byte swap integer
+ values with an even number of bytes (positive multiple of 16 bits). These
+ are useful for performing operations on data that is not in the target's
+ native byte order.</p>
+
+<h5>Semantics:</h5>
+<p>The <tt>llvm.bswap.i16</tt> intrinsic returns an i16 value that has the high
+ and low byte of the input i16 swapped. Similarly,
+ the <tt>llvm.bswap.i32</tt> intrinsic returns an i32 value that has the four
+ bytes of the input i32 swapped, so that if the input bytes are numbered 0, 1,
+ 2, 3 then the returned i32 will have its bytes in 3, 2, 1, 0 order.
+ The <tt>llvm.bswap.i48</tt>, <tt>llvm.bswap.i64</tt> and other intrinsics
+ extend this concept to additional even-byte lengths (6 bytes, 8 bytes and
+ more, respectively).</p>
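+
+<h5>Examples:</h5>
+<p>A sketch of swapping the byte order of a 32-bit value <tt>%x</tt> (assumed
+ defined elsewhere):</p>
+
+<pre>
+  %swapped = call i32 @llvm.bswap.i32(i32 %x)   ; 0x12345678 becomes 0x78563412
+</pre>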
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="int_ctpop">'<tt>llvm.ctpop.*</tt>' Intrinsic</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<p>This is an overloaded intrinsic. You can use llvm.ctpop on any integer bit
+ width, or on any vector with integer elements. Not all targets support all
+ bit widths or vector types, however.</p>
+
+<pre>
+ declare i8 @llvm.ctpop.i8(i8 &lt;src&gt;)
+ declare i16 @llvm.ctpop.i16(i16 &lt;src&gt;)
+ declare i32 @llvm.ctpop.i32(i32 &lt;src&gt;)
+ declare i64 @llvm.ctpop.i64(i64 &lt;src&gt;)
+ declare i256 @llvm.ctpop.i256(i256 &lt;src&gt;)
+ declare &lt;2 x i32&gt; @llvm.ctpop.v2i32(&lt;2 x i32&gt; &lt;src&gt;)
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>llvm.ctpop</tt>' family of intrinsics counts the number of bits set
+ in a value.</p>
+
+<h5>Arguments:</h5>
+<p>The only argument is the value to be counted. The argument may be of any
+ integer type, or a vector with integer elements.
+ The return type must match the argument type.</p>
+
+<h5>Semantics:</h5>
+<p>The '<tt>llvm.ctpop</tt>' intrinsic counts the 1's in a variable, or within each
+ element of a vector.</p>
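+
+<h5>Examples:</h5>
+<p>Scalar and vector sketches, with <tt>%x</tt> and <tt>%v</tt> assumed defined
+ elsewhere:</p>
+
+<pre>
+  %bits  = call i32 @llvm.ctpop.i32(i32 %x)                  ; %x = 11 (binary 1011) yields 3
+  %vbits = call &lt;2 x i32&gt; @llvm.ctpop.v2i32(&lt;2 x i32&gt; %v)   ; per-element population count
+</pre>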
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="int_ctlz">'<tt>llvm.ctlz.*</tt>' Intrinsic</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<p>This is an overloaded intrinsic. You can use <tt>llvm.ctlz</tt> on any
+ integer bit width, or any vector whose elements are integers. Not all
+ targets support all bit widths or vector types, however.</p>
+
+<pre>
+ declare i8 @llvm.ctlz.i8 (i8 &lt;src&gt;, i1 &lt;is_zero_undef&gt;)
+ declare i16 @llvm.ctlz.i16 (i16 &lt;src&gt;, i1 &lt;is_zero_undef&gt;)
+ declare i32 @llvm.ctlz.i32 (i32 &lt;src&gt;, i1 &lt;is_zero_undef&gt;)
+ declare i64 @llvm.ctlz.i64 (i64 &lt;src&gt;, i1 &lt;is_zero_undef&gt;)
+ declare i256 @llvm.ctlz.i256(i256 &lt;src&gt;, i1 &lt;is_zero_undef&gt;)
+  declare &lt;2 x i32&gt; @llvm.ctlz.v2i32(&lt;2 x i32&gt; &lt;src&gt;, i1 &lt;is_zero_undef&gt;)
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>llvm.ctlz</tt>' family of intrinsic functions counts the number of
+ leading zeros in a variable.</p>
+
+<h5>Arguments:</h5>
+<p>The first argument is the value to be counted. This argument may be of any
+ integer type, or a vector with integer element type. The return type
+ must match the first argument type.</p>
+
+<p>The second argument must be a constant and is a flag to indicate whether the
+ intrinsic should ensure that a zero as the first argument produces a defined
+ result. Historically some architectures did not provide a defined result for
+ zero values as efficiently, and many algorithms are now predicated on
+ avoiding zero-value inputs.</p>
+
+<h5>Semantics:</h5>
+<p>The '<tt>llvm.ctlz</tt>' intrinsic counts the leading (most significant)
+ zeros in a variable, or within each element of the vector.
+ If <tt>src == 0</tt> then the result is the size in bits of the type of
+ <tt>src</tt> if <tt>is_zero_undef == 0</tt> and <tt>undef</tt> otherwise.
+ For example, <tt>llvm.ctlz(i32 2) = 30</tt>.</p>
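+
+<h5>Examples:</h5>
+<p>For illustration, the constant-argument calls below follow directly from the
+ semantics above:</p>
+
+<pre>
+  %lz  = call i32 @llvm.ctlz.i32(i32 2, i1 false)   ; yields 30
+  %lz0 = call i32 @llvm.ctlz.i32(i32 0, i1 false)   ; yields 32; with i1 true the result would be undef
+</pre>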
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="int_cttz">'<tt>llvm.cttz.*</tt>' Intrinsic</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<p>This is an overloaded intrinsic. You can use <tt>llvm.cttz</tt> on any
+ integer bit width, or any vector of integer elements. Not all targets
+ support all bit widths or vector types, however.</p>
+
+<pre>
+ declare i8 @llvm.cttz.i8 (i8 &lt;src&gt;, i1 &lt;is_zero_undef&gt;)
+ declare i16 @llvm.cttz.i16 (i16 &lt;src&gt;, i1 &lt;is_zero_undef&gt;)
+ declare i32 @llvm.cttz.i32 (i32 &lt;src&gt;, i1 &lt;is_zero_undef&gt;)
+ declare i64 @llvm.cttz.i64 (i64 &lt;src&gt;, i1 &lt;is_zero_undef&gt;)
+ declare i256 @llvm.cttz.i256(i256 &lt;src&gt;, i1 &lt;is_zero_undef&gt;)
+  declare &lt;2 x i32&gt; @llvm.cttz.v2i32(&lt;2 x i32&gt; &lt;src&gt;, i1 &lt;is_zero_undef&gt;)
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>llvm.cttz</tt>' family of intrinsic functions counts the number of
+ trailing zeros.</p>
+
+<h5>Arguments:</h5>
+<p>The first argument is the value to be counted. This argument may be of any
+ integer type, or a vector with integer element type. The return type
+ must match the first argument type.</p>
+
+<p>The second argument must be a constant and is a flag to indicate whether the
+ intrinsic should ensure that a zero as the first argument produces a defined
+ result. Historically some architectures did not provide a defined result for
+ zero values as efficiently, and many algorithms are now predicated on
+ avoiding zero-value inputs.</p>
+
+<h5>Semantics:</h5>
+<p>The '<tt>llvm.cttz</tt>' intrinsic counts the trailing (least significant)
+ zeros in a variable, or within each element of a vector.
+ If <tt>src == 0</tt> then the result is the size in bits of the type of
+ <tt>src</tt> if <tt>is_zero_undef == 0</tt> and <tt>undef</tt> otherwise.
+ For example, <tt>llvm.cttz(2) = 1</tt>.</p>
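+
+<h5>Examples:</h5>
+<p>For illustration, the constant-argument calls below follow directly from the
+ semantics above:</p>
+
+<pre>
+  %tz  = call i32 @llvm.cttz.i32(i32 8, i1 true)    ; yields 3
+  %tz0 = call i32 @llvm.cttz.i32(i32 0, i1 false)   ; yields 32; with i1 true the result would be undef
+</pre>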
+
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="int_overflow">Arithmetic with Overflow Intrinsics</a>
+</h3>
+
+<div>
+
+<p>LLVM provides intrinsics for some arithmetic with overflow operations.</p>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="int_sadd_overflow">
+ '<tt>llvm.sadd.with.overflow.*</tt>' Intrinsics
+ </a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<p>This is an overloaded intrinsic. You can use <tt>llvm.sadd.with.overflow</tt>
+ on any integer bit width.</p>
+
+<pre>
+ declare {i16, i1} @llvm.sadd.with.overflow.i16(i16 %a, i16 %b)
+ declare {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
+ declare {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a, i64 %b)
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>llvm.sadd.with.overflow</tt>' family of intrinsic functions perform
+ a signed addition of the two arguments, and indicate whether an overflow
+ occurred during the signed summation.</p>
+
+<h5>Arguments:</h5>
+<p>The arguments (%a and %b) and the first element of the result structure may
+ be of integer types of any bit width, but they must have the same bit
+ width. The second element of the result structure must be of
+ type <tt>i1</tt>. <tt>%a</tt> and <tt>%b</tt> are the two values that will
+ undergo signed addition.</p>
+
+<h5>Semantics:</h5>
+<p>The '<tt>llvm.sadd.with.overflow</tt>' family of intrinsic functions perform
+ a signed addition of the two variables. They return a structure &mdash; the
+ first element of which is the signed summation, and the second element of
+ which is a bit specifying if the signed summation resulted in an
+ overflow.</p>
+
+<h5>Examples:</h5>
+<pre>
+ %res = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
+ %sum = extractvalue {i32, i1} %res, 0
+ %obit = extractvalue {i32, i1} %res, 1
+ br i1 %obit, label %overflow, label %normal
+</pre>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="int_uadd_overflow">
+ '<tt>llvm.uadd.with.overflow.*</tt>' Intrinsics
+ </a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<p>This is an overloaded intrinsic. You can use <tt>llvm.uadd.with.overflow</tt>
+ on any integer bit width.</p>
+
+<pre>
+ declare {i16, i1} @llvm.uadd.with.overflow.i16(i16 %a, i16 %b)
+ declare {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
+ declare {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>llvm.uadd.with.overflow</tt>' family of intrinsic functions perform
+ an unsigned addition of the two arguments, and indicate whether a carry
+ occurred during the unsigned summation.</p>
+
+<h5>Arguments:</h5>
+<p>The arguments (%a and %b) and the first element of the result structure may
+ be of integer types of any bit width, but they must have the same bit
+ width. The second element of the result structure must be of
+ type <tt>i1</tt>. <tt>%a</tt> and <tt>%b</tt> are the two values that will
+ undergo unsigned addition.</p>
+
+<h5>Semantics:</h5>
+<p>The '<tt>llvm.uadd.with.overflow</tt>' family of intrinsic functions perform
+ an unsigned addition of the two arguments. They return a structure &mdash;
+ the first element of which is the sum, and the second element of which is a
+ bit specifying if the unsigned summation resulted in a carry.</p>
+
+<h5>Examples:</h5>
+<pre>
+ %res = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
+ %sum = extractvalue {i32, i1} %res, 0
+ %obit = extractvalue {i32, i1} %res, 1
+ br i1 %obit, label %carry, label %normal
+</pre>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="int_ssub_overflow">
+ '<tt>llvm.ssub.with.overflow.*</tt>' Intrinsics
+ </a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<p>This is an overloaded intrinsic. You can use <tt>llvm.ssub.with.overflow</tt>
+ on any integer bit width.</p>
+
+<pre>
+ declare {i16, i1} @llvm.ssub.with.overflow.i16(i16 %a, i16 %b)
+ declare {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a, i32 %b)
+ declare {i64, i1} @llvm.ssub.with.overflow.i64(i64 %a, i64 %b)
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>llvm.ssub.with.overflow</tt>' family of intrinsic functions perform
+ a signed subtraction of the two arguments, and indicate whether an overflow
+ occurred during the signed subtraction.</p>
+
+<h5>Arguments:</h5>
+<p>The arguments (%a and %b) and the first element of the result structure may
+ be of integer types of any bit width, but they must have the same bit
+ width. The second element of the result structure must be of
+ type <tt>i1</tt>. <tt>%a</tt> and <tt>%b</tt> are the two values that will
+ undergo signed subtraction.</p>
+
+<h5>Semantics:</h5>
+<p>The '<tt>llvm.ssub.with.overflow</tt>' family of intrinsic functions perform
+ a signed subtraction of the two arguments. They return a structure &mdash;
+ the first element of which is the subtraction, and the second element of
+ which is a bit specifying if the signed subtraction resulted in an
+ overflow.</p>
+
+<h5>Examples:</h5>
+<pre>
+ %res = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a, i32 %b)
+ %sum = extractvalue {i32, i1} %res, 0
+ %obit = extractvalue {i32, i1} %res, 1
+ br i1 %obit, label %overflow, label %normal
+</pre>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="int_usub_overflow">
+ '<tt>llvm.usub.with.overflow.*</tt>' Intrinsics
+ </a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<p>This is an overloaded intrinsic. You can use <tt>llvm.usub.with.overflow</tt>
+ on any integer bit width.</p>
+
+<pre>
+ declare {i16, i1} @llvm.usub.with.overflow.i16(i16 %a, i16 %b)
+ declare {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
+ declare {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %b)
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>llvm.usub.with.overflow</tt>' family of intrinsic functions perform
+ an unsigned subtraction of the two arguments, and indicate whether an
+ overflow occurred during the unsigned subtraction.</p>
+
+<h5>Arguments:</h5>
+<p>The arguments (%a and %b) and the first element of the result structure may
+ be of integer types of any bit width, but they must have the same bit
+ width. The second element of the result structure must be of
+ type <tt>i1</tt>. <tt>%a</tt> and <tt>%b</tt> are the two values that will
+ undergo unsigned subtraction.</p>
+
+<h5>Semantics:</h5>
+<p>The '<tt>llvm.usub.with.overflow</tt>' family of intrinsic functions perform
+ an unsigned subtraction of the two arguments. They return a structure &mdash;
+ the first element of which is the subtraction, and the second element of
+ which is a bit specifying if the unsigned subtraction resulted in an
+ overflow.</p>
+
+<h5>Examples:</h5>
+<pre>
+ %res = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
+ %sum = extractvalue {i32, i1} %res, 0
+ %obit = extractvalue {i32, i1} %res, 1
+ br i1 %obit, label %overflow, label %normal
+</pre>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="int_smul_overflow">
+ '<tt>llvm.smul.with.overflow.*</tt>' Intrinsics
+ </a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<p>This is an overloaded intrinsic. You can use <tt>llvm.smul.with.overflow</tt>
+ on any integer bit width.</p>
+
+<pre>
+ declare {i16, i1} @llvm.smul.with.overflow.i16(i16 %a, i16 %b)
+ declare {i32, i1} @llvm.smul.with.overflow.i32(i32 %a, i32 %b)
+ declare {i64, i1} @llvm.smul.with.overflow.i64(i64 %a, i64 %b)
+</pre>
+
+<h5>Overview:</h5>
+
+<p>The '<tt>llvm.smul.with.overflow</tt>' family of intrinsic functions perform
+ a signed multiplication of the two arguments, and indicate whether an
+ overflow occurred during the signed multiplication.</p>
+
+<h5>Arguments:</h5>
+<p>The arguments (%a and %b) and the first element of the result structure may
+ be of integer types of any bit width, but they must have the same bit
+ width. The second element of the result structure must be of
+ type <tt>i1</tt>. <tt>%a</tt> and <tt>%b</tt> are the two values that will
+ undergo signed multiplication.</p>
+
+<h5>Semantics:</h5>
+<p>The '<tt>llvm.smul.with.overflow</tt>' family of intrinsic functions perform
+ a signed multiplication of the two arguments. They return a structure &mdash;
+ the first element of which is the multiplication, and the second element of
+ which is a bit specifying if the signed multiplication resulted in an
+ overflow.</p>
+
+<h5>Examples:</h5>
+<pre>
+ %res = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %a, i32 %b)
+ %sum = extractvalue {i32, i1} %res, 0
+ %obit = extractvalue {i32, i1} %res, 1
+ br i1 %obit, label %overflow, label %normal
+</pre>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="int_umul_overflow">
+ '<tt>llvm.umul.with.overflow.*</tt>' Intrinsics
+ </a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<p>This is an overloaded intrinsic. You can use <tt>llvm.umul.with.overflow</tt>
+ on any integer bit width.</p>
+
+<pre>
+ declare {i16, i1} @llvm.umul.with.overflow.i16(i16 %a, i16 %b)
+ declare {i32, i1} @llvm.umul.with.overflow.i32(i32 %a, i32 %b)
+ declare {i64, i1} @llvm.umul.with.overflow.i64(i64 %a, i64 %b)
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>llvm.umul.with.overflow</tt>' family of intrinsic functions perform
+ an unsigned multiplication of the two arguments, and indicate whether an
+ overflow occurred during the unsigned multiplication.</p>
+
+<h5>Arguments:</h5>
+<p>The arguments (%a and %b) and the first element of the result structure may
+ be of integer types of any bit width, but they must have the same bit
+ width. The second element of the result structure must be of
+ type <tt>i1</tt>. <tt>%a</tt> and <tt>%b</tt> are the two values that will
+ undergo unsigned multiplication.</p>
+
+<h5>Semantics:</h5>
+<p>The '<tt>llvm.umul.with.overflow</tt>' family of intrinsic functions perform
+ an unsigned multiplication of the two arguments. They return a structure
+ &mdash; the first element of which is the multiplication, and the second
+ element of which is a bit specifying if the unsigned multiplication resulted
+ in an overflow.</p>
+
+<h5>Examples:</h5>
+<pre>
+ %res = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %a, i32 %b)
+ %sum = extractvalue {i32, i1} %res, 0
+ %obit = extractvalue {i32, i1} %res, 1
+ br i1 %obit, label %overflow, label %normal
+</pre>
+
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="spec_arithmetic">Specialised Arithmetic Intrinsics</a>
+</h3>
+
+<!-- _______________________________________________________________________ -->
+
+<h4>
+ <a name="fmuladd">'<tt>llvm.fmuladd.*</tt>' Intrinsic</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ declare float @llvm.fmuladd.f32(float %a, float %b, float %c)
+ declare double @llvm.fmuladd.f64(double %a, double %b, double %c)
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>llvm.fmuladd.*</tt>' intrinsic functions represent multiply-add
+expressions that can be fused if the code generator determines that the fused
+expression would be legal and efficient.</p>
+
+<h5>Arguments:</h5>
+<p>The '<tt>llvm.fmuladd.*</tt>' intrinsics each take three arguments: two
+multiplicands, a and b, and an addend c.</p>
+
+<h5>Semantics:</h5>
+<p>The expression:</p>
+<pre>
+ %0 = call float @llvm.fmuladd.f32(%a, %b, %c)
+</pre>
+<p>is equivalent to the expression a * b + c, except that rounding will not be
+performed between the multiplication and addition steps if the code generator
+fuses the operations. Fusion is not guaranteed, even if the target platform
+supports it. If a fused multiply-add is required the corresponding llvm.fma.*
+intrinsic function should be used instead.</p>
+
+<h5>Examples:</h5>
+<pre>
+ %r2 = call float @llvm.fmuladd.f32(float %a, float %b, float %c) ; yields {float}:r2 = (a * b) + c
+</pre>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="int_fp16">Half Precision Floating Point Intrinsics</a>
+</h3>
+
+<div>
+
+<p>For most target platforms, half precision floating point is a storage-only
+ format. This means that it is
+ a dense encoding (in memory) but does not support computation in the
+ format.</p>
+
+<p>This means that code must first load the half-precision floating point
+ value as an i16, then convert it to float with <a
+ href="#int_convert_from_fp16"><tt>llvm.convert.from.fp16</tt></a>.
+ Computation can then be performed on the float value (including extending to
+ double etc). To store the value back to memory, it is first converted to
+ float if needed, then converted to i16 with
+ <a href="#int_convert_to_fp16"><tt>llvm.convert.to.fp16</tt></a>, and finally
+ stored as an i16 value.</p>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="int_convert_to_fp16">
+ '<tt>llvm.convert.to.fp16</tt>' Intrinsic
+ </a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+  declare i16 @llvm.convert.to.fp16(float %a)
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>llvm.convert.to.fp16</tt>' intrinsic function performs
+ a conversion from single precision floating point format to half precision
+ floating point format.</p>
+
+<h5>Arguments:</h5>
+<p>The intrinsic function takes a single argument: the value to be
+ converted.</p>
+
+<h5>Semantics:</h5>
+<p>The '<tt>llvm.convert.to.fp16</tt>' intrinsic function performs
+ a conversion from single precision floating point format to half precision
+ floating point format. The return value is an <tt>i16</tt> which
+ contains the converted number.</p>
+
+<h5>Examples:</h5>
+<pre>
+  %res = call i16 @llvm.convert.to.fp16(float %a)
+ store i16 %res, i16* @x, align 2
+</pre>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="int_convert_from_fp16">
+ '<tt>llvm.convert.from.fp16</tt>' Intrinsic
+ </a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+  declare float @llvm.convert.from.fp16(i16 %a)
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>llvm.convert.from.fp16</tt>' intrinsic function performs
+ a conversion from half precision floating point format to single precision
+ floating point format.</p>
+
+<h5>Arguments:</h5>
+<p>The intrinsic function takes a single argument: the value to be
+ converted.</p>
+
+<h5>Semantics:</h5>
+<p>The '<tt>llvm.convert.from.fp16</tt>' intrinsic function performs a
+ conversion from half precision floating point format to single
+ precision floating point format. The input half-float value is represented by
+ an <tt>i16</tt> value.</p>
+
+<h5>Examples:</h5>
+<pre>
+ %a = load i16* @x, align 2
+  %res = call float @llvm.convert.from.fp16(i16 %a)
+</pre>
+
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="int_debugger">Debugger Intrinsics</a>
+</h3>
+
+<div>
+
+<p>The LLVM debugger intrinsics (which all start with <tt>llvm.dbg.</tt>
+ prefix), are described in
+ the <a href="SourceLevelDebugging.html#format_common_intrinsics">LLVM Source
+ Level Debugging</a> document.</p>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="int_eh">Exception Handling Intrinsics</a>
+</h3>
+
+<div>
+
+<p>The LLVM exception handling intrinsics (which all start with
+ <tt>llvm.eh.</tt> prefix), are described in
+ the <a href="ExceptionHandling.html#format_common_intrinsics">LLVM Exception
+ Handling</a> document.</p>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="int_trampoline">Trampoline Intrinsics</a>
+</h3>
+
+<div>
+
+<p>These intrinsics make it possible to excise one parameter, marked with
+ the <a href="#nest"><tt>nest</tt></a> attribute, from a function.
+ The result is a callable
+ function pointer lacking the nest parameter - the caller does not need to
+ provide a value for it. Instead, the value to use is stored in advance in a
+ "trampoline", a block of memory usually allocated on the stack, which also
+ contains code to splice the nest value into the argument list. This is used
+ to implement the GCC nested function address extension.</p>
+
+<p>For example, if the function is
+ <tt>i32 f(i8* nest %c, i32 %x, i32 %y)</tt> then the resulting function
+ pointer has signature <tt>i32 (i32, i32)*</tt>. It can be created as
+ follows:</p>
+
+<pre class="doc_code">
+ %tramp = alloca [10 x i8], align 4 ; size and alignment only correct for X86
+ %tramp1 = getelementptr [10 x i8]* %tramp, i32 0, i32 0
+  call void @llvm.init.trampoline(i8* %tramp1, i8* bitcast (i32 (i8*, i32, i32)* @f to i8*), i8* %nval)
+ %p = call i8* @llvm.adjust.trampoline(i8* %tramp1)
+ %fp = bitcast i8* %p to i32 (i32, i32)*
+</pre>
+
+<p>The call <tt>%val = call i32 %fp(i32 %x, i32 %y)</tt> is then equivalent
+ to <tt>%val = call i32 %f(i8* %nval, i32 %x, i32 %y)</tt>.</p>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="int_it">
+ '<tt>llvm.init.trampoline</tt>' Intrinsic
+ </a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ declare void @llvm.init.trampoline(i8* &lt;tramp&gt;, i8* &lt;func&gt;, i8* &lt;nval&gt;)
+</pre>
+
+<h5>Overview:</h5>
+<p>This fills the memory pointed to by <tt>tramp</tt> with executable code,
+ turning it into a trampoline.</p>
+
+<h5>Arguments:</h5>
+<p>The <tt>llvm.init.trampoline</tt> intrinsic takes three arguments, all
+ pointers. The <tt>tramp</tt> argument must point to a sufficiently large and
+ sufficiently aligned block of memory; this memory is written to by the
+ intrinsic. Note that the size and the alignment are target-specific - LLVM
+ currently provides no portable way of determining them, so a front-end that
+ generates this intrinsic needs to have some target-specific knowledge.
+ The <tt>func</tt> argument must hold a function bitcast to
+ an <tt>i8*</tt>.</p>
+
+<h5>Semantics:</h5>
+<p>The block of memory pointed to by <tt>tramp</tt> is filled with target
+ dependent code, turning it into a function. Then <tt>tramp</tt> needs to be
+ passed to <a href="#int_at">llvm.adjust.trampoline</a> to get a pointer
+ which can be <a href="#int_trampoline">bitcast (to a new function) and
+ called</a>. The new function's signature is the same as that of
+ <tt>func</tt> with any arguments marked with the <tt>nest</tt> attribute
+ removed. At most one such <tt>nest</tt> argument is allowed, and it must be of
+ pointer type. Calling the new function is equivalent to calling <tt>func</tt>
+ with the same argument list, but with <tt>nval</tt> used for the missing
+ <tt>nest</tt> argument. If, after calling <tt>llvm.init.trampoline</tt>, the
+ memory pointed to by <tt>tramp</tt> is modified, then the effect of any later call
+ to the returned function pointer is undefined.</p>
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="int_at">
+ '<tt>llvm.adjust.trampoline</tt>' Intrinsic
+ </a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ declare i8* @llvm.adjust.trampoline(i8* &lt;tramp&gt;)
+</pre>
+
+<h5>Overview:</h5>
+<p>This performs any required machine-specific adjustment to the address of a
+ trampoline (passed as <tt>tramp</tt>).</p>
+
+<h5>Arguments:</h5>
+<p><tt>tramp</tt> must point to a block of memory which already has trampoline code
+ filled in by a previous call to <a href="#int_it"><tt>llvm.init.trampoline</tt>
+ </a>.</p>
+
+<h5>Semantics:</h5>
+<p>On some architectures the address of the code to be executed needs to be
+ different to the address where the trampoline is actually stored. This
+ intrinsic returns the executable address corresponding to <tt>tramp</tt>
+ after performing the required machine specific adjustments.
+ The pointer returned can then be <a href="#int_trampoline"> bitcast and
+ executed</a>.
+</p>
+
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="int_memorymarkers">Memory Use Markers</a>
+</h3>
+
+<div>
+
+<p>This class of intrinsics provides information about the lifetime of memory
+ objects and about ranges where variables are immutable.</p>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="int_lifetime_start">'<tt>llvm.lifetime.start</tt>' Intrinsic</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ declare void @llvm.lifetime.start(i64 &lt;size&gt;, i8* nocapture &lt;ptr&gt;)
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>llvm.lifetime.start</tt>' intrinsic specifies the start of a memory
+ object's lifetime.</p>
+
+<h5>Arguments:</h5>
+<p>The first argument is a constant integer representing the size of the
+ object, or -1 if it is variable sized. The second argument is a pointer to
+ the object.</p>
+
+<h5>Semantics:</h5>
+<p>This intrinsic indicates that before this point in the code, the value of the
+ memory pointed to by <tt>ptr</tt> is dead. This means that it is known to
+ never be used and has an undefined value. A load from the pointer that
+ precedes this intrinsic can be replaced with
+ <tt>'<a href="#undefvalues">undef</a>'</tt>.</p>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="int_lifetime_end">'<tt>llvm.lifetime.end</tt>' Intrinsic</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ declare void @llvm.lifetime.end(i64 &lt;size&gt;, i8* nocapture &lt;ptr&gt;)
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>llvm.lifetime.end</tt>' intrinsic specifies the end of a memory
+ object's lifetime.</p>
+
+<h5>Arguments:</h5>
+<p>The first argument is a constant integer representing the size of the
+ object, or -1 if it is variable sized. The second argument is a pointer to
+ the object.</p>
+
+<h5>Semantics:</h5>
+<p>This intrinsic indicates that after this point in the code, the value of the
+ memory pointed to by <tt>ptr</tt> is dead. This means that it is known to
+ never be used and has an undefined value. Any stores into the memory object
+ following this intrinsic may be removed as dead.</p>
+
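+<h5>Examples:</h5>
+<p>A sketch of how a front end might bracket the useful lifetime of a stack
+ buffer with the two lifetime markers; the uses between the markers are
+ hypothetical:</p>
+
+<pre>
+  %buf = alloca [32 x i8], align 4
+  %p = bitcast [32 x i8]* %buf to i8*
+  call void @llvm.lifetime.start(i64 32, i8* %p)
+  ; ... uses of %buf ...
+  call void @llvm.lifetime.end(i64 32, i8* %p)
+</pre>
+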
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="int_invariant_start">'<tt>llvm.invariant.start</tt>' Intrinsic</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ declare {}* @llvm.invariant.start(i64 &lt;size&gt;, i8* nocapture &lt;ptr&gt;)
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>llvm.invariant.start</tt>' intrinsic specifies that the contents of
+ a memory object will not change.</p>
+
+<h5>Arguments:</h5>
+<p>The first argument is a constant integer representing the size of the
+ object, or -1 if it is variable sized. The second argument is a pointer to
+ the object.</p>
+
+<h5>Semantics:</h5>
+<p>This intrinsic indicates that until an <tt>llvm.invariant.end</tt> that uses
+ the return value, the referenced memory location is constant and
+ unchanging.</p>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="int_invariant_end">'<tt>llvm.invariant.end</tt>' Intrinsic</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ declare void @llvm.invariant.end({}* &lt;start&gt;, i64 &lt;size&gt;, i8* nocapture &lt;ptr&gt;)
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>llvm.invariant.end</tt>' intrinsic specifies that the contents of
+ a memory object are mutable.</p>
+
+<h5>Arguments:</h5>
+<p>The first argument is the matching <tt>llvm.invariant.start</tt> intrinsic.
+ The second argument is a constant integer representing the size of the
+ object, or -1 if it is variable sized. The third argument is a pointer
+ to the object.</p>
+
+<h5>Semantics:</h5>
+<p>This intrinsic indicates that the memory is mutable again.</p>
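+
+<h5>Examples:</h5>
+<p>A sketch of marking a 4-byte location immutable between the two markers;
+ the stored value 42 and the pointer <tt>%p</tt> are hypothetical:</p>
+
+<pre>
+  store i32 42, i32* %p
+  %q = bitcast i32* %p to i8*
+  %inv = call {}* @llvm.invariant.start(i64 4, i8* %q)
+  ; loads through %p in this region may be assumed to yield 42
+  call void @llvm.invariant.end({}* %inv, i64 4, i8* %q)
+</pre>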
+
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="int_general">General Intrinsics</a>
+</h3>
+
+<div>
+
+<p>This class of intrinsics is designed to be generic and has no specific
+ purpose.</p>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="int_var_annotation">'<tt>llvm.var.annotation</tt>' Intrinsic</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ declare void @llvm.var.annotation(i8* &lt;val&gt;, i8* &lt;str&gt;, i8* &lt;str&gt;, i32 &lt;int&gt;)
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>llvm.var.annotation</tt>' intrinsic attaches an annotation string
+ to a local variable.</p>
+
+<h5>Arguments:</h5>
+<p>The first argument is a pointer to a value, the second is a pointer to a
+ global string, the third is a pointer to a global string which is the source
+ file name, and the last argument is the line number.</p>
+
+<h5>Semantics:</h5>
+<p>This intrinsic allows annotation of local variables with arbitrary strings.
+ This can be useful for special purpose optimizations that want to look for
+ these annotations. These have no other defined use; they are ignored by code
+ generation and optimization.</p>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="int_annotation">'<tt>llvm.annotation.*</tt>' Intrinsic</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<p>This is an overloaded intrinsic. You can use '<tt>llvm.annotation</tt>' on
+ any integer bit width.</p>
+
+<pre>
+ declare i8 @llvm.annotation.i8(i8 &lt;val&gt;, i8* &lt;str&gt;, i8* &lt;str&gt;, i32 &lt;int&gt;)
+ declare i16 @llvm.annotation.i16(i16 &lt;val&gt;, i8* &lt;str&gt;, i8* &lt;str&gt;, i32 &lt;int&gt;)
+ declare i32 @llvm.annotation.i32(i32 &lt;val&gt;, i8* &lt;str&gt;, i8* &lt;str&gt;, i32 &lt;int&gt;)
+ declare i64 @llvm.annotation.i64(i64 &lt;val&gt;, i8* &lt;str&gt;, i8* &lt;str&gt;, i32 &lt;int&gt;)
+ declare i256 @llvm.annotation.i256(i256 &lt;val&gt;, i8* &lt;str&gt;, i8* &lt;str&gt;, i32 &lt;int&gt;)
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>llvm.annotation</tt>' intrinsic attaches an annotation string to an
+ arbitrary integer expression and returns the value of that expression.</p>
+
+<h5>Arguments:</h5>
+<p>The first argument is an integer value (result of some expression), the
+ second is a pointer to a global string, the third is a pointer to a global
+ string which is the source file name, and the last argument is the line
+ number. It returns the value of the first argument.</p>
+
+<h5>Semantics:</h5>
+<p>This intrinsic allows annotations to be put on arbitrary expressions with
+ arbitrary strings. This can be useful for special purpose optimizations that
+ want to look for these annotations. These have no other defined use; they
+ are ignored by code generation and optimization.</p>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="int_trap">'<tt>llvm.trap</tt>' Intrinsic</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ declare void @llvm.trap() noreturn nounwind
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>llvm.trap</tt>' intrinsic causes execution to trap.</p>
+
+<h5>Arguments:</h5>
+<p>None.</p>
+
+<h5>Semantics:</h5>
+<p>This intrinsic is lowered to the target dependent trap instruction. If the
+ target does not have a trap instruction, this intrinsic will be lowered to
+ a call of the <tt>abort()</tt> function.</p>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="int_debugtrap">'<tt>llvm.debugtrap</tt>' Intrinsic</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ declare void @llvm.debugtrap() nounwind
+</pre>
+
+<h5>Overview:</h5>
+<p>The '<tt>llvm.debugtrap</tt>' intrinsic causes a trap intended to request
+ the attention of a debugger.</p>
+
+<h5>Arguments:</h5>
+<p>None.</p>
+
+<h5>Semantics:</h5>
+<p>This intrinsic is lowered to code which is intended to cause an execution
+ trap with the intention of requesting the attention of a debugger.</p>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="int_stackprotector">'<tt>llvm.stackprotector</tt>' Intrinsic</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ declare void @llvm.stackprotector(i8* &lt;guard&gt;, i8** &lt;slot&gt;)
+</pre>
+
+<h5>Overview:</h5>
+<p>The <tt>llvm.stackprotector</tt> intrinsic takes the <tt>guard</tt> and
+ stores it onto the stack at <tt>slot</tt>. The stack slot is adjusted to
+ ensure that it is placed on the stack before local variables.</p>
+
+<h5>Arguments:</h5>
+<p>The <tt>llvm.stackprotector</tt> intrinsic requires two pointer
+ arguments. The first argument is the value loaded from the stack
+ guard <tt>@__stack_chk_guard</tt>. The second variable is an <tt>alloca</tt>
+ that has enough space to hold the value of the guard.</p>
+
+<h5>Semantics:</h5>
+<p>This intrinsic causes the prologue/epilogue inserter to force the position of
+ the <tt>AllocaInst</tt> stack slot to be before local variables on the
+ stack. This is to ensure that if a local variable on the stack is
+ overwritten, it will destroy the value of the guard. When the function exits,
+ the guard on the stack is checked against the original guard. If they are
+ different, then the program aborts by calling the <tt>__stack_chk_fail()</tt>
+ function.</p>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="int_objectsize">'<tt>llvm.objectsize</tt>' Intrinsic</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ declare i32 @llvm.objectsize.i32(i8* &lt;object&gt;, i1 &lt;min&gt;)
+ declare i64 @llvm.objectsize.i64(i8* &lt;object&gt;, i1 &lt;min&gt;)
+</pre>
+
+<h5>Overview:</h5>
+<p>The <tt>llvm.objectsize</tt> intrinsic is designed to provide information to
+ the optimizers to determine at compile time whether a) an operation (like
+ memcpy) will overflow a buffer that corresponds to an object, or b) that a
+ runtime check for overflow isn't necessary. An object in this context means
+ an allocation of a specific class, structure, array, or other object.</p>
+
+<h5>Arguments:</h5>
+<p>The <tt>llvm.objectsize</tt> intrinsic takes two arguments. The first
+ argument is a pointer to or into the <tt>object</tt>. The second argument
+ is a boolean and determines whether <tt>llvm.objectsize</tt> returns 0 (if
+ true) or -1 (if false) when the object size is unknown.
+ The second argument only accepts constants.</p>
+
+<h5>Semantics:</h5>
+<p>The <tt>llvm.objectsize</tt> intrinsic is lowered to a constant representing
+ the size of the object concerned. If the size cannot be determined at compile
+ time, <tt>llvm.objectsize</tt> returns <tt>i32/i64 -1 or 0</tt>
+ (depending on the <tt>min</tt> argument).</p>
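+
+<h5>Examples:</h5>
+<p>A sketch in which the object size is known at compile time; for a pointer
+ whose underlying object cannot be determined, the call would instead fold to
+ -1 (or 0 if <tt>min</tt> is true):</p>
+
+<pre>
+  %buf = alloca [32 x i8]
+  %p = getelementptr [32 x i8]* %buf, i32 0, i32 0
+  %size = call i32 @llvm.objectsize.i32(i8* %p, i1 false)   ; folds to 32
+</pre>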
+
+</div>
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="int_expect">'<tt>llvm.expect</tt>' Intrinsic</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ declare i32 @llvm.expect.i32(i32 &lt;val&gt;, i32 &lt;expected_val&gt;)
+ declare i64 @llvm.expect.i64(i64 &lt;val&gt;, i64 &lt;expected_val&gt;)
+</pre>
+
+<h5>Overview:</h5>
+<p>The <tt>llvm.expect</tt> intrinsic provides information about the expected
+ (most probable) value of <tt>val</tt>, which optimizers can use.</p>
+
+<h5>Arguments:</h5>
+<p>The <tt>llvm.expect</tt> intrinsic takes two arguments. The first
+ argument is a value. The second argument is the expected value; it must be a
+ constant, not a variable.</p>
+
+<h5>Semantics:</h5>
+<p>This intrinsic is lowered to <tt>val</tt>.</p>
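+
+<h5>Examples:</h5>
+<p>A sketch of annotating a branch condition, assuming <tt>%flag</tt> is
+ defined elsewhere and is expected to be zero:</p>
+
+<pre>
+  %expval = call i32 @llvm.expect.i32(i32 %flag, i32 0)
+  %cond = icmp eq i32 %expval, 0
+  br i1 %cond, label %likely, label %unlikely
+</pre>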
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="int_donothing">'<tt>llvm.donothing</tt>' Intrinsic</a>
+</h4>
+
+<div>
+
+<h5>Syntax:</h5>
+<pre>
+ declare void @llvm.donothing() nounwind readnone
+</pre>
+
+<h5>Overview:</h5>
+<p>The <tt>llvm.donothing</tt> intrinsic doesn't perform any operation. It's the
+only intrinsic that can be called with an invoke instruction.</p>
+
+<h5>Arguments:</h5>
+<p>None.</p>
+
+<h5>Semantics:</h5>
+<p>This intrinsic does nothing, and it's removed by optimizers and ignored by
+codegen.</p>
+</div>
+
+</div>
+
+</div>
+<!-- *********************************************************************** -->
+<hr>
+<address>
+ <a href="http://jigsaw.w3.org/css-validator/check/referer"><img
+ src="http://jigsaw.w3.org/css-validator/images/vcss-blue" alt="Valid CSS"></a>
+ <a href="http://validator.w3.org/check/referer"><img
+ src="http://www.w3.org/Icons/valid-html401-blue" alt="Valid HTML 4.01"></a>
+
+ <a href="mailto:sabre@nondot.org">Chris Lattner</a><br>
+ <a href="http://llvm.org/">The LLVM Compiler Infrastructure</a><br>
+ Last modified: $Date$
+</address>
+
+</body>
+</html>
diff --git a/docs/Lexicon.rst b/docs/Lexicon.rst
new file mode 100644
index 00000000000..6ebe61429f9
--- /dev/null
+++ b/docs/Lexicon.rst
@@ -0,0 +1,194 @@
+.. _lexicon:
+
+================
+The LLVM Lexicon
+================
+
+.. note::
+
+ This document is a work in progress!
+
+Definitions
+===========
+
+A
+-
+
+**ADCE**
+ Aggressive Dead Code Elimination
+
+B
+-
+
+**BURS**
+
+ Bottom Up Rewriting System --- A method of instruction selection for code
+ generation. An example is the `BURG
+ <http://www.program-transformation.org/Transform/BURG>`_ tool.
+
+C
+-
+
+**CSE**
+  Common Subexpression Elimination. An optimization that removes redundant
+  computation of common subexpressions. For example, ``(a+b)*(a+b)`` has two
+  identical subexpressions: ``(a+b)``. This optimization would perform the
+  addition only once and then perform the multiply (but only if doing so is
+  computationally correct/safe).
+
+D
+-
+
+**DAG**
+ Directed Acyclic Graph
+
+.. _derived pointer:
+.. _derived pointers:
+
+**Derived Pointer**
+ A pointer to the interior of an object, such that a garbage collector is
+ unable to use the pointer for reachability analysis. While a derived pointer
+ is live, the corresponding object pointer must be kept in a root, otherwise
+ the collector might free the referenced object. With copying collectors,
+ derived pointers pose an additional hazard that they may be invalidated at
+ any `safe point`_. This term is used in opposition to `object pointer`_.
+
+**DSA**
+ Data Structure Analysis
+
+**DSE**
+ Dead Store Elimination
+
+F
+-
+
+**FCA**
+ First Class Aggregate
+
+G
+-
+
+**GC**
+ Garbage Collection. The practice of using reachability analysis instead of
+ explicit memory management to reclaim unused memory.
+
+H
+-
+
+.. _heap:
+
+**Heap**
+ In garbage collection, the region of memory which is managed using
+ reachability analysis.
+
+I
+-
+
+**IPA**
+ Inter-Procedural Analysis. Refers to any variety of code analysis that
+ occurs between procedures, functions or compilation units (modules).
+
+**IPO**
+ Inter-Procedural Optimization. Refers to any variety of code optimization
+ that occurs between procedures, functions or compilation units (modules).
+
+**ISel**
+ Instruction Selection
+
+L
+-
+
+**LCSSA**
+ Loop-Closed Static Single Assignment Form
+
+**LICM**
+ Loop Invariant Code Motion
+
+**Load-VN**
+ Load Value Numbering
+
+**LTO**
+ Link-Time Optimization
+
+M
+-
+
+**MC**
+ Machine Code
+
+O
+-
+
+.. _object pointer:
+.. _object pointers:
+
+**Object Pointer**
+ A pointer to an object such that the garbage collector is able to trace
+ references contained within the object. This term is used in opposition to
+ `derived pointer`_.
+
+P
+-
+
+**PRE**
+ Partial Redundancy Elimination
+
+R
+-
+
+**RAUW**
+
+ Replace All Uses With. The functions ``User::replaceUsesOfWith()``,
+ ``Value::replaceAllUsesWith()``, and
+ ``Constant::replaceUsesOfWithOnConstant()`` implement the replacement of one
+ Value with another by iterating over its def/use chain and fixing up all of
+ the pointers to point to the new value. See
+ also `def/use chains <ProgrammersManual.html#iterate_chains>`_.
+
+**Reassociation**
+ Rearranging associative expressions to promote better redundancy elimination
+ and other optimization. For example, changing ``(A+B-A)`` into ``(B+A-A)``,
+ permitting it to be optimized into ``(B+0)`` then ``(B)``.
+
+.. _roots:
+.. _stack roots:
+
+**Root**
+ In garbage collection, a pointer variable lying outside of the `heap`_ from
+ which the collector begins its reachability analysis. In the context of code
+ generation, "root" almost always refers to a "stack root" --- a local or
+  temporary variable within an executing function.
+
+**RPO**
+ Reverse postorder
+
+S
+-
+
+.. _safe point:
+
+**Safe Point**
+ In garbage collection, it is necessary to identify `stack roots`_ so that
+ reachability analysis may proceed. It may be infeasible to provide this
+  information for every instruction, so instead the information is
+ calculated only at designated safe points. With a copying collector,
+ `derived pointers`_ must not be retained across safe points and `object
+ pointers`_ must be reloaded from stack roots.
+
+**SDISel**
+ Selection DAG Instruction Selection.
+
+**SCC**
+ Strongly Connected Component
+
+**SCCP**
+ Sparse Conditional Constant Propagation
+
+**SRoA**
+ Scalar Replacement of Aggregates
+
+**SSA**
+ Static Single Assignment
+
+**Stack Map**
+ In garbage collection, metadata emitted by the code generator which
+ identifies `roots`_ within the stack frame of an executing function.
diff --git a/docs/LinkTimeOptimization.rst b/docs/LinkTimeOptimization.rst
new file mode 100644
index 00000000000..53d673e4066
--- /dev/null
+++ b/docs/LinkTimeOptimization.rst
@@ -0,0 +1,298 @@
+.. _lto:
+
+======================================================
+LLVM Link Time Optimization: Design and Implementation
+======================================================
+
+.. contents::
+ :local:
+
+Description
+===========
+
+LLVM features powerful intermodular optimizations which can be used at link
+time. Link Time Optimization (LTO) is another name for intermodular
+optimization when performed during the link stage. This document describes the
+interface and design between the LTO optimizer and the linker.
+
+Design Philosophy
+=================
+
+The LLVM Link Time Optimizer provides complete transparency, while doing
+intermodular optimization, in the compiler tool chain. Its main goal is to let
+the developer take advantage of intermodular optimizations without making any
+significant changes to the developer's makefiles or build system. This is
+achieved through tight integration with the linker. In this model, the linker
+treats LLVM bitcode files like native object files and allows mixing and
+matching among them. The linker uses `libLTO`_, a shared object, to handle LLVM
+bitcode files. This tight integration between the linker and LLVM optimizer
+helps to do optimizations that are not possible in other models. The linker
+input allows the optimizer to avoid relying on conservative escape analysis.
+
+Example of link time optimization
+---------------------------------
+
+The following example illustrates the advantages of LTO's integrated approach
+and clean interface. This example requires a system linker which supports LTO
+through the interface described in this document. Here, clang transparently
+invokes the system linker.
+
+* Input source file ``a.c`` is compiled into LLVM bitcode form.
+* Input source file ``main.c`` is compiled into native object code.
+
+.. code-block:: c++
+
+ --- a.h ---
+ extern int foo1(void);
+ extern void foo2(void);
+ extern void foo4(void);
+
+ --- a.c ---
+ #include "a.h"
+
+ static signed int i = 0;
+
+ void foo2(void) {
+ i = -1;
+ }
+
+ static int foo3() {
+ foo4();
+ return 10;
+ }
+
+ int foo1(void) {
+ int data = 0;
+
+ if (i < 0)
+ data = foo3();
+
+ data = data + 42;
+ return data;
+ }
+
+ --- main.c ---
+ #include <stdio.h>
+ #include "a.h"
+
+ void foo4(void) {
+ printf("Hi\n");
+ }
+
+ int main() {
+ return foo1();
+ }
+
+.. code-block:: bash
+
+ --- command lines ---
+ % clang -emit-llvm -c a.c -o a.o # <-- a.o is LLVM bitcode file
+ % clang -c main.c -o main.o # <-- main.o is native object file
+ % clang a.o main.o -o main # <-- standard link command without modifications
+
+* In this example, the linker recognizes that ``foo2()`` is an externally
+  visible symbol defined in an LLVM bitcode file. The linker completes its
+  usual symbol resolution pass and finds that ``foo2()`` is not used
+  anywhere. This information is used by the LLVM optimizer, which
+  removes ``foo2()``.
+
+* As soon as ``foo2()`` is removed, the optimizer recognizes that condition ``i
+ < 0`` is always false, which means ``foo3()`` is never used. Hence, the
+ optimizer also removes ``foo3()``.
+
+* This, in turn, enables the linker to remove ``foo4()``.
+
+This example illustrates the advantage of tight integration with the
+linker. Here, the optimizer cannot remove ``foo3()`` without the linker's
+input.
+
+Alternative Approaches
+----------------------
+
+**Compiler driver invokes link time optimizer separately.**
+ In this model the link time optimizer is not able to take advantage of
+ information collected during the linker's normal symbol resolution phase.
+  In the above example, the optimizer cannot remove ``foo2()`` without the
+ linker's input because it is externally visible. This in turn prohibits the
+ optimizer from removing ``foo3()``.
+
+**Use a separate tool to collect symbol information from all object files.**
+  In this model, a new, separate tool or library replicates the linker's
+  capability to collect information for link time optimization. Not only is
+  this code duplication difficult to justify, but it also has several other
+  disadvantages. For example, the linking semantics and the features provided
+  by the linker vary across platforms. This means this new tool needs to
+  support all such features and platforms, either in one super tool or with a
+  separate tool per platform. This increases the maintenance cost of the link
+  time optimizer significantly, which is not necessary. This approach also
+  requires staying synchronized with linker developments on various
+  platforms, which is not the main focus of the link time optimizer. Finally,
+  this approach increases the end user's build time due to the duplication of
+  work done by this separate tool and the linker itself.
+
+Multi-phase communication between ``libLTO`` and linker
+=======================================================
+
+The linker collects information about symbol definitions and uses in various
+link objects, which is more accurate than any information collected by other
+tools during typical build cycles. The linker collects this information by
+looking at the definitions and uses of symbols in native .o files and using
+symbol visibility information. The linker also uses user-supplied information,
+such as a list of exported symbols. The LLVM optimizer collects control flow
+and data flow information, and knows much more about program structure from
+the optimizer's point of view. Our goal is to take advantage of tight
+integration between the linker and the optimizer by sharing this information
+during various linking phases.
+
+Phase 1 : Read LLVM Bitcode Files
+---------------------------------
+
+The linker first reads all object files in natural order and collects symbol
+information. This includes native object files as well as LLVM bitcode files.
+To minimize the cost to the linker in the case that all .o files are native
+object files, the linker only calls ``lto_module_create()`` when a supplied
+object file is found to not be a native object file. If ``lto_module_create()``
+returns that the file is an LLVM bitcode file, the linker then iterates over the
+module using ``lto_module_get_symbol_name()`` and
+``lto_module_get_symbol_attribute()`` to get all symbols defined and referenced.
+This information is added to the linker's global symbol table.
+
+
+The ``lto*`` functions are all implemented in a shared object, ``libLTO``. This allows
+the LLVM LTO code to be updated independently of the linker tool. On platforms
+that support it, the shared object is lazily loaded.
+
+Phase 2 : Symbol Resolution
+---------------------------
+
+In this stage, the linker resolves symbols using the global symbol table. It may
+report undefined symbol errors, read archive members, replace weak symbols, etc.
+The linker is able to do this seamlessly even though it does not know the exact
+content of input LLVM bitcode files. If dead code stripping is enabled then the
+linker collects the list of live symbols.
+
+Phase 3 : Optimize Bitcode Files
+--------------------------------
+
+After symbol resolution, the linker tells the LTO shared object which symbols
+are needed by native object files. In the example above, the linker reports
+that only ``foo1()`` is used by native object files using
+``lto_codegen_add_must_preserve_symbol()``. Next the linker invokes the LLVM
+optimizer and code generators using ``lto_codegen_compile()`` which returns a
+native object file created by merging the LLVM bitcode files and applying
+various optimization passes.
+
+Phase 4 : Symbol Resolution after optimization
+----------------------------------------------
+
+In this phase, the linker reads the optimized native object file and updates the
+internal global symbol table to reflect any changes. The linker also collects
+information about any changes in use of external symbols by LLVM bitcode
+files. In the example above, the linker notes that ``foo4()`` is not used any
+more. If dead code stripping is enabled then the linker refreshes the live
+symbol information appropriately and performs dead code stripping.
+
+After this phase, the linker continues linking as if it never saw LLVM bitcode
+files.
+
+.. _libLTO:
+
+``libLTO``
+==========
+
+``libLTO`` is a shared object that is part of the LLVM tools, and is intended
+for use by a linker. ``libLTO`` provides an abstract C interface to use the LLVM
+interprocedural optimizer without exposing details of LLVM's internals. The
+intention is to keep the interface as stable as possible even when the LLVM
+optimizer continues to evolve. It should even be possible for a completely
+different compilation technology to provide a different libLTO that works with
+their object files and the standard linker tool.
+
+``lto_module_t``
+----------------
+
+A non-native object file is handled via an ``lto_module_t``. The following
+functions allow the linker to check if a file (on disk or in a memory buffer) is
+a file which libLTO can process:
+
+.. code-block:: c
+
+ lto_module_is_object_file(const char*)
+ lto_module_is_object_file_for_target(const char*, const char*)
+ lto_module_is_object_file_in_memory(const void*, size_t)
+ lto_module_is_object_file_in_memory_for_target(const void*, size_t, const char*)
+
+If the object file can be processed by ``libLTO``, the linker creates an
+``lto_module_t`` by using one of:
+
+.. code-block:: c
+
+ lto_module_create(const char*)
+ lto_module_create_from_memory(const void*, size_t)
+
+and when done, the handle is released via
+
+.. code-block:: c
+
+ lto_module_dispose(lto_module_t)
+
+
+The linker can introspect the non-native object file by getting the number of
+symbols and getting the name and attributes of each symbol via:
+
+.. code-block:: c
+
+ lto_module_get_num_symbols(lto_module_t)
+ lto_module_get_symbol_name(lto_module_t, unsigned int)
+ lto_module_get_symbol_attribute(lto_module_t, unsigned int)
+
+The attributes of a symbol include the alignment, visibility, and kind.
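+
+As an illustration only (not part of the interface specification), a linker
+might use these calls roughly as in the following C sketch; the
+``linker_add_symbol()`` helper is hypothetical and error handling is
+simplified:
+
+.. code-block:: c
+
+  #include <llvm-c/lto.h>
+
+  /* Hypothetical helper owned by the linker, not provided by libLTO. */
+  extern void linker_add_symbol(const char *name, lto_symbol_attributes attrs);
+
+  /* Returns 1 if the file was a bitcode file and its symbols were recorded,
+     0 if it was not a file libLTO understands, -1 on error. */
+  int read_bitcode_symbols(const char *path) {
+    if (!lto_module_is_object_file(path))
+      return 0;
+
+    lto_module_t mod = lto_module_create(path);
+    if (!mod)
+      return -1;
+
+    unsigned i, n = lto_module_get_num_symbols(mod);
+    for (i = 0; i != n; ++i)
+      linker_add_symbol(lto_module_get_symbol_name(mod, i),
+                        lto_module_get_symbol_attribute(mod, i));
+
+    lto_module_dispose(mod);
+    return 1;
+  }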
+
+``lto_code_gen_t``
+------------------
+
+Once the linker has loaded each non-native object file into an
+``lto_module_t``, it can request ``libLTO`` to process them all and generate a
+native object file. This is done in a couple of steps. First, a code generator
+is created with:
+
+.. code-block:: c
+
+ lto_codegen_create()
+
+Then, each non-native object file is added to the code generator with:
+
+.. code-block:: c
+
+ lto_codegen_add_module(lto_code_gen_t, lto_module_t)
+
+The linker then has the option of setting some codegen options. Whether or not
+to generate DWARF debug info is set with:
+
+.. code-block:: c
+
+ lto_codegen_set_debug_model(lto_code_gen_t)
+
+Which kind of position independence is set with:
+
+.. code-block:: c
+
+ lto_codegen_set_pic_model(lto_code_gen_t)
+
+Each symbol that is referenced by a native object file, or that otherwise must
+not be optimized away, is registered with:
+
+.. code-block:: c
+
+ lto_codegen_add_must_preserve_symbol(lto_code_gen_t, const char*)
+
+After all these settings are done, the linker requests that a native object file
+be created from the modules with the settings using:
+
+.. code-block:: c
+
+  lto_codegen_compile(lto_code_gen_t, size_t*)
+
+which returns a pointer to a buffer containing the generated native object file.
+The linker then parses that and links it with the rest of the native object
+files.
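+
+Putting these calls together, the code generation step of a linker might look
+roughly like the following C sketch (illustrative only; error handling is
+omitted, and the model arguments and exact prototypes should be taken from
+``llvm-c/lto.h``):
+
+.. code-block:: c
+
+  #include <llvm-c/lto.h>
+  #include <stddef.h>
+
+  /* 'mods' were created earlier with lto_module_create(); the returned buffer
+     holds the merged, optimized native object file and '*length' its size. */
+  const void *generate_native_object(lto_module_t *mods, unsigned nmods,
+                                     size_t *length) {
+    lto_code_gen_t cg = lto_codegen_create();
+    unsigned i;
+
+    for (i = 0; i != nmods; ++i)
+      lto_codegen_add_module(cg, mods[i]);
+
+    /* Optional codegen settings. */
+    lto_codegen_set_debug_model(cg, LTO_DEBUG_MODEL_DWARF);
+    lto_codegen_set_pic_model(cg, LTO_CODEGEN_PIC_MODEL_DYNAMIC);
+
+    /* Symbols still referenced from native objects must not be optimized
+       away; "foo1" stands in for whatever the linker's symbol table says. */
+    lto_codegen_add_must_preserve_symbol(cg, "foo1");
+
+    return lto_codegen_compile(cg, length);
+  }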
diff --git a/docs/Makefile b/docs/Makefile
new file mode 100644
index 00000000000..122c4b834bb
--- /dev/null
+++ b/docs/Makefile
@@ -0,0 +1,127 @@
+##===- docs/Makefile ---------------------------------------*- Makefile -*-===##
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+
+LEVEL := ..
+DIRS :=
+
+ifdef BUILD_FOR_WEBSITE
+PROJ_OBJ_DIR = .
+DOXYGEN = doxygen
+
+$(PROJ_OBJ_DIR)/doxygen.cfg: doxygen.cfg.in
+ cat $< | sed \
+ -e 's/@abs_top_srcdir@/../g' \
+ -e 's/@DOT@/dot/g' \
+ -e 's/@PACKAGE_VERSION@/mainline/' \
+ -e 's/@abs_top_builddir@/../g' > $@
+endif
+
+include $(LEVEL)/Makefile.common
+
+HTML := $(wildcard $(PROJ_SRC_DIR)/*.html) \
+ $(wildcard $(PROJ_SRC_DIR)/*.css)
+DOXYFILES := doxygen.cfg.in doxygen.css doxygen.footer doxygen.header \
+ doxygen.intro
+EXTRA_DIST := $(HTML) $(DOXYFILES) llvm.css CommandGuide
+
+.PHONY: install-html install-doxygen doxygen install-ocamldoc ocamldoc generated
+
+install_targets := install-html
+ifeq ($(ENABLE_DOXYGEN),1)
+install_targets += install-doxygen
+endif
+ifdef OCAMLDOC
+ifneq (,$(filter ocaml,$(BINDINGS_TO_BUILD)))
+install_targets += install-ocamldoc
+endif
+endif
+install-local:: $(install_targets)
+
+generated_targets := doxygen
+ifdef OCAMLDOC
+generated_targets += ocamldoc
+endif
+
+# Live documentation is generated for the web site using this target:
+# 'make generated BUILD_FOR_WEBSITE=1'
+generated:: $(generated_targets)
+
+install-html: $(PROJ_OBJ_DIR)/html.tar.gz
+ $(Echo) Installing HTML documentation
+ $(Verb) $(MKDIR) $(DESTDIR)$(PROJ_docsdir)/html
+ $(Verb) $(DataInstall) $(HTML) $(DESTDIR)$(PROJ_docsdir)/html
+ $(Verb) $(DataInstall) $(PROJ_OBJ_DIR)/html.tar.gz $(DESTDIR)$(PROJ_docsdir)
+
+$(PROJ_OBJ_DIR)/html.tar.gz: $(HTML)
+ $(Echo) Packaging HTML documentation
+ $(Verb) $(RM) -rf $@ $(PROJ_OBJ_DIR)/html.tar
+ $(Verb) cd $(PROJ_SRC_DIR) && \
+ $(TAR) cf $(PROJ_OBJ_DIR)/html.tar *.html
+ $(Verb) $(GZIPBIN) $(PROJ_OBJ_DIR)/html.tar
+
+install-doxygen: doxygen
+ $(Echo) Installing doxygen documentation
+ $(Verb) $(MKDIR) $(DESTDIR)$(PROJ_docsdir)/html/doxygen
+ $(Verb) $(DataInstall) $(PROJ_OBJ_DIR)/doxygen.tar.gz $(DESTDIR)$(PROJ_docsdir)
+ $(Verb) cd $(PROJ_OBJ_DIR)/doxygen && \
+ $(FIND) . -type f -exec \
+ $(DataInstall) {} $(DESTDIR)$(PROJ_docsdir)/html/doxygen \;
+
+doxygen: regendoc $(PROJ_OBJ_DIR)/doxygen.tar.gz
+
+regendoc:
+ $(Echo) Building doxygen documentation
+ $(Verb) if test -e $(PROJ_OBJ_DIR)/doxygen ; then \
+ $(RM) -rf $(PROJ_OBJ_DIR)/doxygen ; \
+ fi
+ $(Verb) $(DOXYGEN) $(PROJ_OBJ_DIR)/doxygen.cfg
+
+$(PROJ_OBJ_DIR)/doxygen.tar.gz: $(DOXYFILES) $(PROJ_OBJ_DIR)/doxygen.cfg
+ $(Echo) Packaging doxygen documentation
+ $(Verb) $(RM) -rf $@ $(PROJ_OBJ_DIR)/doxygen.tar
+ $(Verb) $(TAR) cf $(PROJ_OBJ_DIR)/doxygen.tar doxygen
+ $(Verb) $(GZIPBIN) $(PROJ_OBJ_DIR)/doxygen.tar
+ $(Verb) $(CP) $(PROJ_OBJ_DIR)/doxygen.tar.gz $(PROJ_OBJ_DIR)/doxygen/html/
+
+userloc: $(LLVM_SRC_ROOT)/docs/userloc.html
+
+$(LLVM_SRC_ROOT)/docs/userloc.html:
+ $(Echo) Making User LOC Table
+ $(Verb) cd $(LLVM_SRC_ROOT) ; ./utils/userloc.pl -details -recurse \
+ -html lib include tools runtime utils examples autoconf test > docs/userloc.html
+
+install-ocamldoc: ocamldoc
+ $(Echo) Installing ocamldoc documentation
+ $(Verb) $(MKDIR) $(DESTDIR)$(PROJ_docsdir)/ocamldoc/html
+ $(Verb) $(DataInstall) $(PROJ_OBJ_DIR)/ocamldoc.tar.gz $(DESTDIR)$(PROJ_docsdir)
+ $(Verb) cd $(PROJ_OBJ_DIR)/ocamldoc && \
+ $(FIND) . -type f -exec \
+ $(DataInstall) {} $(DESTDIR)$(PROJ_docsdir)/ocamldoc/html \;
+
+ocamldoc: regen-ocamldoc
+ $(Echo) Packaging ocamldoc documentation
+ $(Verb) $(RM) -rf $(PROJ_OBJ_DIR)/ocamldoc.tar*
+ $(Verb) $(TAR) cf $(PROJ_OBJ_DIR)/ocamldoc.tar ocamldoc
+ $(Verb) $(GZIPBIN) $(PROJ_OBJ_DIR)/ocamldoc.tar
+ $(Verb) $(CP) $(PROJ_OBJ_DIR)/ocamldoc.tar.gz $(PROJ_OBJ_DIR)/ocamldoc/html/
+
+regen-ocamldoc:
+ $(Echo) Building ocamldoc documentation
+ $(Verb) if test -e $(PROJ_OBJ_DIR)/ocamldoc ; then \
+ $(RM) -rf $(PROJ_OBJ_DIR)/ocamldoc ; \
+ fi
+ $(Verb) $(MAKE) -C $(LEVEL)/bindings/ocaml ocamldoc
+ $(Verb) $(MKDIR) $(PROJ_OBJ_DIR)/ocamldoc/html
+ $(Verb) \
+ $(OCAMLDOC) -d $(PROJ_OBJ_DIR)/ocamldoc/html -sort -colorize-code -html \
+ `$(FIND) $(LEVEL)/bindings/ocaml -name "*.odoc" -exec echo -load '{}' ';'`
+
+uninstall-local::
+ $(Echo) Uninstalling Documentation
+ $(Verb) $(RM) -rf $(DESTDIR)$(PROJ_docsdir)
diff --git a/docs/Makefile.sphinx b/docs/Makefile.sphinx
new file mode 100644
index 00000000000..81c13de9cd9
--- /dev/null
+++ b/docs/Makefile.sphinx
@@ -0,0 +1,159 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS =
+SPHINXBUILD = sphinx-build
+PAPER =
+BUILDDIR = _build
+
+# Internal variables.
+PAPEROPT_a4 = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+# the i18n builder cannot share the environment and doctrees with the others
+I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+
+.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
+
+all: html
+
+help:
+ @echo "Please use \`make <target>' where <target> is one of"
+ @echo " html to make standalone HTML files"
+ @echo " dirhtml to make HTML files named index.html in directories"
+ @echo " singlehtml to make a single large HTML file"
+ @echo " pickle to make pickle files"
+ @echo " json to make JSON files"
+ @echo " htmlhelp to make HTML files and a HTML help project"
+ @echo " qthelp to make HTML files and a qthelp project"
+ @echo " devhelp to make HTML files and a Devhelp project"
+ @echo " epub to make an epub"
+ @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+ @echo " latexpdf to make LaTeX files and run them through pdflatex"
+ @echo " text to make text files"
+ @echo " man to make manual pages"
+ @echo " texinfo to make Texinfo files"
+ @echo " info to make Texinfo files and run them through makeinfo"
+ @echo " gettext to make PO message catalogs"
+ @echo " changes to make an overview of all changed/added/deprecated items"
+ @echo " linkcheck to check all external links for integrity"
+ @echo " doctest to run all doctests embedded in the documentation (if enabled)"
+
+clean:
+ -rm -rf $(BUILDDIR)/*
+
+html:
+ $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+ @echo
+ @# FIXME: Remove this `cp` once HTML->Sphinx transition is completed.
+ @# Kind of a hack, but HTML-formatted docs are on the way out anyway.
+ @echo "Copying legacy HTML-formatted docs into $(BUILDDIR)/html"
+ @cp -a *.html tutorial $(BUILDDIR)/html
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
+
+dirhtml:
+ $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
+
+singlehtml:
+ $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
+ @echo
+ @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
+
+pickle:
+ $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
+ @echo
+ @echo "Build finished; now you can process the pickle files."
+
+json:
+ $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
+ @echo
+ @echo "Build finished; now you can process the JSON files."
+
+htmlhelp:
+ $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
+ @echo
+ @echo "Build finished; now you can run HTML Help Workshop with the" \
+ ".hhp project file in $(BUILDDIR)/htmlhelp."
+
+qthelp:
+ $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
+ @echo
+ @echo "Build finished; now you can run "qcollectiongenerator" with the" \
+ ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
+ @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/llvm.qhcp"
+ @echo "To view the help file:"
+ @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/llvm.qhc"
+
+devhelp:
+ $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
+ @echo
+ @echo "Build finished."
+ @echo "To view the help file:"
+ @echo "# mkdir -p $$HOME/.local/share/devhelp/llvm"
+ @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/llvm"
+ @echo "# devhelp"
+
+epub:
+ $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
+ @echo
+ @echo "Build finished. The epub file is in $(BUILDDIR)/epub."
+
+latex:
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo
+ @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
+ @echo "Run \`make' in that directory to run these through (pdf)latex" \
+ "(use \`make latexpdf' here to do that automatically)."
+
+latexpdf:
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo "Running LaTeX files through pdflatex..."
+ $(MAKE) -C $(BUILDDIR)/latex all-pdf
+ @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+text:
+ $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
+ @echo
+ @echo "Build finished. The text files are in $(BUILDDIR)/text."
+
+man:
+ $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
+ @echo
+ @echo "Build finished. The manual pages are in $(BUILDDIR)/man."
+
+texinfo:
+ $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+ @echo
+ @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
+ @echo "Run \`make' in that directory to run these through makeinfo" \
+ "(use \`make info' here to do that automatically)."
+
+info:
+ $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+ @echo "Running Texinfo files through makeinfo..."
+ make -C $(BUILDDIR)/texinfo info
+ @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
+
+gettext:
+ $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
+ @echo
+ @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
+
+changes:
+ $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
+ @echo
+ @echo "The overview file is in $(BUILDDIR)/changes."
+
+linkcheck:
+ $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
+ @echo
+ @echo "Link check complete; look for any errors in the above output " \
+ "or in $(BUILDDIR)/linkcheck/output.txt."
+
+doctest:
+ $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
+ @echo "Testing of doctests in the sources finished, look at the " \
+ "results in $(BUILDDIR)/doctest/output.txt."
diff --git a/docs/MakefileGuide.rst b/docs/MakefileGuide.rst
new file mode 100644
index 00000000000..d2bdd24a9e7
--- /dev/null
+++ b/docs/MakefileGuide.rst
@@ -0,0 +1,956 @@
+.. _makefile_guide:
+
+===================
+LLVM Makefile Guide
+===================
+
+.. contents::
+ :local:
+
+Introduction
+============
+
+This document provides *usage* information about the LLVM makefile system. While
+loosely patterned after the BSD makefile system, LLVM has taken a departure from
+BSD in order to implement additional features needed by LLVM. Although makefile
+systems, such as ``automake``, were attempted at one point, it has become clear
+that the features needed by LLVM go too far beyond the ``Makefile`` norm to use
+a more limited tool. Consequently, LLVM simply requires GNU Make 3.79, a widely
+portable makefile processor.
+GNU Make so the dependency on GNU Make is firm. If you're not familiar with
+``make``, it is recommended that you read the `GNU Makefile Manual
+<http://www.gnu.org/software/make/manual/make.html>`_.
+
+While this document is rightly part of the `LLVM Programmer's
+Manual <ProgrammersManual.html>`_, it is treated separately here because of the
+volume of content and because it is often an early source of bewilderment for
+new developers.
+
+General Concepts
+================
+
+The LLVM Makefile System is the component of LLVM that is responsible for
+building the software, testing it, generating distributions, checking those
+distributions, installing and uninstalling, etc. It consists of a several files
+throughout the source tree. These files and other general concepts are described
+in this section.
+
+Projects
+--------
+
+The LLVM Makefile System is quite generous. It not only builds its own software,
+but it can build yours too. Built into the system is knowledge of the
+``llvm/projects`` directory. Any directory under ``projects`` that has both a
+``configure`` script and a ``Makefile`` is assumed to be a project that uses the
+LLVM Makefile system. Building software that uses LLVM does not require the
+LLVM Makefile System nor even placement in the ``llvm/projects``
+directory. However, doing so will allow your project to get up and running
+quickly by utilizing the built-in features that are used to compile LLVM. LLVM
+compiles itself using the same features of the makefile system as used for
+projects.
+
+For complete details on setting up your project's configuration, simply mimic the
+``llvm/projects/sample`` project. Or for further details, consult the
+`Projects <Projects.html>`_ page.
+
+Variable Values
+---------------
+
+To use the makefile system, you simply create a file named ``Makefile`` in your
+directory and declare values for certain variables. The variables and values
+that you select determine what the makefile system will do. These variables
+enable rules and processing in the makefile system that automatically Do The
+Right Thing™.
+
+Including Makefiles
+-------------------
+
+Setting variables alone is not enough. You must include into your Makefile
+additional files that provide the rules of the LLVM Makefile system. The various
+files involved are described in the sections that follow.
+
+``Makefile``
+^^^^^^^^^^^^
+
+Each directory to participate in the build needs to have a file named
+``Makefile``. This is the file first read by ``make``. It has three
+sections:
+
+#. Settable Variables --- Required variables that must be set first.
+#. ``include $(LEVEL)/Makefile.common`` --- include the LLVM Makefile system.
+#. Override Variables --- Override variables set by the LLVM Makefile system.
+
+.. _$(LEVEL)/Makefile.common:
+
+``Makefile.common``
+^^^^^^^^^^^^^^^^^^^
+
+Every project must have a ``Makefile.common`` file at its top source
+directory. This file serves three purposes:
+
+#. It includes the project's configuration makefile to obtain values determined
+ by the ``configure`` script. This is done by including the
+ `$(LEVEL)/Makefile.config`_ file.
+
+#. It specifies any other (static) values that are needed throughout the
+ project. Only values that are used in all or a large proportion of the
+ project's directories should be placed here.
+
+#. It includes the standard rules for the LLVM Makefile system,
+ `$(LLVM_SRC_ROOT)/Makefile.rules`_. This file is the *guts* of the LLVM
+ ``Makefile`` system.
+
+.. _$(LEVEL)/Makefile.config:
+
+``Makefile.config``
+^^^^^^^^^^^^^^^^^^^
+
+Every project must have a ``Makefile.config`` at the top of its *build*
+directory. This file is **generated** by the ``configure`` script from the
+pattern provided by the ``Makefile.config.in`` file located at the top of the
+project's *source* directory. The contents of this file depend largely on what
+configuration items the project uses, however most projects can get what they
+need by just relying on LLVM's configuration found in
+``$(LLVM_OBJ_ROOT)/Makefile.config``.
+
+.. _$(LLVM_SRC_ROOT)/Makefile.rules:
+
+``Makefile.rules``
+^^^^^^^^^^^^^^^^^^
+
+This file, located at ``$(LLVM_SRC_ROOT)/Makefile.rules`` is the heart of the
+LLVM Makefile System. It provides all the logic, dependencies, and rules for
+building the targets supported by the system. What it does largely depends on
+the values of ``make`` `variables`_ that have been set *before*
+``Makefile.rules`` is included.
+
+Comments
+^^^^^^^^
+
+User ``Makefile``\s need not have comments in them unless the construction is
+unusual or it does not strictly follow the rules and patterns of the LLVM
+makefile system. Makefile comments are invoked with the pound (``#``) character.
+The ``#`` character and any text following it, to the end of the line, are
+ignored by ``make``.
+
+Tutorial
+========
+
+This section provides some examples of the different kinds of modules you can
+build with the LLVM makefile system. In general, each directory you provide will
+build a single object although that object may be composed of additionally
+compiled components.
+
+Libraries
+---------
+
+Only a few variable definitions are needed to build a regular library.
+Normally, the makefile system will build all the software into a single
+``libname.o`` (pre-linked) object. This means the library is not searchable and
+that the distinction between compilation units has been dissolved. Optionally,
+you can ask for a shared library (.so) or archive library (.a) built. Archive
+libraries are the default. For example:
+
+.. code-block:: makefile
+
+ LIBRARYNAME = mylib
+ SHARED_LIBRARY = 1
+ ARCHIVE_LIBRARY = 1
+
+says to build a library named ``mylib`` with both a shared library
+(``mylib.so``) and an archive library (``mylib.a``) version. The contents of all
+the libraries produced will be the same; they are just constructed differently.
+Note that you normally do not need to specify the sources involved. The LLVM
+Makefile system will infer the source files from the contents of the source
+directory.
+
+The ``LOADABLE_MODULE=1`` directive can be used in conjunction with
+``SHARED_LIBRARY=1`` to indicate that the resulting shared library should be
+openable with the ``dlopen`` function and searchable with the ``dlsym`` function
+(or your operating system's equivalents). While this isn't strictly necessary on
+Linux and a few other platforms, it is required on systems like HP-UX and
+Darwin. You should use ``LOADABLE_MODULE`` for any shared library that you
+intend to be loaded into a tool via the ``-load`` option. See the
+`WritingAnLLVMPass.html <WritingAnLLVMPass.html#makefile>`_ document for an
+example of why you might want to do this.
+
+Bitcode Modules
+^^^^^^^^^^^^^^^
+
+In some situations, it is desirable to build a single bitcode module from a
+variety of sources, instead of an archive, shared library, or bitcode
+library. Bitcode modules can be specified in addition to any of the other types
+of libraries by defining the `MODULE_NAME`_ variable. For example:
+
+.. code-block:: makefile
+
+ LIBRARYNAME = mylib
+ BYTECODE_LIBRARY = 1
+ MODULE_NAME = mymod
+
+will build a module named ``mymod.bc`` from the sources in the directory. This
+module will be an aggregation of all the bitcode modules derived from the
+sources. The example will also build a bitcode archive containing a bitcode
+module for each compiled source file. The difference is subtle, but important
+depending on how the module or library is to be linked.
+
+Loadable Modules
+^^^^^^^^^^^^^^^^
+
+In some situations, you need to create a loadable module. Loadable modules can
+be loaded into programs like ``opt`` or ``llc`` to specify additional passes to
+run or targets to support. Loadable modules are also useful for debugging a
+pass or providing a pass with another package if that pass can't be included in
+LLVM.
+
+LLVM provides complete support for building such a module. All you need to do is
+use the ``LOADABLE_MODULE`` variable in your ``Makefile``. For example, to build
+a loadable module named ``MyMod`` that uses the LLVM libraries ``LLVMSupport.a``
+and ``LLVMSystem.a``, you would specify:
+
+.. code-block:: makefile
+
+ LIBRARYNAME := MyMod
+ LOADABLE_MODULE := 1
+ LINK_COMPONENTS := support system
+
+Use of the ``LOADABLE_MODULE`` facility implies several things:
+
+#. There will be no "``lib``" prefix on the module. This differentiates it from
+ a standard shared library of the same name.
+
+#. The `SHARED_LIBRARY`_ variable is turned on.
+
+#. The `LINK_LIBS_IN_SHARED`_ variable is turned on.
+
+A loadable module is loaded by LLVM via the facilities of libtool's libltdl
+library, which is part of the ``lib/System`` implementation.
+
+Tools
+-----
+
+For building executable programs (tools), you must provide the name of the tool
+and the names of the libraries you wish to link with the tool. For example:
+
+.. code-block:: makefile
+
+ TOOLNAME = mytool
+ USEDLIBS = mylib
+ LINK_COMPONENTS = support system
+
+says that we are to build a tool named ``mytool`` and that it requires three
+libraries: ``mylib``, ``LLVMSupport.a`` and ``LLVMSystem.a``.
+
+Note that two different variables are used to indicate which libraries are
+linked: ``USEDLIBS`` and ``LLVMLIBS``. This distinction is necessary to support
+projects. ``LLVMLIBS`` refers to the LLVM libraries found in the LLVM object
+directory. ``USEDLIBS`` refers to the libraries built by your project. In the
+case of building LLVM tools, ``USEDLIBS`` and ``LLVMLIBS`` can be used
+interchangeably since the "project" is LLVM itself and ``USEDLIBS`` refers to
+the same place as ``LLVMLIBS``.
+
+Also note that there are two different ways of specifying a library: with a
+``.a`` suffix and without. Without the suffix, the entry refers to the re-linked
+(.o) file which will include *all* symbols of the library. This is
+useful, for example, to include all passes from a library of passes. If the
+``.a`` suffix is used then the library is linked as a searchable library (with
+the ``-l`` option). In this case, only the symbols that are unresolved *at
+that point* will be resolved from the library, if they exist. Other
+(unreferenced) symbols will not be included when the ``.a`` syntax is used. Note
+that in order to use the ``.a`` suffix, the library in question must have been
+built with the ``ARCHIVE_LIBRARY`` option set.
+
+JIT Tools
+^^^^^^^^^
+
+Many tools will want to use the JIT features of LLVM. To do this, you simply
+specify that you want an execution 'engine', and the makefiles will
+automatically link in the appropriate JIT for the host or an interpreter if none
+is available:
+
+.. code-block:: makefile
+
+ TOOLNAME = my_jit_tool
+ USEDLIBS = mylib
+ LINK_COMPONENTS = engine
+
+Of course, any additional libraries may be listed as other components. To get a
+full understanding of how this changes the linker command, it is recommended
+that you:
+
+.. code-block:: bash
+
+ % cd examples/Fibonacci
+ % make VERBOSE=1
+
+Targets Supported
+=================
+
+This section describes each of the targets that can be built using the LLVM
+Makefile system. Any target can be invoked from any directory but not all are
+applicable to a given directory (e.g. "check", "dist" and "install" will always
+operate as if invoked from the top level directory).
+
+================= =============== ==================
+Target Name Implied Targets Target Description
+================= =============== ==================
+``all`` \ Compile the software recursively. Default target.
+``all-local`` \ Compile the software in the local directory only.
+``check`` \ Change to the ``test`` directory in a project and run the test suite there.
+``check-local`` \ Run a local test suite. Generally this is only defined in the ``Makefile`` of the project's ``test`` directory.
+``clean`` \ Remove built objects recursively.
+``clean-local`` \ Remove built objects from the local directory only.
+``dist`` ``all`` Prepare a source distribution tarball.
+``dist-check`` ``all`` Prepare a source distribution tarball and check that it builds.
+``dist-clean`` ``clean`` Clean source distribution tarball temporary files.
+``install`` ``all`` Copy built objects to installation directory.
+``preconditions`` ``all`` Check to make sure configuration and makefiles are up to date.
+``printvars`` ``all`` Prints variables defined by the makefile system (for debugging).
+``tags`` \ Make C and C++ tags files for emacs and vi.
+``uninstall`` \ Remove built objects from installation directory.
+================= =============== ==================
+
+.. _all:
+
+``all`` (default)
+-----------------
+
+When you invoke ``make`` with no arguments, you are implicitly instructing it to
+seek the ``all`` target (goal). This target is used for building the software
+recursively and will do different things in different directories. For example,
+in a ``lib`` directory, the ``all`` target will compile source files and
+generate libraries. But, in a ``tools`` directory, it will link libraries and
+generate executables.
+
+``all-local``
+-------------
+
+This target is the same as `all`_ but it operates only on the current directory
+instead of recursively.
+
+``check``
+---------
+
+This target can be invoked from anywhere within a project's directories but
+always invokes the `check-local`_ target in the project's ``test`` directory, if
+it exists and has a ``Makefile``. A warning is produced otherwise. If
+`TESTSUITE`_ is defined on the ``make`` command line, it will be passed down to
+the invocation of ``make check-local`` in the ``test`` directory. The intended
+usage for this is to assist in running specific suites of tests. If
+``TESTSUITE`` is not set, the implementation of ``check-local`` should run all
+normal tests. It is up to the project to define what different values for
+``TESTSUITE`` will do. See the `Testing Guide <TestingGuide.html>`_ for further
+details.
+
+``check-local``
+---------------
+
+This target should be implemented by the ``Makefile`` in the project's ``test``
+directory. It is invoked by the ``check`` target elsewhere. Each project is
+free to define the actions of ``check-local`` as appropriate for that
+project. The LLVM project itself uses dejagnu to run a suite of feature and
+regression tests. Other projects may choose to use dejagnu or any other testing
+mechanism.
+
+``clean``
+---------
+
+This target cleans the build directory, recursively removing all things that the
+Makefile builds. The cleaning rules have been made guarded so they shouldn't go
+awry (e.g. via ``rm -f $(UNSET_VARIABLE)/*``, which would attempt to erase the
+entire directory structure).
+
+``clean-local``
+---------------
+
+This target does the same thing as ``clean`` but only for the current (local)
+directory.
+
+``dist``
+--------
+
+This target builds a distribution tarball. It first builds the entire project
+using the ``all`` target and then tars up the necessary files and compresses
+it. The generated tarball is sufficient for a casual source distribution, but
+probably not for a release (see ``dist-check``).
+
+``dist-check``
+--------------
+
+This target does the same thing as the ``dist`` target but also checks the
+distribution tarball. The check is made by unpacking the tarball to a new
+directory, configuring it, building it, installing it, and then verifying that
+the installation results are correct (by comparing to the original build). This
+target can take a long time to run but should be done before a release goes out
+to make sure that the distributed tarball can actually be built into a working
+release.
+
+``dist-clean``
+--------------
+
+This is a special form of the ``clean`` target. It performs a normal
+``clean`` but also removes things pertaining to building the distribution.
+
+``install``
+-----------
+
+This target finalizes shared objects and executables and copies all libraries,
+headers, executables and documentation to the directory given with the
+``--prefix`` option to ``configure``. When completed, the prefix directory will
+have everything needed to **use** LLVM.
+
+The LLVM makefiles can generate complete **internal** documentation for all the
+classes by using ``doxygen``. By default, this feature is **not** enabled
+because it takes a long time and generates a massive amount of data (>100MB). If
+you want this feature, you must configure LLVM with the ``--enable-doxygen`` switch
+and ensure that a modern version of doxygen (1.3.7 or later) is available in
+your ``PATH``. You can download doxygen from `here
+<http://www.stack.nl/~dimitri/doxygen/download.html#latestsrc>`_.
+
+``preconditions``
+-----------------
+
+This utility target checks to see if the ``Makefile`` in the object directory is
+older than the ``Makefile`` in the source directory and copies it if so. It also
+reruns the ``configure`` script if that needs to be done and rebuilds the
+``Makefile.config`` file similarly. Users may overload this target to ensure
+that sanity checks are run *before* any building of targets as all the targets
+depend on ``preconditions``.
+
+``printvars``
+-------------
+
+This utility target just causes the LLVM makefiles to print out some of the
+makefile variables so that you can double check how things are set.
+
+``reconfigure``
+---------------
+
+This utility target will force a reconfigure of LLVM or your project. It simply
+runs ``$(PROJ_OBJ_ROOT)/config.status --recheck`` to rerun the configuration
+tests and rebuild the configured files. This isn't generally useful as the
+makefiles will reconfigure themselves whenever it's necessary.
+
+``spotless``
+------------
+
+.. warning::
+
+ Use with caution!
+
+This utility target, only available when ``$(PROJ_OBJ_ROOT)`` is not the same as
+``$(PROJ_SRC_ROOT)``, will completely clean the ``$(PROJ_OBJ_ROOT)`` directory
+by removing its content entirely and reconfiguring the directory. This returns
+the ``$(PROJ_OBJ_ROOT)`` directory to a completely fresh state. All content in
+the directory except configured files and top-level makefiles will be lost.
+
+``tags``
+--------
+
+This target will generate a ``TAGS`` file in the top-level source directory. It
+is meant for use with emacs, XEmacs, or ViM. The TAGS file provides an index of
+symbol definitions so that the editor can jump you to the definition
+quickly.
+
+``uninstall``
+-------------
+
+This target is the opposite of the ``install`` target. It removes the header,
+library and executable files from the installation directories. Note that the
+directories themselves are not removed because it is not guaranteed that LLVM is
+the only thing installing there (e.g. ``--prefix=/usr``).
+
+.. _variables:
+
+Variables
+=========
+
+Variables are used to tell the LLVM Makefile System what to do and to obtain
+information from it. Variables are also used internally by the LLVM Makefile
+System. Variable names that contain only upper-case alphabetic letters and
+underscores are intended for use by the end user. All other variables are
+internal to the LLVM Makefile System and should not be relied upon nor
+modified. The sections below describe how to use the LLVM Makefile
+variables.
+
+Control Variables
+-----------------
+
+Variables listed in the table below should be set *before* the inclusion of
+`$(LEVEL)/Makefile.common`_. These variables provide input to the LLVM make
+system that tell it what to do for the current directory.
+
+``BUILD_ARCHIVE``
+ If set to any value, causes an archive (.a) library to be built.
+
+``BUILT_SOURCES``
+ Specifies a set of source files that are generated from other source
+ files. These sources will be built before any other target processing to
+ ensure they are present.
+
+``BYTECODE_LIBRARY``
+ If set to any value, causes a bitcode library (.bc) to be built.
+
+``CONFIG_FILES``
+ Specifies a set of configuration files to be installed.
+
+``DEBUG_SYMBOLS``
+ If set to any value, causes the build to include debugging symbols even in
+ optimized objects, libraries and executables. This alters the flags
+ specified to the compilers and linkers. Debugging isn't fun in an optimized
+ build, but it is possible.
+
+``DIRS``
+ Specifies a set of directories, usually children of the current directory,
+ that should also be made using the same goal. These directories will be
+ built serially.
+
+``DISABLE_AUTO_DEPENDENCIES``
+ If set to any value, causes the makefiles to **not** automatically generate
+ dependencies when running the compiler. Use of this feature is discouraged
+ and it may be removed at a later date.
+
+``ENABLE_OPTIMIZED``
+ If set to 1, causes the build to generate optimized objects, libraries and
+ executables. This alters the flags specified to the compilers and
+ linkers. Generally debugging won't be a fun experience with an optimized
+ build.
+
+``ENABLE_PROFILING``
+ If set to 1, causes the build to generate both optimized and profiled
+ objects, libraries and executables. This alters the flags specified to the
+ compilers and linkers to ensure that profile data can be collected from the
+ tools built. Use the ``gprof`` tool to analyze the output from the profiled
+ tools (``gmon.out``).
+
+``DISABLE_ASSERTIONS``
+ If set to 1, causes the build to disable assertions, even if building a
+ debug or profile build. This will exclude all assertion check code from the
+ build. LLVM will execute faster, but with little help when things go
+ wrong.
+
+``EXPERIMENTAL_DIRS``
+ Specify a set of directories that should be built, but if they fail, it
+ should not cause the build to fail. Note that this should only be used
+ temporarily while code is being written.
+
+``EXPORTED_SYMBOL_FILE``
+ Specifies the name of a single file that contains a list of the symbols to
+ be exported by the linker. One symbol per line.
+
+``EXPORTED_SYMBOL_LIST``
+ Specifies a set of symbols to be exported by the linker.
+
+``EXTRA_DIST``
+ Specifies additional files that should be distributed with LLVM. All source
+ files, all built sources, all Makefiles, and most documentation files will
+ be automatically distributed. Use this variable to distribute any files that
+ are not automatically distributed.
+
+``KEEP_SYMBOLS``
+ If set to any value, specifies that when linking executables the makefiles
+ should retain debug symbols in the executable. Normally, symbols are
+ stripped from the executable.
+
+``LEVEL`` (required)
+ Specify the level of nesting from the top level. This variable must be set
+ in each makefile as it is used to find the top level and thus the other
+ makefiles.
+
+``LIBRARYNAME``
+ Specify the name of the library to be built. (Required For Libraries)
+
+``LINK_COMPONENTS``
+ When specified for building a tool, the value of this variable will be
+ passed to the ``llvm-config`` tool to generate a link line for the
+ tool. Unlike ``USEDLIBS`` and ``LLVMLIBS``, not all libraries need to be
+ specified. The ``llvm-config`` tool will figure out the library dependencies
+ and add any libraries that are needed. The ``USEDLIBS`` variable can still
+ be used in conjunction with ``LINK_COMPONENTS`` so that additional
+ project-specific libraries can be linked with the LLVM libraries specified
+ by ``LINK_COMPONENTS``.
+
+.. _LINK_LIBS_IN_SHARED:
+
+``LINK_LIBS_IN_SHARED``
+  By default, shared library linking will ignore any libraries specified with
+  the `LLVMLIBS`_ or `USEDLIBS`_ variables. This prevents shared libraries from
+  including things that will be in the LLVM tool the shared library will be
+  loaded into. However, sometimes it is useful to link certain libraries into
+  your shared library and this option enables that feature.
+
+.. _LLVMLIBS:
+
+``LLVMLIBS``
+ Specifies the set of libraries from the LLVM ``$(ObjDir)`` that will be
+ linked into the tool or library.
+
+``LOADABLE_MODULE``
+ If set to any value, causes the shared library being built to also be a
+ loadable module. Loadable modules can be opened with the dlopen() function
+ and searched with dlsym (or the operating system's equivalent). Note that
+ setting this variable without also setting ``SHARED_LIBRARY`` will have no
+ effect.
+
+.. _MODULE_NAME:
+
+``MODULE_NAME``
+ Specifies the name of a bitcode module to be created. A bitcode module can
+ be specified in conjunction with other kinds of library builds or by
+ itself. It constructs from the sources a single linked bitcode file.
+
+``NO_INSTALL``
+ Specifies that the build products of the directory should not be installed
+ but should be built even if the ``install`` target is given. This is handy
+ for directories that build libraries or tools that are only used as part of
+ the build process, such as code generators (e.g. ``tblgen``).
+
+``OPTIONAL_DIRS``
+  Specify a set of directories that may be built, if they exist, but it's not
+  an error for them not to exist.
+
+``PARALLEL_DIRS``
+ Specify a set of directories to build recursively and in parallel if the
+ ``-j`` option was used with ``make``.
+
+.. _SHARED_LIBRARY:
+
+``SHARED_LIBRARY``
+ If set to any value, causes a shared library (``.so``) to be built in
+ addition to any other kinds of libraries. Note that this option will cause
+ all source files to be built twice: once with options for position
+ independent code and once without. Use it only where you really need a
+ shared library.
+
+``SOURCES`` (optional)
+ Specifies the list of source files in the current directory to be
+ built. Source files of any type may be specified (programs, documentation,
+ config files, etc.). If not specified, the makefile system will infer the
+ set of source files from the files present in the current directory.
+
+``SUFFIXES``
+ Specifies a set of filename suffixes that occur in suffix match rules. Only
+ set this if your local ``Makefile`` specifies additional suffix match
+ rules.
+
+``TARGET``
+ Specifies the name of the LLVM code generation target that the current
+ directory builds. Setting this variable enables additional rules to build
+ ``.inc`` files from ``.td`` files.
+
+.. _TESTSUITE:
+
+``TESTSUITE``
+ Specifies the directory of tests to run in ``llvm/test``.
+
+``TOOLNAME``
+ Specifies the name of the tool that the current directory should build.
+
+``TOOL_VERBOSE``
+ Implies ``VERBOSE`` and also tells each tool invoked to be verbose. This is
+ handy when you're trying to see the sub-tools invoked by each tool invoked
+ by the makefile. For example, this will pass ``-v`` to the GCC compilers
+ which causes it to print out the command lines it uses to invoke sub-tools
+ (compiler, assembler, linker).
+
+.. _USEDLIBS:
+
+``USEDLIBS``
+ Specifies the list of project libraries that will be linked into the tool or
+ library.
+
+``VERBOSE``
+ Tells the Makefile system to produce detailed output of what it is doing
+ instead of just summary comments. This will generate a LOT of output.
+
+Override Variables
+------------------
+
+Override variables can be used to override the default values provided by the
+LLVM makefile system. These variables can be set in several ways:
+
+* In the environment (e.g. setenv, export) --- not recommended.
+* On the ``make`` command line --- recommended.
+* On the ``configure`` command line.
+* In the Makefile (only *after* the inclusion of `$(LEVEL)/Makefile.common`_).
+
+The override variables are given below:
+
+``AR`` (defaulted)
+ Specifies the path to the ``ar`` tool.
+
+``PROJ_OBJ_DIR``
+ The directory into which the products of build rules will be placed. This
+ might be the same as `PROJ_SRC_DIR`_ but typically is not.
+
+.. _PROJ_SRC_DIR:
+
+``PROJ_SRC_DIR``
+ The directory which contains the source files to be built.
+
+``BUILD_EXAMPLES``
+ If set to 1, build examples in ``examples`` and (if building Clang)
+ ``tools/clang/examples`` directories.
+
+``BZIP2`` (configured)
+ The path to the ``bzip2`` tool.
+
+``CC`` (configured)
+ The path to the 'C' compiler.
+
+``CFLAGS``
+ Additional flags to be passed to the 'C' compiler.
+
+``CXX``
+ Specifies the path to the C++ compiler.
+
+``CXXFLAGS``
+ Additional flags to be passed to the C++ compiler.
+
+``DATE`` (configured)
+ Specifies the path to the ``date`` program or any program that can generate
+ the current date and time on its standard output.
+
+``DOT`` (configured)
+ Specifies the path to the ``dot`` tool or ``false`` if there isn't one.
+
+``ECHO`` (configured)
+ Specifies the path to the ``echo`` tool for printing output.
+
+``EXEEXT`` (configured)
+ Provides the extension to be used on executables built by the makefiles.
+ The value may be empty on platforms that do not use file extensions for
+ executables (e.g. Unix).
+
+``INSTALL`` (configured)
+ Specifies the path to the ``install`` tool.
+
+``LDFLAGS`` (configured)
+ Allows users to specify additional flags to pass to the linker.
+
+``LIBS`` (configured)
+ The list of libraries that should be linked with each tool.
+
+``LIBTOOL`` (configured)
+ Specifies the path to the ``libtool`` tool. This tool is renamed ``mklib``
+ by the ``configure`` script.
+
+``LLVMAS`` (defaulted)
+ Specifies the path to the ``llvm-as`` tool.
+
+``LLVMCC``
+ Specifies the path to the LLVM capable compiler.
+
+``LLVMCXX``
+ Specifies the path to the LLVM C++ capable compiler.
+
+``LLVMGCC`` (defaulted)
+ Specifies the path to the LLVM version of the GCC 'C' Compiler.
+
+``LLVMGXX`` (defaulted)
+ Specifies the path to the LLVM version of the GCC C++ Compiler.
+
+``LLVMLD`` (defaulted)
+  Specifies the path to the LLVM bitcode linker tool.
+
+``LLVM_OBJ_ROOT`` (configured)
+ Specifies the top directory into which the output of the build is placed.
+
+``LLVM_SRC_ROOT`` (configured)
+ Specifies the top directory in which the sources are found.
+
+``LLVM_TARBALL_NAME`` (configured)
+ Specifies the name of the distribution tarball to create. This is configured
+ from the name of the project and its version number.
+
+``MKDIR`` (defaulted)
+ Specifies the path to the ``mkdir`` tool that creates directories.
+
+``ONLY_TOOLS``
+ If set, specifies the list of tools to build.
+
+``PLATFORMSTRIPOPTS``
+ The options to provide to the linker to specify that a stripped (no symbols)
+ executable should be built.
+
+``RANLIB`` (defaulted)
+ Specifies the path to the ``ranlib`` tool.
+
+``RM`` (defaulted)
+ Specifies the path to the ``rm`` tool.
+
+``SED`` (defaulted)
+ Specifies the path to the ``sed`` tool.
+
+``SHLIBEXT`` (configured)
+ Provides the filename extension to use for shared libraries.
+
+``TBLGEN`` (defaulted)
+ Specifies the path to the ``tblgen`` tool.
+
+``TAR`` (defaulted)
+ Specifies the path to the ``tar`` tool.
+
+``ZIP`` (defaulted)
+ Specifies the path to the ``zip`` tool.
+
+Readable Variables
+------------------
+
+Variables listed below can be used by the user's Makefile but should not be
+changed. Changing a value will generally cause the build to go wrong, so don't
+do it.
+
+``bindir``
+ The directory into which executables will ultimately be installed. This
+ value is derived from the ``--prefix`` option given to ``configure``.
+
+``BuildMode``
+ The name of the type of build being performed: Debug, Release, or
+ Profile.
+
+``bytecode_libdir``
+ The directory into which bitcode libraries will ultimately be installed.
+ This value is derived from the ``--prefix`` option given to ``configure``.
+
+``ConfigureScriptFLAGS``
+ Additional flags given to the ``configure`` script when reconfiguring.
+
+``DistDir``
+ The *current* directory for which a distribution copy is being made.
+
+.. _Echo:
+
+``Echo``
+ The LLVM Makefile System output command. This provides the ``llvm[n]``
+ prefix and starts with ``@`` so the command itself is not printed by
+ ``make``.
+
+``EchoCmd``
+ Same as `Echo`_ but without the leading ``@``.
+
+``includedir``
+ The directory into which include files will ultimately be installed. This
+ value is derived from the ``--prefix`` option given to ``configure``.
+
+``libdir``
+ The directory into which native libraries will ultimately be installed.
+ This value is derived from the ``--prefix`` option given to
+ ``configure``.
+
+``LibDir``
+ The configuration specific directory into which libraries are placed before
+ installation.
+
+``MakefileConfig``
+ Full path of the ``Makefile.config`` file.
+
+``MakefileConfigIn``
+ Full path of the ``Makefile.config.in`` file.
+
+``ObjDir``
+  The configuration- and directory-specific directory where build objects
+  (compilation results) are placed.
+
+``SubDirs``
+ The complete list of sub-directories of the current directory as
+ specified by other variables.
+
+``Sources``
+ The complete list of source files.
+
+``sysconfdir``
+ The directory into which configuration files will ultimately be
+ installed. This value is derived from the ``--prefix`` option given to
+ ``configure``.
+
+``ToolDir``
+ The configuration specific directory into which executables are placed
+ before they are installed.
+
+``TopDistDir``
+  The topmost directory into which the distribution files are copied.
+
+``Verb``
+ Use this as the first thing on your build script lines to enable or disable
+ verbose mode. It expands to either an ``@`` (quiet mode) or nothing (verbose
+ mode).
+
+Internal Variables
+------------------
+
+Variables listed below are used by the LLVM Makefile System and considered
+internal. You should not use these variables under any circumstances.
+
+.. code-block:: makefile
+
+ Archive
+ AR.Flags
+ BaseNameSources
+ BCCompile.C
+ BCCompile.CXX
+ BCLinkLib
+ C.Flags
+ Compile.C
+ CompileCommonOpts
+ Compile.CXX
+ ConfigStatusScript
+ ConfigureScript
+  CPP.Flags
+ CXX.Flags
+ DependFiles
+ DestArchiveLib
+ DestBitcodeLib
+ DestModule
+ DestSharedLib
+ DestTool
+ DistAlways
+ DistCheckDir
+ DistCheckTop
+ DistFiles
+ DistName
+ DistOther
+ DistSources
+ DistSubDirs
+ DistTarBZ2
+ DistTarGZip
+ DistZip
+ ExtraLibs
+ FakeSources
+ INCFiles
+ InternalTargets
+ LD.Flags
+ LibName.A
+ LibName.BC
+ LibName.LA
+ LibName.O
+ LibTool.Flags
+ Link
+ LinkModule
+ LLVMLibDir
+ LLVMLibsOptions
+ LLVMLibsPaths
+ LLVMToolDir
+ LLVMUsedLibs
+ LocalTargets
+ Module
+ ObjectsBC
+ ObjectsLO
+ ObjectsO
+ ObjMakefiles
+ ParallelTargets
+ PreConditions
+ ProjLibsOptions
+ ProjLibsPaths
+ ProjUsedLibs
+ Ranlib
+ RecursiveTargets
+ SrcMakefiles
+ Strip
+ StripWarnMsg
+ TableGen
+ TDFiles
+ ToolBuildPath
+ TopLevelTargets
+ UserTargets
diff --git a/docs/Packaging.rst b/docs/Packaging.rst
new file mode 100644
index 00000000000..6e74158d721
--- /dev/null
+++ b/docs/Packaging.rst
@@ -0,0 +1,75 @@
+.. _packaging:
+
+========================
+Advice on Packaging LLVM
+========================
+
+.. contents::
+ :local:
+
+Overview
+========
+
+LLVM sets certain default configure options to make sure our developers don't
+break things for constrained platforms. These settings are not optimal for most
+desktop systems, and we hope that packagers (e.g., Red Hat, Debian, MacPorts,
+etc.) will tweak them. This document lists settings we suggest you tweak.
+
+LLVM's API changes with each release, so users are likely to want, for example,
+both LLVM-2.6 and LLVM-2.7 installed at the same time to support apps developed
+against each.
+
+Compile Flags
+=============
+
+LLVM runs much more quickly when it's optimized and assertions are removed.
+However, such a build is currently incompatible with users who build without
+defining ``NDEBUG``, and the lack of assertions makes it hard to debug problems
+in user code. We recommend allowing users to install both optimized and debug
+versions of LLVM in parallel. The following configure flags are relevant (a
+sketch of such a setup follows the list):
+
+``--disable-assertions``
+  Builds LLVM with ``NDEBUG`` defined. Changes the LLVM ABI. Also available
+  by setting ``DISABLE_ASSERTIONS=0|1`` in ``make``'s environment. Assertions
+  default to enabled regardless of the optimization setting, but they slow
+  things down.
+
+``--enable-debug-symbols``
+ Builds LLVM with ``-g``. Also available by setting ``DEBUG_SYMBOLS=0|1`` in
+ ``make``'s environment. This defaults to disabled when optimizing, so you
+ should turn it back on to let users debug their programs.
+
+``--enable-optimized``
+ (For svn checkouts) Builds LLVM with ``-O2`` and, by default, turns off
+ debug symbols. Also available by setting ``ENABLE_OPTIMIZED=0|1`` in
+ ``make``'s environment. This defaults to enabled when not in a
+ checkout.
+
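+As a rough sketch (directory names and installation prefixes are illustrative
+only), a packager might configure two separate object trees so that optimized
+and debug versions of LLVM can be installed side by side:
+
+.. code-block:: bash
+
+  # Optimized build without assertions (paths are illustrative).
+  mkdir obj-release && cd obj-release
+  ../llvm/configure --enable-optimized --disable-assertions --prefix=/usr/lib/llvm-release
+  make && make install
+
+  # Debug build with assertions and debug symbols.
+  mkdir ../obj-debug && cd ../obj-debug
+  ../llvm/configure --disable-optimized --enable-debug-symbols --prefix=/usr/lib/llvm-debug
+  make && make install
+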
+C++ Features
+============
+
+RTTI
+ LLVM disables RTTI by default. Add ``REQUIRES_RTTI=1`` to your environment
+ while running ``make`` to re-enable it. This will allow users to build with
+ RTTI enabled and still inherit from LLVM classes.
+
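+A minimal sketch of rebuilding with RTTI enabled:
+
+.. code-block:: bash
+
+  # Run from an already-configured object directory.
+  make REQUIRES_RTTI=1
+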
+Shared Library
+==============
+
+Configure with ``--enable-shared`` to build
+``libLLVM-<major>.<minor>.(so|dylib)`` and link the tools against it. This
+saves lots of binary size at the cost of some startup time.
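+
+For example (any flags beyond ``--enable-shared`` are illustrative only):
+
+.. code-block:: bash
+
+  # Illustrative configure line; adjust the other flags to taste.
+  ./configure --enable-shared --enable-optimized --disable-assertions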
+
+Dependencies
+============
+
+``--enable-libffi``
+ Depend on `libffi <http://sources.redhat.com/libffi/>`_ to allow the LLVM
+ interpreter to call external functions.
+
+``--with-oprofile``
+  Depend on `libopagent
+  <http://oprofile.sourceforge.net/doc/devel/index.html>`_ (version 0.9.4 or
+  later) to let the LLVM JIT tell oprofile about function addresses and line
+  numbers.
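+
+For example, a configure invocation enabling both of the dependencies above
+might look like this (a sketch; your oprofile installation may require a
+prefix argument to ``--with-oprofile``):
+
+.. code-block:: bash
+
+  ./configure --enable-libffi --with-oprofile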
diff --git a/docs/Passes.html b/docs/Passes.html
new file mode 100644
index 00000000000..85292e37412
--- /dev/null
+++ b/docs/Passes.html
@@ -0,0 +1,2066 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
+ "http://www.w3.org/TR/html4/strict.dtd">
+<html>
+<head>
+ <title>LLVM's Analysis and Transform Passes</title>
+ <link rel="stylesheet" href="_static/llvm.css" type="text/css">
+ <meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
+</head>
+<body>
+
+<!--
+
+If Passes.html is up to date, the following "one-liner" should print
+an empty diff.
+
+egrep -e '^<tr><td><a href="#.*">-.*</a></td><td>.*</td></tr>$' \
+ -e '^ <a name=".*">.*</a>$' < Passes.html >html; \
+perl >help <<'EOT' && diff -u help html; rm -f help html
+open HTML, "<Passes.html" or die "open: Passes.html: $!\n";
+while (<HTML>) {
+ m:^<tr><td><a href="#(.*)">-.*</a></td><td>.*</td></tr>$: or next;
+ $order{$1} = sprintf("%03d", 1 + int %order);
+}
+open HELP, "../Release/bin/opt -help|" or die "open: opt -help: $!\n";
+while (<HELP>) {
+ m:^ -([^ ]+) +- (.*)$: or next;
+ my $o = $order{$1};
+ $o = "000" unless defined $o;
+ push @x, "$o<tr><td><a href=\"#$1\">-$1</a></td><td>$2</td></tr>\n";
+ push @y, "$o <a name=\"$1\">-$1: $2</a>\n";
+}
+@x = map { s/^\d\d\d//; $_ } sort @x;
+@y = map { s/^\d\d\d//; $_ } sort @y;
+print @x, @y;
+EOT
+
+This (real) one-liner can also be helpful when converting comments to HTML:
+
+perl -e '$/ = undef; for (split(/\n/, <>)) { s:^ *///? ?::; print " <p>\n" if !$on && $_ =~ /\S/; print " </p>\n" if $on && $_ =~ /^\s*$/; print " $_\n"; $on = ($_ =~ /\S/); } print " </p>\n" if $on'
+
+ -->
+
+<h1>LLVM's Analysis and Transform Passes</h1>
+
+<ol>
+ <li><a href="#intro">Introduction</a></li>
+  <li><a href="#analyses">Analysis Passes</a></li>
+ <li><a href="#transforms">Transform Passes</a></li>
+ <li><a href="#utilities">Utility Passes</a></li>
+</ol>
+
+<div class="doc_author">
+ <p>Written by <a href="mailto:rspencer@x10sys.com">Reid Spencer</a>
+ and Gordon Henriksen</p>
+</div>
+
+<!-- ======================================================================= -->
+<h2><a name="intro">Introduction</a></h2>
+<div>
+  <p>This document serves as a high level summary of the optimization features
+  that LLVM provides. Optimizations are implemented as Passes that traverse some
+  portion of a program to either collect information or transform the program.
+  The table below divides the passes that LLVM provides into three categories.
+  Analysis passes compute information that other passes can use, or that is
+  useful for debugging or program visualization. Transform passes can use (or
+  invalidate) the analysis passes; they all mutate the program in some way.
+  Utility passes provide some utility but don't otherwise fit a category. For
+  example, passes to extract functions to bitcode or to write a module to
+  bitcode are neither analysis nor transform passes.</p>
+ <p>The table below provides a quick summary of each pass and links to the more
+ complete pass description later in the document.</p>
+
+<table>
+<tr><th colspan="2"><b>ANALYSIS PASSES</b></th></tr>
+<tr><th>Option</th><th>Name</th></tr>
+<tr><td><a href="#aa-eval">-aa-eval</a></td><td>Exhaustive Alias Analysis Precision Evaluator</td></tr>
+<tr><td><a href="#basicaa">-basicaa</a></td><td>Basic Alias Analysis (stateless AA impl)</td></tr>
+<tr><td><a href="#basiccg">-basiccg</a></td><td>Basic CallGraph Construction</td></tr>
+<tr><td><a href="#count-aa">-count-aa</a></td><td>Count Alias Analysis Query Responses</td></tr>
+<tr><td><a href="#debug-aa">-debug-aa</a></td><td>AA use debugger</td></tr>
+<tr><td><a href="#domfrontier">-domfrontier</a></td><td>Dominance Frontier Construction</td></tr>
+<tr><td><a href="#domtree">-domtree</a></td><td>Dominator Tree Construction</td></tr>
+<tr><td><a href="#dot-callgraph">-dot-callgraph</a></td><td>Print Call Graph to 'dot' file</td></tr>
+<tr><td><a href="#dot-cfg">-dot-cfg</a></td><td>Print CFG of function to 'dot' file</td></tr>
+<tr><td><a href="#dot-cfg-only">-dot-cfg-only</a></td><td>Print CFG of function to 'dot' file (with no function bodies)</td></tr>
+<tr><td><a href="#dot-dom">-dot-dom</a></td><td>Print dominance tree of function to 'dot' file</td></tr>
+<tr><td><a href="#dot-dom-only">-dot-dom-only</a></td><td>Print dominance tree of function to 'dot' file (with no function bodies)</td></tr>
+<tr><td><a href="#dot-postdom">-dot-postdom</a></td><td>Print postdominance tree of function to 'dot' file</td></tr>
+<tr><td><a href="#dot-postdom-only">-dot-postdom-only</a></td><td>Print postdominance tree of function to 'dot' file (with no function bodies)</td></tr>
+<tr><td><a href="#globalsmodref-aa">-globalsmodref-aa</a></td><td>Simple mod/ref analysis for globals</td></tr>
+<tr><td><a href="#instcount">-instcount</a></td><td>Counts the various types of Instructions</td></tr>
+<tr><td><a href="#intervals">-intervals</a></td><td>Interval Partition Construction</td></tr>
+<tr><td><a href="#iv-users">-iv-users</a></td><td>Induction Variable Users</td></tr>
+<tr><td><a href="#lazy-value-info">-lazy-value-info</a></td><td>Lazy Value Information Analysis</td></tr>
+<tr><td><a href="#lda">-lda</a></td><td>Loop Dependence Analysis</td></tr>
+<tr><td><a href="#libcall-aa">-libcall-aa</a></td><td>LibCall Alias Analysis</td></tr>
+<tr><td><a href="#lint">-lint</a></td><td>Statically lint-checks LLVM IR</td></tr>
+<tr><td><a href="#loops">-loops</a></td><td>Natural Loop Information</td></tr>
+<tr><td><a href="#memdep">-memdep</a></td><td>Memory Dependence Analysis</td></tr>
+<tr><td><a href="#module-debuginfo">-module-debuginfo</a></td><td>Decodes module-level debug info</td></tr>
+<tr><td><a href="#no-aa">-no-aa</a></td><td>No Alias Analysis (always returns 'may' alias)</td></tr>
+<tr><td><a href="#no-profile">-no-profile</a></td><td>No Profile Information</td></tr>
+<tr><td><a href="#postdomtree">-postdomtree</a></td><td>Post-Dominator Tree Construction</td></tr>
+<tr><td><a href="#print-alias-sets">-print-alias-sets</a></td><td>Alias Set Printer</td></tr>
+<tr><td><a href="#print-callgraph">-print-callgraph</a></td><td>Print a call graph</td></tr>
+<tr><td><a href="#print-callgraph-sccs">-print-callgraph-sccs</a></td><td>Print SCCs of the Call Graph</td></tr>
+<tr><td><a href="#print-cfg-sccs">-print-cfg-sccs</a></td><td>Print SCCs of each function CFG</td></tr>
+<tr><td><a href="#print-dbginfo">-print-dbginfo</a></td><td>Print debug info in human readable form</td></tr>
+<tr><td><a href="#print-dom-info">-print-dom-info</a></td><td>Dominator Info Printer</td></tr>
+<tr><td><a href="#print-externalfnconstants">-print-externalfnconstants</a></td><td>Print external fn callsites passed constants</td></tr>
+<tr><td><a href="#print-function">-print-function</a></td><td>Print function to stderr</td></tr>
+<tr><td><a href="#print-module">-print-module</a></td><td>Print module to stderr</td></tr>
+<tr><td><a href="#print-used-types">-print-used-types</a></td><td>Find Used Types</td></tr>
+<tr><td><a href="#profile-estimator">-profile-estimator</a></td><td>Estimate profiling information</td></tr>
+<tr><td><a href="#profile-loader">-profile-loader</a></td><td>Load profile information from llvmprof.out</td></tr>
+<tr><td><a href="#profile-verifier">-profile-verifier</a></td><td>Verify profiling information</td></tr>
+<tr><td><a href="#regions">-regions</a></td><td>Detect single entry single exit regions</td></tr>
+<tr><td><a href="#scalar-evolution">-scalar-evolution</a></td><td>Scalar Evolution Analysis</td></tr>
+<tr><td><a href="#scev-aa">-scev-aa</a></td><td>ScalarEvolution-based Alias Analysis</td></tr>
+<tr><td><a href="#targetdata">-targetdata</a></td><td>Target Data Layout</td></tr>
+
+
+<tr><th colspan="2"><b>TRANSFORM PASSES</b></th></tr>
+<tr><th>Option</th><th>Name</th></tr>
+<tr><td><a href="#adce">-adce</a></td><td>Aggressive Dead Code Elimination</td></tr>
+<tr><td><a href="#always-inline">-always-inline</a></td><td>Inliner for always_inline functions</td></tr>
+<tr><td><a href="#argpromotion">-argpromotion</a></td><td>Promote 'by reference' arguments to scalars</td></tr>
+<tr><td><a href="#bb-vectorize">-bb-vectorize</a></td><td>Combine instructions to form vector instructions within basic blocks</td></tr>
+<tr><td><a href="#block-placement">-block-placement</a></td><td>Profile Guided Basic Block Placement</td></tr>
+<tr><td><a href="#break-crit-edges">-break-crit-edges</a></td><td>Break critical edges in CFG</td></tr>
+<tr><td><a href="#codegenprepare">-codegenprepare</a></td><td>Optimize for code generation</td></tr>
+<tr><td><a href="#constmerge">-constmerge</a></td><td>Merge Duplicate Global Constants</td></tr>
+<tr><td><a href="#constprop">-constprop</a></td><td>Simple constant propagation</td></tr>
+<tr><td><a href="#dce">-dce</a></td><td>Dead Code Elimination</td></tr>
+<tr><td><a href="#deadargelim">-deadargelim</a></td><td>Dead Argument Elimination</td></tr>
+<tr><td><a href="#deadtypeelim">-deadtypeelim</a></td><td>Dead Type Elimination</td></tr>
+<tr><td><a href="#die">-die</a></td><td>Dead Instruction Elimination</td></tr>
+<tr><td><a href="#dse">-dse</a></td><td>Dead Store Elimination</td></tr>
+<tr><td><a href="#functionattrs">-functionattrs</a></td><td>Deduce function attributes</td></tr>
+<tr><td><a href="#globaldce">-globaldce</a></td><td>Dead Global Elimination</td></tr>
+<tr><td><a href="#globalopt">-globalopt</a></td><td>Global Variable Optimizer</td></tr>
+<tr><td><a href="#gvn">-gvn</a></td><td>Global Value Numbering</td></tr>
+<tr><td><a href="#indvars">-indvars</a></td><td>Canonicalize Induction Variables</td></tr>
+<tr><td><a href="#inline">-inline</a></td><td>Function Integration/Inlining</td></tr>
+<tr><td><a href="#insert-edge-profiling">-insert-edge-profiling</a></td><td>Insert instrumentation for edge profiling</td></tr>
+<tr><td><a href="#insert-optimal-edge-profiling">-insert-optimal-edge-profiling</a></td><td>Insert optimal instrumentation for edge profiling</td></tr>
+<tr><td><a href="#instcombine">-instcombine</a></td><td>Combine redundant instructions</td></tr>
+<tr><td><a href="#internalize">-internalize</a></td><td>Internalize Global Symbols</td></tr>
+<tr><td><a href="#ipconstprop">-ipconstprop</a></td><td>Interprocedural constant propagation</td></tr>
+<tr><td><a href="#ipsccp">-ipsccp</a></td><td>Interprocedural Sparse Conditional Constant Propagation</td></tr>
+<tr><td><a href="#jump-threading">-jump-threading</a></td><td>Jump Threading</td></tr>
+<tr><td><a href="#lcssa">-lcssa</a></td><td>Loop-Closed SSA Form Pass</td></tr>
+<tr><td><a href="#licm">-licm</a></td><td>Loop Invariant Code Motion</td></tr>
+<tr><td><a href="#loop-deletion">-loop-deletion</a></td><td>Delete dead loops</td></tr>
+<tr><td><a href="#loop-extract">-loop-extract</a></td><td>Extract loops into new functions</td></tr>
+<tr><td><a href="#loop-extract-single">-loop-extract-single</a></td><td>Extract at most one loop into a new function</td></tr>
+<tr><td><a href="#loop-reduce">-loop-reduce</a></td><td>Loop Strength Reduction</td></tr>
+<tr><td><a href="#loop-rotate">-loop-rotate</a></td><td>Rotate Loops</td></tr>
+<tr><td><a href="#loop-simplify">-loop-simplify</a></td><td>Canonicalize natural loops</td></tr>
+<tr><td><a href="#loop-unroll">-loop-unroll</a></td><td>Unroll loops</td></tr>
+<tr><td><a href="#loop-unswitch">-loop-unswitch</a></td><td>Unswitch loops</td></tr>
+<tr><td><a href="#loweratomic">-loweratomic</a></td><td>Lower atomic intrinsics to non-atomic form</td></tr>
+<tr><td><a href="#lowerinvoke">-lowerinvoke</a></td><td>Lower invoke and unwind, for unwindless code generators</td></tr>
+<tr><td><a href="#lowerswitch">-lowerswitch</a></td><td>Lower SwitchInst's to branches</td></tr>
+<tr><td><a href="#mem2reg">-mem2reg</a></td><td>Promote Memory to Register</td></tr>
+<tr><td><a href="#memcpyopt">-memcpyopt</a></td><td>MemCpy Optimization</td></tr>
+<tr><td><a href="#mergefunc">-mergefunc</a></td><td>Merge Functions</td></tr>
+<tr><td><a href="#mergereturn">-mergereturn</a></td><td>Unify function exit nodes</td></tr>
+<tr><td><a href="#partial-inliner">-partial-inliner</a></td><td>Partial Inliner</td></tr>
+<tr><td><a href="#prune-eh">-prune-eh</a></td><td>Remove unused exception handling info</td></tr>
+<tr><td><a href="#reassociate">-reassociate</a></td><td>Reassociate expressions</td></tr>
+<tr><td><a href="#reg2mem">-reg2mem</a></td><td>Demote all values to stack slots</td></tr>
+<tr><td><a href="#scalarrepl">-scalarrepl</a></td><td>Scalar Replacement of Aggregates (DT)</td></tr>
+<tr><td><a href="#sccp">-sccp</a></td><td>Sparse Conditional Constant Propagation</td></tr>
+<tr><td><a href="#simplify-libcalls">-simplify-libcalls</a></td><td>Simplify well-known library calls</td></tr>
+<tr><td><a href="#simplifycfg">-simplifycfg</a></td><td>Simplify the CFG</td></tr>
+<tr><td><a href="#sink">-sink</a></td><td>Code sinking</td></tr>
+<tr><td><a href="#sretpromotion">-sretpromotion</a></td><td>Promote sret arguments to multiple ret values</td></tr>
+<tr><td><a href="#strip">-strip</a></td><td>Strip all symbols from a module</td></tr>
+<tr><td><a href="#strip-dead-debug-info">-strip-dead-debug-info</a></td><td>Strip debug info for unused symbols</td></tr>
+<tr><td><a href="#strip-dead-prototypes">-strip-dead-prototypes</a></td><td>Strip Unused Function Prototypes</td></tr>
+<tr><td><a href="#strip-debug-declare">-strip-debug-declare</a></td><td>Strip all llvm.dbg.declare intrinsics</td></tr>
+<tr><td><a href="#strip-nondebug">-strip-nondebug</a></td><td>Strip all symbols, except dbg symbols, from a module</td></tr>
+<tr><td><a href="#tailcallelim">-tailcallelim</a></td><td>Tail Call Elimination</td></tr>
+<tr><td><a href="#tailduplicate">-tailduplicate</a></td><td>Tail Duplication</td></tr>
+
+
+<tr><th colspan="2"><b>UTILITY PASSES</b></th></tr>
+<tr><th>Option</th><th>Name</th></tr>
+<tr><td><a href="#deadarghaX0r">-deadarghaX0r</a></td><td>Dead Argument Hacking (BUGPOINT USE ONLY; DO NOT USE)</td></tr>
+<tr><td><a href="#extract-blocks">-extract-blocks</a></td><td>Extract Basic Blocks From Module (for bugpoint use)</td></tr>
+<tr><td><a href="#instnamer">-instnamer</a></td><td>Assign names to anonymous instructions</td></tr>
+<tr><td><a href="#preverify">-preverify</a></td><td>Preliminary module verification</td></tr>
+<tr><td><a href="#verify">-verify</a></td><td>Module Verifier</td></tr>
+<tr><td><a href="#view-cfg">-view-cfg</a></td><td>View CFG of function</td></tr>
+<tr><td><a href="#view-cfg-only">-view-cfg-only</a></td><td>View CFG of function (with no function bodies)</td></tr>
+<tr><td><a href="#view-dom">-view-dom</a></td><td>View dominance tree of function</td></tr>
+<tr><td><a href="#view-dom-only">-view-dom-only</a></td><td>View dominance tree of function (with no function bodies)</td></tr>
+<tr><td><a href="#view-postdom">-view-postdom</a></td><td>View postdominance tree of function</td></tr>
+<tr><td><a href="#view-postdom-only">-view-postdom-only</a></td><td>View postdominance tree of function (with no function bodies)</td></tr>
+</table>
+
+</div>
+
+<!-- ======================================================================= -->
+<h2><a name="analyses">Analysis Passes</a></h2>
+<div>
+ <p>This section describes the LLVM Analysis Passes.</p>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="aa-eval">-aa-eval: Exhaustive Alias Analysis Precision Evaluator</a>
+</h3>
+<div>
+  <p>This is a simple N^2 alias analysis accuracy evaluator. Basically, for
+  each function in the program, it checks how the alias analysis
+  implementation answers alias queries between each pair of pointers in the
+  function.</p>
+
+ <p>This is inspired and adapted from code by: Naveen Neelakantam, Francesco
+ Spadini, and Wojciech Stryjewski.</p>
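+
+  <p>As an illustration (not prescriptive), this pass is typically run from
+  <code>opt</code> on top of a real alias analysis implementation, where
+  <code>input.bc</code> is a placeholder for your bitcode file:</p>
+
+<blockquote><pre
+># input.bc is a placeholder for your bitcode file
+opt -basicaa -aa-eval -disable-output input.bc</pre></blockquote>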
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="basicaa">-basicaa: Basic Alias Analysis (stateless AA impl)</a>
+</h3>
+<div>
+ <p>A basic alias analysis pass that implements identities (two different
+ globals cannot alias, etc), but does no stateful analysis.</p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="basiccg">-basiccg: Basic CallGraph Construction</a>
+</h3>
+<div>
+ <p>Yet to be written.</p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="count-aa">-count-aa: Count Alias Analysis Query Responses</a>
+</h3>
+<div>
+ <p>
+ A pass which can be used to count how many alias queries
+ are being made and how the alias analysis implementation being used responds.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="debug-aa">-debug-aa: AA use debugger</a>
+</h3>
+<div>
+ <p>
+ This simple pass checks alias analysis users to ensure that if they
+ create a new value, they do not query AA without informing it of the value.
+ It acts as a shim over any other AA pass you want.
+ </p>
+
+ <p>
+ Yes keeping track of every value in the program is expensive, but this is
+ a debugging pass.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="domfrontier">-domfrontier: Dominance Frontier Construction</a>
+</h3>
+<div>
+ <p>
+ This pass is a simple dominator construction algorithm for finding forward
+ dominator frontiers.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="domtree">-domtree: Dominator Tree Construction</a>
+</h3>
+<div>
+ <p>
+ This pass is a simple dominator construction algorithm for finding forward
+ dominators.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="dot-callgraph">-dot-callgraph: Print Call Graph to 'dot' file</a>
+</h3>
+<div>
+ <p>
+ This pass, only available in <code>opt</code>, prints the call graph into a
+ <code>.dot</code> graph. This graph can then be processed with the "dot" tool
+ to convert it to postscript or some other suitable format.
+ </p>
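+
+  <p>For example (the <code>.dot</code> output file name below is an
+  assumption; the pass prints the name of the file it actually writes):</p>
+
+<blockquote><pre
+># input.bc and callgraph.dot are placeholder file names
+opt -dot-callgraph -disable-output input.bc
+dot -Tps callgraph.dot -o callgraph.ps</pre></blockquote>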
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="dot-cfg">-dot-cfg: Print CFG of function to 'dot' file</a>
+</h3>
+<div>
+ <p>
+ This pass, only available in <code>opt</code>, prints the control flow graph
+ into a <code>.dot</code> graph. This graph can then be processed with the
+ "dot" tool to convert it to postscript or some other suitable format.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="dot-cfg-only">-dot-cfg-only: Print CFG of function to 'dot' file (with no function bodies)</a>
+</h3>
+<div>
+ <p>
+ This pass, only available in <code>opt</code>, prints the control flow graph
+ into a <code>.dot</code> graph, omitting the function bodies. This graph can
+ then be processed with the "dot" tool to convert it to postscript or some
+ other suitable format.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="dot-dom">-dot-dom: Print dominance tree of function to 'dot' file</a>
+</h3>
+<div>
+ <p>
+ This pass, only available in <code>opt</code>, prints the dominator tree
+ into a <code>.dot</code> graph. This graph can then be processed with the
+ "dot" tool to convert it to postscript or some other suitable format.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="dot-dom-only">-dot-dom-only: Print dominance tree of function to 'dot' file (with no function bodies)</a>
+</h3>
+<div>
+ <p>
+ This pass, only available in <code>opt</code>, prints the dominator tree
+ into a <code>.dot</code> graph, omitting the function bodies. This graph can
+ then be processed with the "dot" tool to convert it to postscript or some
+ other suitable format.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="dot-postdom">-dot-postdom: Print postdominance tree of function to 'dot' file</a>
+</h3>
+<div>
+ <p>
+ This pass, only available in <code>opt</code>, prints the post dominator tree
+ into a <code>.dot</code> graph. This graph can then be processed with the
+ "dot" tool to convert it to postscript or some other suitable format.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="dot-postdom-only">-dot-postdom-only: Print postdominance tree of function to 'dot' file (with no function bodies)</a>
+</h3>
+<div>
+ <p>
+ This pass, only available in <code>opt</code>, prints the post dominator tree
+ into a <code>.dot</code> graph, omitting the function bodies. This graph can
+ then be processed with the "dot" tool to convert it to postscript or some
+ other suitable format.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="globalsmodref-aa">-globalsmodref-aa: Simple mod/ref analysis for globals</a>
+</h3>
+<div>
+ <p>
+ This simple pass provides alias and mod/ref information for global values
+ that do not have their address taken, and keeps track of whether functions
+ read or write memory (are "pure"). For this simple (but very common) case,
+ we can provide pretty accurate and useful information.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="instcount">-instcount: Counts the various types of Instructions</a>
+</h3>
+<div>
+ <p>
+  This pass collects the count of all instructions and reports them.
+ </p>
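+
+  <p>The counts are reported through LLVM's statistics machinery, so a sketch
+  of a typical invocation enables <code>-stats</code>:</p>
+
+<blockquote><pre
+># input.bc is a placeholder for your bitcode file
+opt -instcount -stats -disable-output input.bc</pre></blockquote>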
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="intervals">-intervals: Interval Partition Construction</a>
+</h3>
+<div>
+ <p>
+ This analysis calculates and represents the interval partition of a function,
+ or a preexisting interval partition.
+ </p>
+
+ <p>
+ In this way, the interval partition may be used to reduce a flow graph down
+ to its degenerate single node interval partition (unless it is irreducible).
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="iv-users">-iv-users: Induction Variable Users</a>
+</h3>
+<div>
+ <p>Bookkeeping for "interesting" users of expressions computed from
+ induction variables.</p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="lazy-value-info">-lazy-value-info: Lazy Value Information Analysis</a>
+</h3>
+<div>
+ <p>Interface for lazy computation of value constraint information.</p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="lda">-lda: Loop Dependence Analysis</a>
+</h3>
+<div>
+ <p>Loop dependence analysis framework, which is used to detect dependences in
+ memory accesses in loops.</p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="libcall-aa">-libcall-aa: LibCall Alias Analysis</a>
+</h3>
+<div>
+ <p>LibCall Alias Analysis.</p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="lint">-lint: Statically lint-checks LLVM IR</a>
+</h3>
+<div>
+ <p>This pass statically checks for common and easily-identified constructs
+ which produce undefined or likely unintended behavior in LLVM IR.</p>
+
+ <p>It is not a guarantee of correctness, in two ways. First, it isn't
+ comprehensive. There are checks which could be done statically which are
+ not yet implemented. Some of these are indicated by TODO comments, but
+ those aren't comprehensive either. Second, many conditions cannot be
+ checked statically. This pass does no dynamic instrumentation, so it
+ can't check for all possible problems.</p>
+
+ <p>Another limitation is that it assumes all code will be executed. A store
+ through a null pointer in a basic block which is never reached is harmless,
+ but this pass will warn about it anyway.</p>
+
+ <p>Optimization passes may make conditions that this pass checks for more or
+ less obvious. If an optimization pass appears to be introducing a warning,
+ it may be that the optimization pass is merely exposing an existing
+ condition in the code.</p>
+
+ <p>This code may be run before instcombine. In many cases, instcombine checks
+ for the same kinds of things and turns instructions with undefined behavior
+ into unreachable (or equivalent). Because of this, this pass makes some
+ effort to look through bitcasts and so on.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="loops">-loops: Natural Loop Information</a>
+</h3>
+<div>
+ <p>
+ This analysis is used to identify natural loops and determine the loop depth
+ of various nodes of the CFG. Note that the loops identified may actually be
+ several natural loops that share the same header node... not just a single
+ natural loop.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="memdep">-memdep: Memory Dependence Analysis</a>
+</h3>
+<div>
+ <p>
+ An analysis that determines, for a given memory operation, what preceding
+ memory operations it depends on. It builds on alias analysis information, and
+ tries to provide a lazy, caching interface to a common kind of alias
+ information query.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="module-debuginfo">-module-debuginfo: Decodes module-level debug info</a>
+</h3>
+<div>
+  <p>This pass decodes the debug info metadata in a module and prints it in a
+  human-readable form.
+
+ For example, run this pass from opt along with the -analyze option, and
+ it'll print to standard output.
+ </p>
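+
+  <p>For example (<code>input.bc</code> is a placeholder):</p>
+
+<blockquote><pre
+>opt -module-debuginfo -analyze input.bc</pre></blockquote>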
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="no-aa">-no-aa: No Alias Analysis (always returns 'may' alias)</a>
+</h3>
+<div>
+ <p>
+ This is the default implementation of the Alias Analysis interface. It always
+ returns "I don't know" for alias queries. NoAA is unlike other alias analysis
+ implementations, in that it does not chain to a previous analysis. As such it
+ doesn't follow many of the rules that other alias analyses must.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="no-profile">-no-profile: No Profile Information</a>
+</h3>
+<div>
+ <p>
+ The default "no profile" implementation of the abstract
+ <code>ProfileInfo</code> interface.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="postdomfrontier">-postdomfrontier: Post-Dominance Frontier Construction</a>
+</h3>
+<div>
+ <p>
+ This pass is a simple post-dominator construction algorithm for finding
+ post-dominator frontiers.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="postdomtree">-postdomtree: Post-Dominator Tree Construction</a>
+</h3>
+<div>
+ <p>
+ This pass is a simple post-dominator construction algorithm for finding
+ post-dominators.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="print-alias-sets">-print-alias-sets: Alias Set Printer</a>
+</h3>
+<div>
+ <p>Yet to be written.</p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="print-callgraph">-print-callgraph: Print a call graph</a>
+</h3>
+<div>
+ <p>
+ This pass, only available in <code>opt</code>, prints the call graph to
+ standard error in a human-readable form.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="print-callgraph-sccs">-print-callgraph-sccs: Print SCCs of the Call Graph</a>
+</h3>
+<div>
+ <p>
+ This pass, only available in <code>opt</code>, prints the SCCs of the call
+ graph to standard error in a human-readable form.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="print-cfg-sccs">-print-cfg-sccs: Print SCCs of each function CFG</a>
+</h3>
+<div>
+ <p>
+ This pass, only available in <code>opt</code>, prints the SCCs of each
+ function CFG to standard error in a human-readable form.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="print-dbginfo">-print-dbginfo: Print debug info in human readable form</a>
+</h3>
+<div>
+ <p>Pass that prints instructions, and associated debug info:</p>
+ <ul>
+
+ <li>source/line/col information</li>
+ <li>original variable name</li>
+ <li>original type name</li>
+ </ul>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="print-dom-info">-print-dom-info: Dominator Info Printer</a>
+</h3>
+<div>
+ <p>Dominator Info Printer.</p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="print-externalfnconstants">-print-externalfnconstants: Print external fn callsites passed constants</a>
+</h3>
+<div>
+ <p>
+ This pass, only available in <code>opt</code>, prints out call sites to
+ external functions that are called with constant arguments. This can be
+ useful when looking for standard library functions we should constant fold
+ or handle in alias analyses.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="print-function">-print-function: Print function to stderr</a>
+</h3>
+<div>
+ <p>
+ The <code>PrintFunctionPass</code> class is designed to be pipelined with
+ other <code>FunctionPass</code>es, and prints out the functions of the module
+ as they are processed.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="print-module">-print-module: Print module to stderr</a>
+</h3>
+<div>
+ <p>
+ This pass simply prints out the entire module when it is executed.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="print-used-types">-print-used-types: Find Used Types</a>
+</h3>
+<div>
+ <p>
+ This pass is used to seek out all of the types in use by the program. Note
+ that this analysis explicitly does not include types only used by the symbol
+  table.</p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="profile-estimator">-profile-estimator: Estimate profiling information</a>
+</h3>
+<div>
+  <p>A pass that estimates profiling information in a very crude and
+  unimaginative way.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="profile-loader">-profile-loader: Load profile information from llvmprof.out</a>
+</h3>
+<div>
+ <p>
+ A concrete implementation of profiling information that loads the information
+ from a profile dump file.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="profile-verifier">-profile-verifier: Verify profiling information</a>
+</h3>
+<div>
+ <p>Pass that checks profiling information for plausibility.</p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="regions">-regions: Detect single entry single exit regions</a>
+</h3>
+<div>
+ <p>
+ The <code>RegionInfo</code> pass detects single entry single exit regions in a
+ function, where a region is defined as any subgraph that is connected to the
+  remaining graph at only two spots. Furthermore, a hierarchical region tree is
+ built.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="scalar-evolution">-scalar-evolution: Scalar Evolution Analysis</a>
+</h3>
+<div>
+ <p>
+ The <code>ScalarEvolution</code> analysis can be used to analyze and
+  categorize scalar expressions in loops. It specializes in recognizing general
+ induction variables, representing them with the abstract and opaque
+ <code>SCEV</code> class. Given this analysis, trip counts of loops and other
+ important properties can be obtained.
+ </p>
+
+ <p>
+ This analysis is primarily useful for induction variable substitution and
+ strength reduction.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="scev-aa">-scev-aa: ScalarEvolution-based Alias Analysis</a>
+</h3>
+<div>
+  <p>Simple alias analysis implemented in terms of ScalarEvolution queries.</p>
+
+  <p>This differs from traditional loop dependence analysis in that it tests
+  for dependencies within a single iteration of a loop, rather than
+  dependencies between different iterations.</p>
+
+  <p>ScalarEvolution has a more complete understanding of pointer arithmetic
+  than BasicAliasAnalysis' collection of ad-hoc analyses.</p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="targetdata">-targetdata: Target Data Layout</a>
+</h3>
+<div>
+  <p>Provides other passes access to information about the size and alignment
+  required by the target ABI for various data types.</p>
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h2><a name="transforms">Transform Passes</a></h2>
+<div>
+ <p>This section describes the LLVM Transform Passes.</p>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="adce">-adce: Aggressive Dead Code Elimination</a>
+</h3>
+<div>
+ <p>ADCE aggressively tries to eliminate code. This pass is similar to
+ <a href="#dce">DCE</a> but it assumes that values are dead until proven
+ otherwise. This is similar to <a href="#sccp">SCCP</a>, except applied to
+ the liveness of values.</p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="always-inline">-always-inline: Inliner for always_inline functions</a>
+</h3>
+<div>
+ <p>A custom inliner that handles only functions that are marked as
+ "always inline".</p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="argpromotion">-argpromotion: Promote 'by reference' arguments to scalars</a>
+</h3>
+<div>
+ <p>
+ This pass promotes "by reference" arguments to be "by value" arguments. In
+ practice, this means looking for internal functions that have pointer
+ arguments. If it can prove, through the use of alias analysis, that an
+ argument is *only* loaded, then it can pass the value into the function
+ instead of the address of the value. This can cause recursive simplification
+ of code and lead to the elimination of allocas (especially in C++ template
+ code like the STL).
+ </p>
+
+ <p>
+ This pass also handles aggregate arguments that are passed into a function,
+ scalarizing them if the elements of the aggregate are only loaded. Note that
+ it refuses to scalarize aggregates which would require passing in more than
+ three operands to the function, because passing thousands of operands for a
+ large array or structure is unprofitable!
+ </p>
+
+ <p>
+  Note that this transformation could also be done for arguments that are only
+  stored to (returning the value instead), but it does not do so currently. This case
+ would be best handled when and if LLVM starts supporting multiple return
+ values from functions.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="bb-vectorize">-bb-vectorize: Basic-Block Vectorization</a>
+</h3>
+<div>
+ <p>This pass combines instructions inside basic blocks to form vector
+ instructions. It iterates over each basic block, attempting to pair
+ compatible instructions, repeating this process until no additional
+ pairs are selected for vectorization. When the outputs of some pair
+ of compatible instructions are used as inputs by some other pair of
+ compatible instructions, those pairs are part of a potential
+ vectorization chain. Instruction pairs are only fused into vector
+ instructions when they are part of a chain longer than some
+ threshold length. Moreover, the pass attempts to find the best
+ possible chain for each pair of compatible instructions. These
+ heuristics are intended to prevent vectorization in cases where
+  it would not yield a performance increase in the resulting code.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="block-placement">-block-placement: Profile Guided Basic Block Placement</a>
+</h3>
+<div>
+ <p>This pass is a very simple profile guided basic block placement algorithm.
+ The idea is to put frequently executed blocks together at the start of the
+ function and hopefully increase the number of fall-through conditional
+ branches. If there is no profile information for a particular function, this
+ pass basically orders blocks in depth-first order.</p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="break-crit-edges">-break-crit-edges: Break critical edges in CFG</a>
+</h3>
+<div>
+ <p>
+ Break all of the critical edges in the CFG by inserting a dummy basic block.
+ It may be "required" by passes that cannot deal with critical edges. This
+ transformation obviously invalidates the CFG, but can update forward dominator
+ (set, immediate dominators, tree, and frontier) information.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="codegenprepare">-codegenprepare: Optimize for code generation</a>
+</h3>
+<div>
+  <p>This pass munges the code in the input function to better prepare it for
+  SelectionDAG-based code generation. This works around limitations in its
+  basic-block-at-a-time approach. It should eventually be removed.</p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="constmerge">-constmerge: Merge Duplicate Global Constants</a>
+</h3>
+<div>
+ <p>
+ Merges duplicate global constants together into a single constant that is
+  shared. This is useful because some passes (e.g., TraceValues) insert a lot of
+ string constants into the program, regardless of whether or not an existing
+ string is available.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="constprop">-constprop: Simple constant propagation</a>
+</h3>
+<div>
+  <p>This pass implements constant propagation and merging. It looks for
+ instructions involving only constant operands and replaces them with a
+ constant value instead of an instruction. For example:</p>
+ <blockquote><pre>add i32 1, 2</pre></blockquote>
+ <p>becomes</p>
+ <blockquote><pre>i32 3</pre></blockquote>
+  <p>NOTE: this pass has a habit of making definitions dead. It is a good
+  idea to run a <a href="#die">DIE</a> (Dead Instruction Elimination) pass
+ sometime after running this pass.</p>
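+
+  <p>For example, a cleanup pipeline following that advice might be invoked
+  like this (a sketch; file names are placeholders):</p>
+
+<blockquote><pre
+>opt -constprop -die -S input.ll -o output.ll</pre></blockquote>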
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="dce">-dce: Dead Code Elimination</a>
+</h3>
+<div>
+ <p>
+ Dead code elimination is similar to <a href="#die">dead instruction
+ elimination</a>, but it rechecks instructions that were used by removed
+ instructions to see if they are newly dead.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="deadargelim">-deadargelim: Dead Argument Elimination</a>
+</h3>
+<div>
+ <p>
+ This pass deletes dead arguments from internal functions. Dead argument
+ elimination removes arguments which are directly dead, as well as arguments
+ only passed into function calls as dead arguments of other functions. This
+  pass also deletes dead return values in a similar way.
+ </p>
+
+ <p>
+ This pass is often useful as a cleanup pass to run after aggressive
+ interprocedural passes, which add possibly-dead arguments.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="deadtypeelim">-deadtypeelim: Dead Type Elimination</a>
+</h3>
+<div>
+ <p>
+  This pass is used to clean up the output of GCC. It eliminates names for types
+  that are unused in the entire translation unit, using the <a
+ href="#findusedtypes">find used types</a> pass.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="die">-die: Dead Instruction Elimination</a>
+</h3>
+<div>
+ <p>
+ Dead instruction elimination performs a single pass over the function,
+ removing instructions that are obviously dead.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="dse">-dse: Dead Store Elimination</a>
+</h3>
+<div>
+ <p>
+ A trivial dead store elimination that only considers basic-block local
+ redundant stores.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="functionattrs">-functionattrs: Deduce function attributes</a>
+</h3>
+<div>
+ <p>A simple interprocedural pass which walks the call-graph, looking for
+ functions which do not access or only read non-local memory, and marking them
+ readnone/readonly. In addition, it marks function arguments (of pointer type)
+ 'nocapture' if a call to the function does not create any copies of the pointer
+ value that outlive the call. This more or less means that the pointer is only
+ dereferenced, and not returned from the function or stored in a global.
+ This pass is implemented as a bottom-up traversal of the call-graph.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="globaldce">-globaldce: Dead Global Elimination</a>
+</h3>
+<div>
+ <p>
+ This transform is designed to eliminate unreachable internal globals from the
+ program. It uses an aggressive algorithm, searching out globals that are
+ known to be alive. After it finds all of the globals which are needed, it
+ deletes whatever is left over. This allows it to delete recursive chunks of
+ the program which are unreachable.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="globalopt">-globalopt: Global Variable Optimizer</a>
+</h3>
+<div>
+ <p>
+ This pass transforms simple global variables that never have their address
+ taken. If obviously true, it marks read/write globals as constant, deletes
+ variables only stored to, etc.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="gvn">-gvn: Global Value Numbering</a>
+</h3>
+<div>
+ <p>
+ This pass performs global value numbering to eliminate fully and partially
+ redundant instructions. It also performs redundant load elimination.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="indvars">-indvars: Canonicalize Induction Variables</a>
+</h3>
+<div>
+ <p>
+ This transformation analyzes and transforms the induction variables (and
+ computations derived from them) into simpler forms suitable for subsequent
+ analysis and transformation.
+ </p>
+
+ <p>
+ This transformation makes the following changes to each loop with an
+ identifiable induction variable:
+ </p>
+
+ <ol>
+ <li>All loops are transformed to have a <em>single</em> canonical
+ induction variable which starts at zero and steps by one.</li>
+ <li>The canonical induction variable is guaranteed to be the first PHI node
+ in the loop header block.</li>
+ <li>Any pointer arithmetic recurrences are raised to use array
+ subscripts.</li>
+ </ol>
+
+ <p>
+ If the trip count of a loop is computable, this pass also makes the following
+ changes:
+ </p>
+
+ <ol>
+ <li>The exit condition for the loop is canonicalized to compare the
+ induction value against the exit value. This turns loops like:
+ <blockquote><pre>for (i = 7; i*i < 1000; ++i)</pre></blockquote>
+ into
+ <blockquote><pre>for (i = 0; i != 25; ++i)</pre></blockquote></li>
+ <li>Any use outside of the loop of an expression derived from the indvar
+ is changed to compute the derived value outside of the loop, eliminating
+ the dependence on the exit value of the induction variable. If the only
+ purpose of the loop is to compute the exit value of some derived
+ expression, this transformation will make the loop dead.</li>
+ </ol>
+
+ <p>
+ This transformation should be followed by strength reduction after all of the
+ desired loop transformations have been performed. Additionally, on targets
+ where it is profitable, the loop could be transformed to count down to zero
+ (the "do loop" optimization).
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="inline">-inline: Function Integration/Inlining</a>
+</h3>
+<div>
+ <p>
+  Bottom-up inlining of functions into callers.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="insert-edge-profiling">-insert-edge-profiling: Insert instrumentation for edge profiling</a>
+</h3>
+<div>
+ <p>
+ This pass instruments the specified program with counters for edge profiling.
+ Edge profiling can give a reasonable approximation of the hot paths through a
+ program, and is used for a wide variety of program transformations.
+ </p>
+
+ <p>
+ Note that this implementation is very naïve. It inserts a counter for
+ <em>every</em> edge in the program, instead of using control flow information
+ to prune the number of counters inserted.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="insert-optimal-edge-profiling">-insert-optimal-edge-profiling: Insert optimal instrumentation for edge profiling</a>
+</h3>
+<div>
+ <p>This pass instruments the specified program with counters for edge profiling.
+ Edge profiling can give a reasonable approximation of the hot paths through a
+ program, and is used for a wide variety of program transformations.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="instcombine">-instcombine: Combine redundant instructions</a>
+</h3>
+<div>
+ <p>
+ Combine instructions to form fewer, simple
+  instructions. This pass does not modify the CFG. This pass is where algebraic
+ simplification happens.
+ </p>
+
+ <p>
+ This pass combines things like:
+ </p>
+
+<blockquote><pre
+>%Y = add i32 %X, 1
+%Z = add i32 %Y, 1</pre></blockquote>
+
+ <p>
+ into:
+ </p>
+
+<blockquote><pre
+>%Z = add i32 %X, 2</pre></blockquote>
+
+ <p>
+ This is a simple worklist driven algorithm.
+ </p>
+
+ <p>
+ This pass guarantees that the following canonicalizations are performed on
+ the program:
+ </p>
+
+ <ul>
+    <li>If a binary operator has a constant operand, it is moved to the
+        right-hand side.</li>
+ <li>Bitwise operators with constant operands are always grouped so that
+ shifts are performed first, then <code>or</code>s, then
+ <code>and</code>s, then <code>xor</code>s.</li>
+ <li>Compare instructions are converted from <code>&lt;</code>,
+ <code>&gt;</code>, <code>≤</code>, or <code>≥</code> to
+ <code>=</code> or <code>≠</code> if possible.</li>
+ <li>All <code>cmp</code> instructions on boolean values are replaced with
+ logical operations.</li>
+ <li><code>add <var>X</var>, <var>X</var></code> is represented as
+ <code>mul <var>X</var>, 2</code> ⇒ <code>shl <var>X</var>, 1</code></li>
+ <li>Multiplies with a constant power-of-two argument are transformed into
+ shifts.</li>
+ <li>… etc.</li>
+ </ul>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="internalize">-internalize: Internalize Global Symbols</a>
+</h3>
+<div>
+ <p>
+ This pass loops over all of the functions in the input module, looking for a
+ main function. If a main function is found, all other functions and all
+ global variables with initializers are marked as internal.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="ipconstprop">-ipconstprop: Interprocedural constant propagation</a>
+</h3>
+<div>
+ <p>
+ This pass implements an <em>extremely</em> simple interprocedural constant
+ propagation pass. It could certainly be improved in many different ways,
+ like using a worklist. This pass makes arguments dead, but does not remove
+ them. The existing dead argument elimination pass should be run after this
+ to clean up the mess.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="ipsccp">-ipsccp: Interprocedural Sparse Conditional Constant Propagation</a>
+</h3>
+<div>
+ <p>
+ An interprocedural variant of <a href="#sccp">Sparse Conditional Constant
+ Propagation</a>.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="jump-threading">-jump-threading: Jump Threading</a>
+</h3>
+<div>
+ <p>
+ Jump threading tries to find distinct threads of control flow running through
+ a basic block. This pass looks at blocks that have multiple predecessors and
+ multiple successors. If one or more of the predecessors of the block can be
+ proven to always cause a jump to one of the successors, we forward the edge
+ from the predecessor to the successor by duplicating the contents of this
+ block.
+ </p>
+ <p>
+ An example of when this can occur is code like this:
+ </p>
+
+ <pre
+>if () { ...
+ X = 4;
+}
+if (X &lt; 3) {</pre>
+
+ <p>
+ In this case, the unconditional branch at the end of the first if can be
+ revectored to the false side of the second if.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="lcssa">-lcssa: Loop-Closed SSA Form Pass</a>
+</h3>
+<div>
+ <p>
+ This pass transforms loops by placing phi nodes at the end of the loops for
+  all values that are live across the loop boundary. For example, it turns
+  the code on the left into the code on the right:
+ </p>
+
+ <pre
+>for (...) for (...)
+ if (c) if (c)
+ X1 = ... X1 = ...
+ else else
+ X2 = ... X2 = ...
+ X3 = phi(X1, X2) X3 = phi(X1, X2)
+... = X3 + 4 X4 = phi(X3)
+ ... = X4 + 4</pre>
+
+ <p>
+ This is still valid LLVM; the extra phi nodes are purely redundant, and will
+ be trivially eliminated by <code>InstCombine</code>. The major benefit of
+ this transformation is that it makes many other loop optimizations, such as
+ LoopUnswitching, simpler.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="licm">-licm: Loop Invariant Code Motion</a>
+</h3>
+<div>
+ <p>
+ This pass performs loop invariant code motion, attempting to remove as much
+ code from the body of a loop as possible. It does this by either hoisting
+ code into the preheader block, or by sinking code to the exit blocks if it is
+ safe. This pass also promotes must-aliased memory locations in the loop to
+ live in registers, thus hoisting and sinking "invariant" loads and stores.
+ </p>
+
+ <p>
+ This pass uses alias analysis for two purposes:
+ </p>
+
+ <ul>
+ <li>Moving loop invariant loads and calls out of loops. If we can determine
+ that a load or call inside of a loop never aliases anything stored to,
+ we can hoist it or sink it like any other instruction.</li>
+ <li>Scalar Promotion of Memory - If there is a store instruction inside of
+ the loop, we try to move the store to happen AFTER the loop instead of
+ inside of the loop. This can only happen if a few conditions are true:
+ <ul>
+ <li>The pointer stored through is loop invariant.</li>
+ <li>There are no stores or loads in the loop which <em>may</em> alias
+ the pointer. There are no calls in the loop which mod/ref the
+ pointer.</li>
+ </ul>
+ If these conditions are true, we can promote the loads and stores in the
+ loop of the pointer to use a temporary alloca'd variable. We then use
+ the mem2reg functionality to construct the appropriate SSA form for the
+ variable.</li>
+ </ul>
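+
+  <p>
+  As a rough illustration (the global <tt>@G</tt> and the value names here are
+  made up for this sketch), a load of a location that is never stored to
+  inside the loop:
+  </p>
+
+<blockquote><pre
+>loop:
+  %i      = phi i32 [ 0, %entry ], [ %i.next, %loop ]
+  %g      = load i32* @G            ; loop invariant
+  %i.next = add i32 %i, %g
+  %done   = icmp eq i32 %i.next, 100
+  br i1 %done, label %exit, label %loop</pre></blockquote>
+
+  <p>
+  might be hoisted into the loop pre-header, so that it executes only once:
+  </p>
+
+<blockquote><pre
+>entry:
+  %g = load i32* @G                 ; hoisted out of the loop
+  br label %loop
+loop:
+  %i      = phi i32 [ 0, %entry ], [ %i.next, %loop ]
+  %i.next = add i32 %i, %g
+  %done   = icmp eq i32 %i.next, 100
+  br i1 %done, label %exit, label %loop</pre></blockquote>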
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="loop-deletion">-loop-deletion: Delete dead loops</a>
+</h3>
+<div>
+ <p>
+ This file implements the Dead Loop Deletion Pass. This pass is responsible
+ for eliminating loops with non-infinite computable trip counts that have no
+ side effects or volatile instructions, and do not contribute to the
+ computation of the function's return value.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="loop-extract">-loop-extract: Extract loops into new functions</a>
+</h3>
+<div>
+ <p>
+ A pass wrapper around the <code>ExtractLoop()</code> scalar transformation to
+ extract each top-level loop into its own new function. If the loop is the
+  <em>only</em> loop in a given function, it is not touched. This pass is most
+  useful for debugging via bugpoint.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="loop-extract-single">-loop-extract-single: Extract at most one loop into a new function</a>
+</h3>
+<div>
+ <p>
+ Similar to <a href="#loop-extract">Extract loops into new functions</a>,
+ this pass extracts one natural loop from the program into a function if it
+ can. This is used by bugpoint.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="loop-reduce">-loop-reduce: Loop Strength Reduction</a>
+</h3>
+<div>
+ <p>
+  This pass performs strength reduction on array references inside loops that
+  have the loop induction variable as one or more of their components. This is
+ accomplished by creating a new value to hold the initial value of the array
+ access for the first iteration, and then creating a new GEP instruction in
+ the loop to increment the value by the appropriate amount.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="loop-rotate">-loop-rotate: Rotate Loops</a>
+</h3>
+<div>
+ <p>A simple loop rotation transformation.</p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="loop-simplify">-loop-simplify: Canonicalize natural loops</a>
+</h3>
+<div>
+ <p>
+ This pass performs several transformations to transform natural loops into a
+ simpler form, which makes subsequent analyses and transformations simpler and
+ more effective.
+ </p>
+
+ <p>
+ Loop pre-header insertion guarantees that there is a single, non-critical
+ entry edge from outside of the loop to the loop header. This simplifies a
+ number of analyses and transformations, such as LICM.
+ </p>
+
+ <p>
+ Loop exit-block insertion guarantees that all exit blocks from the loop
+ (blocks which are outside of the loop that have predecessors inside of the
+ loop) only have predecessors from inside of the loop (and are thus dominated
+ by the loop header). This simplifies transformations such as store-sinking
+ that are built into LICM.
+ </p>
+
+ <p>
+ This pass also guarantees that loops will have exactly one backedge.
+ </p>
+
+ <p>
+ Note that the simplifycfg pass will clean up blocks which are split out but
+ end up being unnecessary, so usage of this pass should not pessimize
+ generated code.
+ </p>
+
+ <p>
+ This pass obviously modifies the CFG, but updates loop information and
+ dominator information.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="loop-unroll">-loop-unroll: Unroll loops</a>
+</h3>
+<div>
+ <p>
+ This pass implements a simple loop unroller. It works best when loops have
+ been canonicalized by the <a href="#indvars"><tt>-indvars</tt></a> pass,
+ allowing it to determine the trip counts of loops easily.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="loop-unswitch">-loop-unswitch: Unswitch loops</a>
+</h3>
+<div>
+ <p>
+ This pass transforms loops that contain branches on loop-invariant conditions
+  to have multiple loops. For example, it turns the code on the left into the
+  code on the right:
+ </p>
+
+ <pre
+>for (...) if (lic)
+ A for (...)
+ if (lic) A; B; C
+ B else
+ C for (...)
+ A; C</pre>
+
+ <p>
+ This can increase the size of the code exponentially (doubling it every time
+ a loop is unswitched) so we only unswitch if the resultant code will be
+ smaller than a threshold.
+ </p>
+
+ <p>
+ This pass expects LICM to be run before it to hoist invariant conditions out
+ of the loop, to make the unswitching opportunity obvious.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="loweratomic">-loweratomic: Lower atomic intrinsics to non-atomic form</a>
+</h3>
+<div>
+ <p>
+ This pass lowers atomic intrinsics to non-atomic form for use in a known
+ non-preemptible environment.
+ </p>
+
+ <p>
+ The pass does not verify that the environment is non-preemptible (in
+ general this would require knowledge of the entire call graph of the
+ program including any libraries which may not be available in bitcode form);
+ it simply lowers every atomic intrinsic.
+ </p>
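+
+  <p>
+  As a rough sketch (the pointer <tt>%ptr</tt> is made up for this example),
+  an atomic read-modify-write such as:
+  </p>
+
+<blockquote><pre
+>%old = atomicrmw add i32* %ptr, i32 1 monotonic</pre></blockquote>
+
+  <p>
+  is rewritten into an ordinary, non-atomic sequence along these lines:
+  </p>
+
+<blockquote><pre
+>%old = load i32* %ptr
+%new = add i32 %old, 1
+store i32 %new, i32* %ptr</pre></blockquote>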
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="lowerinvoke">-lowerinvoke: Lower invoke and unwind, for unwindless code generators</a>
+</h3>
+<div>
+ <p>
+ This transformation is designed for use by code generators which do not yet
+ support stack unwinding. This pass supports two models of exception handling
+ lowering, the 'cheap' support and the 'expensive' support.
+ </p>
+
+ <p>
+  'Cheap' exception handling support allows any program which does not
+  dynamically "throw an exception" to execute correctly, by turning 'invoke'
+  instructions into calls and by turning 'unwind' instructions into calls to
+  abort(). If the program does dynamically use the unwind instruction, the
+  program will print a message and then abort.
+ </p>
+
+ <p>
+ 'Expensive' exception handling support gives the full exception handling
+ support to the program at the cost of making the 'invoke' instruction
+ really expensive. It basically inserts setjmp/longjmp calls to emulate the
+ exception handling as necessary.
+ </p>
+
+ <p>
+ Because the 'expensive' support slows down programs a lot, and EH is only
+ used for a subset of the programs, it must be specifically enabled by the
+ <tt>-enable-correct-eh-support</tt> option.
+ </p>
+
+ <p>
+ Note that after this pass runs the CFG is not entirely accurate (exceptional
+ control flow edges are not correct anymore) so only very simple things should
+ be done after the lowerinvoke pass has run (like generation of native code).
+ This should not be used as a general purpose "my LLVM-to-LLVM pass doesn't
+ support the invoke instruction yet" lowering pass.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="lowerswitch">-lowerswitch: Lower SwitchInst's to branches</a>
+</h3>
+<div>
+ <p>
+ Rewrites <tt>switch</tt> instructions with a sequence of branches, which
+ allows targets to get away with not implementing the switch instruction until
+ it is convenient.
+ </p>
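+
+  <p>
+  As a rough sketch (the exact lowering strategy is an implementation detail),
+  a <tt>switch</tt> such as:
+  </p>
+
+<blockquote><pre
+>switch i32 %val, label %otherwise [ i32 0, label %onzero
+                                    i32 1, label %onone ]</pre></blockquote>
+
+  <p>
+  might be rewritten into a chain of compares and conditional branches along
+  these lines:
+  </p>
+
+<blockquote><pre
+>  %is0 = icmp eq i32 %val, 0
+  br i1 %is0, label %onzero, label %check1
+check1:
+  %is1 = icmp eq i32 %val, 1
+  br i1 %is1, label %onone, label %otherwise</pre></blockquote>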
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="mem2reg">-mem2reg: Promote Memory to Register</a>
+</h3>
+<div>
+ <p>
+ This file promotes memory references to be register references. It promotes
+ <tt>alloca</tt> instructions which only have <tt>load</tt>s and
+ <tt>store</tt>s as uses. An <tt>alloca</tt> is transformed by using dominator
+ frontiers to place <tt>phi</tt> nodes, then traversing the function in
+ depth-first order to rewrite <tt>load</tt>s and <tt>store</tt>s as
+ appropriate. This is just the standard SSA construction algorithm to construct
+ "pruned" SSA form.
+ </p>
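+
+  <p>
+  For example (the function <tt>@example</tt> is made up for this sketch), an
+  <tt>alloca</tt> that is only loaded from and stored to:
+  </p>
+
+<blockquote><pre
+>define i32 @example() {
+entry:
+  %x = alloca i32
+  store i32 4, i32* %x
+  %v = load i32* %x
+  ret i32 %v
+}</pre></blockquote>
+
+  <p>
+  is promoted so that the stored value flows directly to its uses and the
+  <tt>alloca</tt> disappears:
+  </p>
+
+<blockquote><pre
+>define i32 @example() {
+entry:
+  ret i32 4
+}</pre></blockquote>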
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="memcpyopt">-memcpyopt: MemCpy Optimization</a>
+</h3>
+<div>
+ <p>
+ This pass performs various transformations related to eliminating memcpy
+  calls, or transforming sets of stores into memset calls.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="mergefunc">-mergefunc: Merge Functions</a>
+</h3>
+<div>
+  <p>This pass looks for equivalent functions that are mergeable and folds them.</p>
+
+  <p>A hash is computed from the function, based on its type and number of
+  basic blocks.</p>
+
+  <p>Once all hashes are computed, we perform an expensive equality comparison
+  on each function pair. This takes n^2/2 comparisons per bucket, so it's
+  important that the hash function be high quality. The equality comparison
+  iterates through each instruction in each basic block.</p>
+
+  <p>When a match is found the functions are folded. If both functions are
+  overridable, we move the functionality into a new internal function and
+  leave two overridable thunks to it.</p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="mergereturn">-mergereturn: Unify function exit nodes</a>
+</h3>
+<div>
+ <p>
+ Ensure that functions have at most one <tt>ret</tt> instruction in them.
+ Additionally, it keeps track of which node is the new exit node of the CFG.
+ </p>
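+
+  <p>
+  Schematically (the block and value names here are only illustrative), a
+  function with two returns:
+  </p>
+
+<blockquote><pre
+>then:
+  ret i32 1
+else:
+  ret i32 0</pre></blockquote>
+
+  <p>
+  is rewritten so that both paths branch to a single unified return block:
+  </p>
+
+<blockquote><pre
+>then:
+  br label %UnifiedReturnBlock
+else:
+  br label %UnifiedReturnBlock
+UnifiedReturnBlock:
+  %retval = phi i32 [ 1, %then ], [ 0, %else ]
+  ret i32 %retval</pre></blockquote>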
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="partial-inliner">-partial-inliner: Partial Inliner</a>
+</h3>
+<div>
+ <p>This pass performs partial inlining, typically by inlining an if
+ statement that surrounds the body of the function.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="prune-eh">-prune-eh: Remove unused exception handling info</a>
+</h3>
+<div>
+ <p>
+ This file implements a simple interprocedural pass which walks the call-graph,
+ turning <tt>invoke</tt> instructions into <tt>call</tt> instructions if and
+ only if the callee cannot throw an exception. It implements this as a
+ bottom-up traversal of the call-graph.
+ </p>
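+
+  <p>
+  For instance (a schematic fragment; <tt>@callee</tt> is made up for this
+  example), if <tt>@callee</tt> is proven unable to throw, an invoke:
+  </p>
+
+<blockquote><pre
+>invoke void @callee() to label %cont unwind label %lpad</pre></blockquote>
+
+  <p>
+  can be replaced by a plain call followed by an unconditional branch:
+  </p>
+
+<blockquote><pre
+>call void @callee()
+br label %cont</pre></blockquote>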
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="reassociate">-reassociate: Reassociate expressions</a>
+</h3>
+<div>
+ <p>
+ This pass reassociates commutative expressions in an order that is designed
+ to promote better constant propagation, GCSE, LICM, PRE, etc.
+ </p>
+
+ <p>
+ For example: 4 + (<var>x</var> + 5) ⇒ <var>x</var> + (4 + 5)
+ </p>
+
+ <p>
+ In the implementation of this algorithm, constants are assigned rank = 0,
+ function arguments are rank = 1, and other values are assigned ranks
+  corresponding to the reverse post-order traversal of the current function
+ (starting at 2), which effectively gives values in deep loops higher rank
+ than values not in loops.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="reg2mem">-reg2mem: Demote all values to stack slots</a>
+</h3>
+<div>
+ <p>
+ This file demotes all registers to memory references. It is intended to be
+ the inverse of <a href="#mem2reg"><tt>-mem2reg</tt></a>. By converting to
+ <tt>load</tt> instructions, the only values live across basic blocks are
+ <tt>alloca</tt> instructions and <tt>load</tt> instructions before
+ <tt>phi</tt> nodes. It is intended that this should make CFG hacking much
+ easier. To make later hacking easier, the entry block is split into two, such
+ that all introduced <tt>alloca</tt> instructions (and nothing else) are in the
+ entry block.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="scalarrepl">-scalarrepl: Scalar Replacement of Aggregates (DT)</a>
+</h3>
+<div>
+ <p>
+ The well-known scalar replacement of aggregates transformation. This
+ transform breaks up <tt>alloca</tt> instructions of aggregate type (structure
+ or array) into individual <tt>alloca</tt> instructions for each member if
+ possible. Then, if possible, it transforms the individual <tt>alloca</tt>
+ instructions into nice clean scalar SSA form.
+ </p>
+
+ <p>
+ This combines a simple scalar replacement of aggregates algorithm with the <a
+ href="#mem2reg"><tt>mem2reg</tt></a> algorithm because often interact,
+ especially for C++ programs. As such, iterating between <tt>scalarrepl</tt>,
+ then <a href="#mem2reg"><tt>mem2reg</tt></a> until we run out of things to
+ promote works well.
+ </p>
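+
+  <p>
+  Schematically (the names are only illustrative), an aggregate
+  <tt>alloca</tt> such as:
+  </p>
+
+<blockquote><pre
+>%pair = alloca { i32, i32 }
+%a = getelementptr { i32, i32 }* %pair, i32 0, i32 0
+store i32 1, i32* %a
+%b = getelementptr { i32, i32 }* %pair, i32 0, i32 1
+store i32 2, i32* %b</pre></blockquote>
+
+  <p>
+  is first broken up into one <tt>alloca</tt> per member:
+  </p>
+
+<blockquote><pre
+>%pair.0 = alloca i32
+%pair.1 = alloca i32
+store i32 1, i32* %pair.0
+store i32 2, i32* %pair.1</pre></blockquote>
+
+  <p>
+  after which the scalar <tt>alloca</tt> instructions can be promoted into SSA
+  values as described above.
+  </p>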
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="sccp">-sccp: Sparse Conditional Constant Propagation</a>
+</h3>
+<div>
+ <p>
+ Sparse conditional constant propagation and merging, which can be summarized
+ as:
+ </p>
+
+ <ol>
+ <li>Assumes values are constant unless proven otherwise</li>
+ <li>Assumes BasicBlocks are dead unless proven otherwise</li>
+ <li>Proves values to be constant, and replaces them with constants</li>
+ <li>Proves conditional branches to be unconditional</li>
+ </ol>
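+
+  <p>
+  For example, in a contrived fragment like the following (names made up for
+  this sketch):
+  </p>
+
+<blockquote><pre
+>%x    = add i32 2, 2
+%cond = icmp eq i32 %x, 4
+br i1 %cond, label %taken, label %dead</pre></blockquote>
+
+  <p>
+  the pass proves that <tt>%cond</tt> is always true, rewrites the branch into
+  an unconditional <tt>br label %taken</tt>, and marks <tt>%dead</tt> as
+  unreachable, leaving <tt>%x</tt> and <tt>%cond</tt> as dead definitions.
+  </p>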
+
+ <p>
+ Note that this pass has a habit of making definitions be dead. It is a good
+  idea to run a DCE pass sometime after running this pass.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="simplify-libcalls">-simplify-libcalls: Simplify well-known library calls</a>
+</h3>
+<div>
+ <p>
+ Applies a variety of small optimizations for calls to specific well-known
+ function calls (e.g. runtime library functions). For example, a call
+ <tt>exit(3)</tt> that occurs within the <tt>main()</tt> function can be
+ transformed into simply <tt>return 3</tt>.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="simplifycfg">-simplifycfg: Simplify the CFG</a>
+</h3>
+<div>
+ <p>
+ Performs dead code elimination and basic block merging. Specifically:
+ </p>
+
+ <ol>
+ <li>Removes basic blocks with no predecessors.</li>
+ <li>Merges a basic block into its predecessor if there is only one and the
+ predecessor only has one successor.</li>
+ <li>Eliminates PHI nodes for basic blocks with a single predecessor.</li>
+ <li>Eliminates a basic block that only contains an unconditional
+ branch.</li>
+ </ol>
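+
+  <p>
+  For example (the function <tt>@example</tt> is made up for this sketch),
+  blocks that contain only an unconditional branch are folded away:
+  </p>
+
+<blockquote><pre
+>define void @example() {
+entry:
+  br label %middle
+middle:
+  br label %end
+end:
+  ret void
+}</pre></blockquote>
+
+  <p>
+  becomes simply:
+  </p>
+
+<blockquote><pre
+>define void @example() {
+entry:
+  ret void
+}</pre></blockquote>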
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="sink">-sink: Code sinking</a>
+</h3>
+<div>
+ <p>This pass moves instructions into successor blocks, when possible, so that
+ they aren't executed on paths where their results aren't needed.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="sretpromotion">-sretpromotion: Promote sret arguments to multiple ret values</a>
+</h3>
+<div>
+ <p>
+ This pass finds functions that return a struct (using a pointer to the struct
+ as the first argument of the function, marked with the '<tt>sret</tt>' attribute) and
+ replaces them with a new function that simply returns each of the elements of
+ that struct (using multiple return values).
+ </p>
+
+ <p>
+ This pass works under a number of conditions:
+ </p>
+
+ <ul>
+ <li>The returned struct must not contain other structs</li>
+ <li>The returned struct must only be used to load values from</li>
+ <li>The placeholder struct passed in is the result of an <tt>alloca</tt></li>
+ </ul>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="strip">-strip: Strip all symbols from a module</a>
+</h3>
+<div>
+ <p>
+  Performs code stripping. This transformation can delete:
+ </p>
+
+ <ol>
+ <li>names for virtual registers</li>
+ <li>symbols for internal globals and functions</li>
+ <li>debug information</li>
+ </ol>
+
+ <p>
+  Note that this transformation makes code much less readable, so it should
+ only be used in situations where the <tt>strip</tt> utility would be used,
+ such as reducing code size or making it harder to reverse engineer code.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="strip-dead-debug-info">-strip-dead-debug-info: Strip debug info for unused symbols</a>
+</h3>
+<div>
+ <p>
+  Performs code stripping. This transformation can delete:
+ </p>
+
+ <ol>
+ <li>names for virtual registers</li>
+ <li>symbols for internal globals and functions</li>
+ <li>debug information</li>
+ </ol>
+
+ <p>
+  Note that this transformation makes code much less readable, so it should
+ only be used in situations where the <tt>strip</tt> utility would be used,
+ such as reducing code size or making it harder to reverse engineer code.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="strip-dead-prototypes">-strip-dead-prototypes: Strip Unused Function Prototypes</a>
+</h3>
+<div>
+ <p>
+ This pass loops over all of the functions in the input module, looking for
+  dead declarations, and removes them. Dead declarations are declarations of
+ functions for which no implementation is available (i.e., declarations for
+ unused library functions).
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="strip-debug-declare">-strip-debug-declare: Strip all llvm.dbg.declare intrinsics</a>
+</h3>
+<div>
+ <p>This pass implements code stripping. Specifically, it can delete:</p>
+ <ul>
+ <li>names for virtual registers</li>
+ <li>symbols for internal globals and functions</li>
+ <li>debug information</li>
+ </ul>
+ <p>
+ Note that this transformation makes code much less readable, so it should
+ only be used in situations where the 'strip' utility would be used, such as
+ reducing code size or making it harder to reverse engineer code.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="strip-nondebug">-strip-nondebug: Strip all symbols, except dbg symbols, from a module</a>
+</h3>
+<div>
+ <p>This pass implements code stripping. Specifically, it can delete:</p>
+ <ul>
+ <li>names for virtual registers</li>
+ <li>symbols for internal globals and functions</li>
+ <li>debug information</li>
+ </ul>
+ <p>
+ Note that this transformation makes code much less readable, so it should
+ only be used in situations where the 'strip' utility would be used, such as
+ reducing code size or making it harder to reverse engineer code.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="tailcallelim">-tailcallelim: Tail Call Elimination</a>
+</h3>
+<div>
+ <p>
+ This file transforms calls of the current function (self recursion) followed
+  by a return instruction into a branch to the entry of the function, creating
+ a loop. This pass also implements the following extensions to the basic
+ algorithm:
+ </p>
+
+ <ul>
+ <li>Trivial instructions between the call and return do not prevent the
+ transformation from taking place, though currently the analysis cannot
+ support moving any really useful instructions (only dead ones).
+ <li>This pass transforms functions that are prevented from being tail
+ recursive by an associative expression to use an accumulator variable,
+ thus compiling the typical naive factorial or <tt>fib</tt> implementation
+ into efficient code.
+ <li>TRE is performed if the function returns void, if the return
+ returns the result returned by the call, or if the function returns a
+ run-time constant on all exits from the function. It is possible, though
+ unlikely, that the return returns something else (like constant 0), and
+ can still be TRE'd. It can be TRE'd if <em>all other</em> return
+ instructions in the function return the exact same value.
+    <li>If it can prove that callees do not access their caller's stack frame,
+ they are marked as eligible for tail call elimination (by the code
+ generator).
+ </ul>
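+
+  <p>
+  As a sketch of the basic transformation (the function <tt>@count</tt> and the
+  value names are made up for this example), a self-recursive tail call:
+  </p>
+
+<blockquote><pre
+>define i32 @count(i32 %n) {
+entry:
+  %done = icmp eq i32 %n, 0
+  br i1 %done, label %exit, label %recurse
+recurse:
+  %n1 = sub i32 %n, 1
+  %r = call i32 @count(i32 %n1)
+  ret i32 %r
+exit:
+  ret i32 0
+}</pre></blockquote>
+
+  <p>
+  is replaced with a branch back to the entry of the function, roughly:
+  </p>
+
+<blockquote><pre
+>define i32 @count(i32 %n) {
+entry:
+  br label %tailrecurse
+tailrecurse:
+  %n.tr = phi i32 [ %n, %entry ], [ %n1, %recurse ]
+  %done = icmp eq i32 %n.tr, 0
+  br i1 %done, label %exit, label %recurse
+recurse:
+  %n1 = sub i32 %n.tr, 1
+  br label %tailrecurse
+exit:
+  ret i32 0
+}</pre></blockquote>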
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="tailduplicate">-tailduplicate: Tail Duplication</a>
+</h3>
+<div>
+ <p>
+ This pass performs a limited form of tail duplication, intended to simplify
+ CFGs by removing some unconditional branches. This pass is necessary to
+ straighten out loops created by the C front-end, but also is capable of
+ making other code nicer. After this pass is run, the CFG simplify pass
+ should be run to clean up the mess.
+ </p>
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h2><a name="utilities">Utility Passes</a></h2>
+<div>
+ <p>This section describes the LLVM Utility Passes.</p>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="deadarghaX0r">-deadarghaX0r: Dead Argument Hacking (BUGPOINT USE ONLY; DO NOT USE)</a>
+</h3>
+<div>
+ <p>
+ Same as dead argument elimination, but deletes arguments to functions which
+ are external. This is only for use by <a
+ href="Bugpoint.html">bugpoint</a>.</p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="extract-blocks">-extract-blocks: Extract Basic Blocks From Module (for bugpoint use)</a>
+</h3>
+<div>
+ <p>
+ This pass is used by bugpoint to extract all blocks from the module into their
+ own functions.</p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="instnamer">-instnamer: Assign names to anonymous instructions</a>
+</h3>
+<div>
+  <p>This is a little utility pass that gives instructions names; this is mostly
+ useful when diffing the effect of an optimization because deleting an
+ unnamed instruction can change all other instruction numbering, making the
+ diff very noisy.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="preverify">-preverify: Preliminary module verification</a>
+</h3>
+<div>
+ <p>
+ Ensures that the module is in the form required by the <a
+ href="#verifier">Module Verifier</a> pass.
+ </p>
+
+ <p>
+ Running the verifier runs this pass automatically, so there should be no need
+ to use it directly.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="verify">-verify: Module Verifier</a>
+</h3>
+<div>
+ <p>
+  Verifies LLVM IR. This is useful to run after an optimization which is
+  undergoing testing. Note that <tt>llvm-as</tt> verifies its input before
+  emitting bitcode, and also that malformed bitcode is likely to make LLVM
+  crash. All language front-ends are therefore encouraged to verify their output
+  before performing optimizing transformations. Among other things, the checks
+  performed include:
+  </p>
+
+ <ul>
+ <li>Both of a binary operator's parameters are of the same type.</li>
+ <li>Verify that the indices of mem access instructions match other
+ operands.</li>
+    <li>Verify that arithmetic and other operations are only performed on
+        first-class types, e.g. that shifts and logical operations only happen
+        on integral types.</li>
+ <li>All of the constants in a switch statement are of the correct type.</li>
+ <li>The code is in valid SSA form.</li>
+ <li>It is illegal to put a label into any other type (like a structure) or
+ to return one.</li>
+ <li>Only phi nodes can be self referential: <tt>%x = add i32 %x, %x</tt> is
+ invalid.</li>
+ <li>PHI nodes must have an entry for each predecessor, with no extras.</li>
+ <li>PHI nodes must be the first thing in a basic block, all grouped
+ together.</li>
+ <li>PHI nodes must have at least one entry.</li>
+    <li>All basic blocks must end with a terminator instruction, and must not
+        contain terminator instructions anywhere else.</li>
+ <li>The entry node to a function must not have predecessors.</li>
+ <li>All Instructions must be embedded into a basic block.</li>
+ <li>Functions cannot take a void-typed parameter.</li>
+ <li>Verify that a function's argument list agrees with its declared
+ type.</li>
+ <li>It is illegal to specify a name for a void value.</li>
+ <li>It is illegal to have an internal global value with no initializer.</li>
+ <li>It is illegal to have a ret instruction that returns a value that does
+ not agree with the function return value type.</li>
+ <li>Function call argument types match the function prototype.</li>
+ <li>All other things that are tested by asserts spread about the code.</li>
+ </ul>
+
+ <p>
+ Note that this does not provide full security verification (like Java), but
+ instead just tries to ensure that code is well-formed.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="view-cfg">-view-cfg: View CFG of function</a>
+</h3>
+<div>
+ <p>
+ Displays the control flow graph using the GraphViz tool.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="view-cfg-only">-view-cfg-only: View CFG of function (with no function bodies)</a>
+</h3>
+<div>
+ <p>
+ Displays the control flow graph using the GraphViz tool, but omitting function
+ bodies.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="view-dom">-view-dom: View dominance tree of function</a>
+</h3>
+<div>
+ <p>
+ Displays the dominator tree using the GraphViz tool.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="view-dom-only">-view-dom-only: View dominance tree of function (with no function bodies)</a>
+</h3>
+<div>
+ <p>
+ Displays the dominator tree using the GraphViz tool, but omitting function
+ bodies.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="view-postdom">-view-postdom: View postdominance tree of function</a>
+</h3>
+<div>
+ <p>
+ Displays the post dominator tree using the GraphViz tool.
+ </p>
+</div>
+
+<!-------------------------------------------------------------------------- -->
+<h3>
+ <a name="view-postdom-only">-view-postdom-only: View postdominance tree of function (with no function bodies)</a>
+</h3>
+<div>
+ <p>
+ Displays the post dominator tree using the GraphViz tool, but omitting
+ function bodies.
+ </p>
+</div>
+
+</div>
+
+<!-- *********************************************************************** -->
+
+<hr>
+<address>
+ <a href="http://jigsaw.w3.org/css-validator/check/referer"><img
+ src="http://jigsaw.w3.org/css-validator/images/vcss-blue" alt="Valid CSS"></a>
+ <a href="http://validator.w3.org/check/referer"><img
+ src="http://www.w3.org/Icons/valid-html401-blue" alt="Valid HTML 4.01"></a>
+
+ <a href="mailto:rspencer@x10sys.com">Reid Spencer</a><br>
+ <a href="http://llvm.org/">LLVM Compiler Infrastructure</a><br>
+ Last modified: $Date$
+</address>
+
+</body>
+</html>
diff --git a/docs/ProgrammersManual.html b/docs/ProgrammersManual.html
new file mode 100644
index 00000000000..036c387d7a5
--- /dev/null
+++ b/docs/ProgrammersManual.html
@@ -0,0 +1,4137 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
+ "http://www.w3.org/TR/html4/strict.dtd">
+<html>
+<head>
+ <meta http-equiv="Content-type" content="text/html;charset=UTF-8">
+ <title>LLVM Programmer's Manual</title>
+ <link rel="stylesheet" href="_static/llvm.css" type="text/css">
+</head>
+<body>
+
+<h1>
+ LLVM Programmer's Manual
+</h1>
+
+<ol>
+ <li><a href="#introduction">Introduction</a></li>
+ <li><a href="#general">General Information</a>
+ <ul>
+ <li><a href="#stl">The C++ Standard Template Library</a></li>
+<!--
+ <li>The <tt>-time-passes</tt> option</li>
+ <li>How to use the LLVM Makefile system</li>
+ <li>How to write a regression test</li>
+
+-->
+ </ul>
+ </li>
+ <li><a href="#apis">Important and useful LLVM APIs</a>
+ <ul>
+ <li><a href="#isa">The <tt>isa&lt;&gt;</tt>, <tt>cast&lt;&gt;</tt>
+and <tt>dyn_cast&lt;&gt;</tt> templates</a> </li>
+ <li><a href="#string_apis">Passing strings (the <tt>StringRef</tt>
+and <tt>Twine</tt> classes)</a>
+ <ul>
+ <li><a href="#StringRef">The <tt>StringRef</tt> class</a> </li>
+ <li><a href="#Twine">The <tt>Twine</tt> class</a> </li>
+ </ul>
+ </li>
+ <li><a href="#DEBUG">The <tt>DEBUG()</tt> macro and <tt>-debug</tt>
+option</a>
+ <ul>
+ <li><a href="#DEBUG_TYPE">Fine grained debug info with <tt>DEBUG_TYPE</tt>
+and the <tt>-debug-only</tt> option</a> </li>
+ </ul>
+ </li>
+ <li><a href="#Statistic">The <tt>Statistic</tt> class &amp; <tt>-stats</tt>
+option</a></li>
+<!--
+ <li>The <tt>InstVisitor</tt> template
+ <li>The general graph API
+-->
+ <li><a href="#ViewGraph">Viewing graphs while debugging code</a></li>
+ </ul>
+ </li>
+ <li><a href="#datastructure">Picking the Right Data Structure for a Task</a>
+ <ul>
+ <li><a href="#ds_sequential">Sequential Containers (std::vector, std::list, etc)</a>
+ <ul>
+ <li><a href="#dss_arrayref">llvm/ADT/ArrayRef.h</a></li>
+ <li><a href="#dss_fixedarrays">Fixed Size Arrays</a></li>
+ <li><a href="#dss_heaparrays">Heap Allocated Arrays</a></li>
+ <li><a href="#dss_tinyptrvector">"llvm/ADT/TinyPtrVector.h"</a></li>
+ <li><a href="#dss_smallvector">"llvm/ADT/SmallVector.h"</a></li>
+ <li><a href="#dss_vector">&lt;vector&gt;</a></li>
+ <li><a href="#dss_deque">&lt;deque&gt;</a></li>
+ <li><a href="#dss_list">&lt;list&gt;</a></li>
+ <li><a href="#dss_ilist">llvm/ADT/ilist.h</a></li>
+ <li><a href="#dss_packedvector">llvm/ADT/PackedVector.h</a></li>
+ <li><a href="#dss_other">Other Sequential Container Options</a></li>
+ </ul></li>
+ <li><a href="#ds_string">String-like containers</a>
+ <ul>
+ <li><a href="#dss_stringref">llvm/ADT/StringRef.h</a></li>
+ <li><a href="#dss_twine">llvm/ADT/Twine.h</a></li>
+ <li><a href="#dss_smallstring">llvm/ADT/SmallString.h</a></li>
+ <li><a href="#dss_stdstring">std::string</a></li>
+ </ul></li>
+ <li><a href="#ds_set">Set-Like Containers (std::set, SmallSet, SetVector, etc)</a>
+ <ul>
+ <li><a href="#dss_sortedvectorset">A sorted 'vector'</a></li>
+ <li><a href="#dss_smallset">"llvm/ADT/SmallSet.h"</a></li>
+ <li><a href="#dss_smallptrset">"llvm/ADT/SmallPtrSet.h"</a></li>
+ <li><a href="#dss_denseset">"llvm/ADT/DenseSet.h"</a></li>
+ <li><a href="#dss_sparseset">"llvm/ADT/SparseSet.h"</a></li>
+ <li><a href="#dss_FoldingSet">"llvm/ADT/FoldingSet.h"</a></li>
+ <li><a href="#dss_set">&lt;set&gt;</a></li>
+ <li><a href="#dss_setvector">"llvm/ADT/SetVector.h"</a></li>
+ <li><a href="#dss_uniquevector">"llvm/ADT/UniqueVector.h"</a></li>
+ <li><a href="#dss_immutableset">"llvm/ADT/ImmutableSet.h"</a></li>
+ <li><a href="#dss_otherset">Other Set-Like Container Options</a></li>
+ </ul></li>
+ <li><a href="#ds_map">Map-Like Containers (std::map, DenseMap, etc)</a>
+ <ul>
+ <li><a href="#dss_sortedvectormap">A sorted 'vector'</a></li>
+ <li><a href="#dss_stringmap">"llvm/ADT/StringMap.h"</a></li>
+ <li><a href="#dss_indexedmap">"llvm/ADT/IndexedMap.h"</a></li>
+ <li><a href="#dss_densemap">"llvm/ADT/DenseMap.h"</a></li>
+ <li><a href="#dss_valuemap">"llvm/ADT/ValueMap.h"</a></li>
+ <li><a href="#dss_intervalmap">"llvm/ADT/IntervalMap.h"</a></li>
+ <li><a href="#dss_map">&lt;map&gt;</a></li>
+ <li><a href="#dss_inteqclasses">"llvm/ADT/IntEqClasses.h"</a></li>
+ <li><a href="#dss_immutablemap">"llvm/ADT/ImmutableMap.h"</a></li>
+ <li><a href="#dss_othermap">Other Map-Like Container Options</a></li>
+ </ul></li>
+ <li><a href="#ds_bit">BitVector-like containers</a>
+ <ul>
+ <li><a href="#dss_bitvector">A dense bitvector</a></li>
+ <li><a href="#dss_smallbitvector">A "small" dense bitvector</a></li>
+ <li><a href="#dss_sparsebitvector">A sparse bitvector</a></li>
+ </ul></li>
+ </ul>
+ </li>
+ <li><a href="#common">Helpful Hints for Common Operations</a>
+ <ul>
+ <li><a href="#inspection">Basic Inspection and Traversal Routines</a>
+ <ul>
+ <li><a href="#iterate_function">Iterating over the <tt>BasicBlock</tt>s
+in a <tt>Function</tt></a> </li>
+ <li><a href="#iterate_basicblock">Iterating over the <tt>Instruction</tt>s
+in a <tt>BasicBlock</tt></a> </li>
+ <li><a href="#iterate_institer">Iterating over the <tt>Instruction</tt>s
+in a <tt>Function</tt></a> </li>
+ <li><a href="#iterate_convert">Turning an iterator into a
+class pointer</a> </li>
+ <li><a href="#iterate_complex">Finding call sites: a more
+complex example</a> </li>
+ <li><a href="#calls_and_invokes">Treating calls and invokes
+the same way</a> </li>
+ <li><a href="#iterate_chains">Iterating over def-use &amp;
+use-def chains</a> </li>
+ <li><a href="#iterate_preds">Iterating over predecessors &amp;
+successors of blocks</a></li>
+ </ul>
+ </li>
+ <li><a href="#simplechanges">Making simple changes</a>
+ <ul>
+ <li><a href="#schanges_creating">Creating and inserting new
+ <tt>Instruction</tt>s</a> </li>
+ <li><a href="#schanges_deleting">Deleting <tt>Instruction</tt>s</a> </li>
+ <li><a href="#schanges_replacing">Replacing an <tt>Instruction</tt>
+with another <tt>Value</tt></a> </li>
+ <li><a href="#schanges_deletingGV">Deleting <tt>GlobalVariable</tt>s</a> </li>
+ </ul>
+ </li>
+ <li><a href="#create_types">How to Create Types</a></li>
+<!--
+ <li>Working with the Control Flow Graph
+ <ul>
+ <li>Accessing predecessors and successors of a <tt>BasicBlock</tt>
+ <li>
+ <li>
+ </ul>
+-->
+ </ul>
+ </li>
+
+ <li><a href="#threading">Threads and LLVM</a>
+ <ul>
+ <li><a href="#startmultithreaded">Entering and Exiting Multithreaded Mode
+ </a></li>
+ <li><a href="#shutdown">Ending execution with <tt>llvm_shutdown()</tt></a></li>
+ <li><a href="#managedstatic">Lazy initialization with <tt>ManagedStatic</tt></a></li>
+ <li><a href="#llvmcontext">Achieving Isolation with <tt>LLVMContext</tt></a></li>
+ <li><a href="#jitthreading">Threads and the JIT</a></li>
+ </ul>
+ </li>
+
+ <li><a href="#advanced">Advanced Topics</a>
+ <ul>
+
+ <li><a href="#SymbolTable">The <tt>ValueSymbolTable</tt> class</a></li>
+ <li><a href="#UserLayout">The <tt>User</tt> and owned <tt>Use</tt> classes' memory layout</a></li>
+ </ul></li>
+
+ <li><a href="#coreclasses">The Core LLVM Class Hierarchy Reference</a>
+ <ul>
+ <li><a href="#Type">The <tt>Type</tt> class</a> </li>
+ <li><a href="#Module">The <tt>Module</tt> class</a></li>
+ <li><a href="#Value">The <tt>Value</tt> class</a>
+ <ul>
+ <li><a href="#User">The <tt>User</tt> class</a>
+ <ul>
+ <li><a href="#Instruction">The <tt>Instruction</tt> class</a></li>
+ <li><a href="#Constant">The <tt>Constant</tt> class</a>
+ <ul>
+ <li><a href="#GlobalValue">The <tt>GlobalValue</tt> class</a>
+ <ul>
+ <li><a href="#Function">The <tt>Function</tt> class</a></li>
+ <li><a href="#GlobalVariable">The <tt>GlobalVariable</tt> class</a></li>
+ </ul>
+ </li>
+ </ul>
+ </li>
+ </ul>
+ </li>
+ <li><a href="#BasicBlock">The <tt>BasicBlock</tt> class</a></li>
+ <li><a href="#Argument">The <tt>Argument</tt> class</a></li>
+ </ul>
+ </li>
+ </ul>
+ </li>
+</ol>
+
+<div class="doc_author">
+ <p>Written by <a href="mailto:sabre@nondot.org">Chris Lattner</a>,
+ <a href="mailto:dhurjati@cs.uiuc.edu">Dinakar Dhurjati</a>,
+ <a href="mailto:ggreif@gmail.com">Gabor Greif</a>,
+ <a href="mailto:jstanley@cs.uiuc.edu">Joel Stanley</a>,
+ <a href="mailto:rspencer@x10sys.com">Reid Spencer</a> and
+ <a href="mailto:owen@apple.com">Owen Anderson</a></p>
+</div>
+
+<!-- *********************************************************************** -->
+<h2>
+ <a name="introduction">Introduction </a>
+</h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>This document is meant to highlight some of the important classes and
+interfaces available in the LLVM source-base. This manual is not
+intended to explain what LLVM is, how it works, and what LLVM code looks
+like. It assumes that you know the basics of LLVM and are interested
+in writing transformations or otherwise analyzing or manipulating the
+code.</p>
+
+<p>This document should get you oriented so that you can find your
+way in the continuously growing source code that makes up the LLVM
+infrastructure. Note that this manual is not intended to serve as a
+replacement for reading the source code, so if you think there should be
+a method in one of these classes to do something, but it's not listed,
+check the source. Links to the <a href="/doxygen/">doxygen</a> sources
+are provided to make this as easy as possible.</p>
+
+<p>The first section of this document describes general information that is
+useful to know when working in the LLVM infrastructure, and the second describes
+the Core LLVM classes. In the future this manual will be extended with
+information describing how to use extension libraries, such as dominator
+information, CFG traversal routines, and useful utilities like the <tt><a
+href="/doxygen/InstVisitor_8h-source.html">InstVisitor</a></tt> template.</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2>
+ <a name="general">General Information</a>
+</h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>This section contains general information that is useful if you are working
+in the LLVM source-base, but that isn't specific to any particular API.</p>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="stl">The C++ Standard Template Library</a>
+</h3>
+
+<div>
+
+<p>LLVM makes heavy use of the C++ Standard Template Library (STL),
+perhaps much more than you are used to, or have seen before. Because of
+this, you might want to do a little background reading in the
+techniques used and capabilities of the library. There are many good
+pages that discuss the STL, and several books on the subject that you
+can get, so it will not be discussed in this document.</p>
+
+<p>Here are some useful links:</p>
+
+<ol>
+
+<li><a href="http://www.dinkumware.com/manuals/#Standard C++ Library">Dinkumware
+C++ Library reference</a> - an excellent reference for the STL and other parts
+of the standard C++ library.</li>
+
+<li><a href="http://www.tempest-sw.com/cpp/">C++ In a Nutshell</a> - This is an
+O'Reilly book in the making. It has a decent Standard Library
+Reference that rivals Dinkumware's, and is unfortunately no longer free since the
+book has been published.</li>
+
+<li><a href="http://www.parashift.com/c++-faq-lite/">C++ Frequently Asked
+Questions</a></li>
+
+<li><a href="http://www.sgi.com/tech/stl/">SGI's STL Programmer's Guide</a> -
+Contains a useful <a
+href="http://www.sgi.com/tech/stl/stl_introduction.html">Introduction to the
+STL</a>.</li>
+
+<li><a href="http://www.research.att.com/%7Ebs/C++.html">Bjarne Stroustrup's C++
+Page</a></li>
+
+<li><a href="http://64.78.49.204/">
+Bruce Eckel's Thinking in C++, 2nd ed. Volume 2 Revision 4.0 (even better, get
+the book).</a></li>
+
+</ol>
+
+<p>You are also encouraged to take a look at the <a
+href="CodingStandards.html">LLVM Coding Standards</a> guide which focuses on how
+to write maintainable code more than where to put your curly braces.</p>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="stl">Other useful references</a>
+</h3>
+
+<div>
+
+<ol>
+<li><a href="http://www.fortran-2000.com/ArnaudRecipes/sharedlib.html">Using
+static and shared libraries across platforms</a></li>
+</ol>
+
+</div>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2>
+ <a name="apis">Important and useful LLVM APIs</a>
+</h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>Here we highlight some LLVM APIs that are generally useful and good to
+know about when writing transformations.</p>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="isa">The <tt>isa&lt;&gt;</tt>, <tt>cast&lt;&gt;</tt> and
+ <tt>dyn_cast&lt;&gt;</tt> templates</a>
+</h3>
+
+<div>
+
+<p>The LLVM source-base makes extensive use of a custom form of RTTI.
+These templates have many similarities to the C++ <tt>dynamic_cast&lt;&gt;</tt>
+operator, but they avoid some of its drawbacks (primarily stemming from
+the fact that <tt>dynamic_cast&lt;&gt;</tt> only works on classes that
+have a v-table). Because they are used so often, you must know what they
+do and how they work. All of these templates are defined in the <a
+ href="/doxygen/Casting_8h-source.html"><tt>llvm/Support/Casting.h</tt></a>
+file (note that you very rarely have to include this file directly).</p>
+
+<dl>
+ <dt><tt>isa&lt;&gt;</tt>: </dt>
+
+ <dd><p>The <tt>isa&lt;&gt;</tt> operator works exactly like the Java
+ "<tt>instanceof</tt>" operator. It returns true or false depending on whether
+ a reference or pointer points to an instance of the specified class. This can
+ be very useful for constraint checking of various sorts (example below).</p>
+ </dd>
+
+ <dt><tt>cast&lt;&gt;</tt>: </dt>
+
+ <dd><p>The <tt>cast&lt;&gt;</tt> operator is a "checked cast" operation. It
+ converts a pointer or reference from a base class to a derived class, causing
+ an assertion failure if it is not really an instance of the right type. This
+ should be used in cases where you have some information that makes you believe
+ that something is of the right type. An example of the <tt>isa&lt;&gt;</tt>
+ and <tt>cast&lt;&gt;</tt> template is:</p>
+
+<div class="doc_code">
+<pre>
+static bool isLoopInvariant(const <a href="#Value">Value</a> *V, const Loop *L) {
+ if (isa&lt;<a href="#Constant">Constant</a>&gt;(V) || isa&lt;<a href="#Argument">Argument</a>&gt;(V) || isa&lt;<a href="#GlobalValue">GlobalValue</a>&gt;(V))
+ return true;
+
+ // <i>Otherwise, it must be an instruction...</i>
+ return !L-&gt;contains(cast&lt;<a href="#Instruction">Instruction</a>&gt;(V)-&gt;getParent());
+}
+</pre>
+</div>
+
+ <p>Note that you should <b>not</b> use an <tt>isa&lt;&gt;</tt> test followed
+ by a <tt>cast&lt;&gt;</tt>, for that use the <tt>dyn_cast&lt;&gt;</tt>
+ operator.</p>
+
+ </dd>
+
+ <dt><tt>dyn_cast&lt;&gt;</tt>:</dt>
+
+ <dd><p>The <tt>dyn_cast&lt;&gt;</tt> operator is a "checking cast" operation.
+ It checks to see if the operand is of the specified type, and if so, returns a
+ pointer to it (this operator does not work with references). If the operand is
+ not of the correct type, a null pointer is returned. Thus, this works very
+ much like the <tt>dynamic_cast&lt;&gt;</tt> operator in C++, and should be
+ used in the same circumstances. Typically, the <tt>dyn_cast&lt;&gt;</tt>
+ operator is used in an <tt>if</tt> statement or some other flow control
+ statement like this:</p>
+
+<div class="doc_code">
+<pre>
+if (<a href="#AllocationInst">AllocationInst</a> *AI = dyn_cast&lt;<a href="#AllocationInst">AllocationInst</a>&gt;(Val)) {
+ // <i>...</i>
+}
+</pre>
+</div>
+
+ <p>This form of the <tt>if</tt> statement effectively combines together a call
+ to <tt>isa&lt;&gt;</tt> and a call to <tt>cast&lt;&gt;</tt> into one
+ statement, which is very convenient.</p>
+
+ <p>Note that the <tt>dyn_cast&lt;&gt;</tt> operator, like C++'s
+ <tt>dynamic_cast&lt;&gt;</tt> or Java's <tt>instanceof</tt> operator, can be
+ abused. In particular, you should not use big chained <tt>if/then/else</tt>
+ blocks to check for lots of different variants of classes. If you find
+ yourself wanting to do this, it is much cleaner and more efficient to use the
+ <tt>InstVisitor</tt> class to dispatch over the instruction type directly.</p>
+
+ </dd>
+
+ <dt><tt>cast_or_null&lt;&gt;</tt>: </dt>
+
+ <dd><p>The <tt>cast_or_null&lt;&gt;</tt> operator works just like the
+ <tt>cast&lt;&gt;</tt> operator, except that it allows for a null pointer as an
+ argument (which it then propagates). This can sometimes be useful, allowing
+ you to combine several null checks into one.</p></dd>
+
+ <dt><tt>dyn_cast_or_null&lt;&gt;</tt>: </dt>
+
+ <dd><p>The <tt>dyn_cast_or_null&lt;&gt;</tt> operator works just like the
+ <tt>dyn_cast&lt;&gt;</tt> operator, except that it allows for a null pointer
+ as an argument (which it then propagates). This can sometimes be useful,
+ allowing you to combine several null checks into one.</p></dd>
+
+</dl>
+
+<p>These five templates can be used with any classes, whether they have a
+v-table or not. To add support for these templates, you simply need to add
+<tt>classof</tt> static methods to the class you are interested in casting
+to. Describing this is currently outside the scope of this document, but there
+are lots of examples in the LLVM source base.</p>
+
+</div>
+
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="string_apis">Passing strings (the <tt>StringRef</tt>
+and <tt>Twine</tt> classes)</a>
+</h3>
+
+<div>
+
+<p>Although LLVM generally does not do much string manipulation, we do have
+several important APIs which take strings. Two important examples are the
+Value class -- which has names for instructions, functions, etc. -- and the
+StringMap class which is used extensively in LLVM and Clang.</p>
+
+<p>These are generic classes, and they need to be able to accept strings which
+may have embedded null characters. Therefore, they cannot simply take
+a <tt>const char *</tt>, and taking a <tt>const std::string&amp;</tt> requires
+clients to perform a heap allocation which is usually unnecessary. Instead,
+many LLVM APIs use a <tt>StringRef</tt> or a <tt>const Twine&amp;</tt> for
+passing strings efficiently.</p>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="StringRef">The <tt>StringRef</tt> class</a>
+</h4>
+
+<div>
+
+<p>The <tt>StringRef</tt> data type represents a reference to a constant string
+(a character array and a length) and supports the common operations available
+on <tt>std::string</tt>, but does not require heap allocation.</p>
+
+<p>It can be implicitly constructed using a C style null-terminated string,
+an <tt>std::string</tt>, or explicitly with a character pointer and length.
+For example, the <tt>StringRef</tt> find function is declared as:</p>
+
+<pre class="doc_code">
+ iterator find(StringRef Key);
+</pre>
+
+<p>and clients can call it using any one of:</p>
+
+<pre class="doc_code">
+ Map.find("foo"); <i>// Lookup "foo"</i>
+ Map.find(std::string("bar")); <i>// Lookup "bar"</i>
+ Map.find(StringRef("\0baz", 4)); <i>// Lookup "\0baz"</i>
+</pre>
+
+<p>Similarly, APIs which need to return a string may return a <tt>StringRef</tt>
+instance, which can be used directly or converted to an <tt>std::string</tt>
+using the <tt>str</tt> member function. See
+"<tt><a href="/doxygen/classllvm_1_1StringRef_8h-source.html">llvm/ADT/StringRef.h</a></tt>"
+for more information.</p>
+
+<p>You should rarely use the <tt>StringRef</tt> class directly. Because it contains
+pointers to external memory, it is not generally safe to store an instance of the
+class (unless you know that the external storage will not be freed). StringRef is
+small and pervasive enough in LLVM that it should always be passed by value.</p>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="Twine">The <tt>Twine</tt> class</a>
+</h4>
+
+<div>
+
+<p>The <tt><a href="/doxygen/classllvm_1_1Twine.html">Twine</a></tt> class is an
+efficient way for APIs to accept concatenated strings. For example, a common
+LLVM paradigm is to name one instruction based on
+the name of another instruction with a suffix, for example:</p>
+
+<div class="doc_code">
+<pre>
+ New = CmpInst::Create(<i>...</i>, SO->getName() + ".cmp");
+</pre>
+</div>
+
+<p>The <tt>Twine</tt> class is effectively a lightweight
+<a href="http://en.wikipedia.org/wiki/Rope_(computer_science)">rope</a>
+which points to temporary (stack allocated) objects. Twines can be implicitly
+constructed as the result of the plus operator applied to strings (i.e., a C
+string, an <tt>std::string</tt>, or a <tt>StringRef</tt>). The twine delays
+the actual concatenation of strings until it is actually required, at which
+point it can be efficiently rendered directly into a character array. This
+avoids unnecessary heap allocation involved in constructing the temporary
+results of string concatenation. See
+"<tt><a href="/doxygen/Twine_8h_source.html">llvm/ADT/Twine.h</a></tt>"
+and <a href="#dss_twine">here</a> for more information.</p>
+
+<p>As with a <tt>StringRef</tt>, <tt>Twine</tt> objects point to external memory
+and should almost never be stored or mentioned directly. They are intended
+solely for use when defining a function which should be able to efficiently
+accept concatenated strings.</p>
+
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="DEBUG">The <tt>DEBUG()</tt> macro and <tt>-debug</tt> option</a>
+</h3>
+
+<div>
+
+<p>Often when working on your pass you will put a bunch of debugging printouts
+and other code into your pass. After you get it working, you want to remove
+it, but you may need it again in the future (to work out new bugs that you run
+across).</p>
+
+<p> Naturally, because of this, you don't want to delete the debug printouts,
+but you don't want them to always be noisy. A standard compromise is to comment
+them out, allowing you to enable them if you need them in the future.</p>
+
+<p>The "<tt><a href="/doxygen/Debug_8h-source.html">llvm/Support/Debug.h</a></tt>"
+file provides a macro named <tt>DEBUG()</tt> that is a much nicer solution to
+this problem. Basically, you can put arbitrary code into the argument of the
+<tt>DEBUG</tt> macro, and it is only executed if '<tt>opt</tt>' (or any other
+tool) is run with the '<tt>-debug</tt>' command line argument:</p>
+
+<div class="doc_code">
+<pre>
+DEBUG(errs() &lt;&lt; "I am here!\n");
+</pre>
+</div>
+
+<p>Then you can run your pass like this:</p>
+
+<div class="doc_code">
+<pre>
+$ opt &lt; a.bc &gt; /dev/null -mypass
+<i>&lt;no output&gt;</i>
+$ opt &lt; a.bc &gt; /dev/null -mypass -debug
+I am here!
+</pre>
+</div>
+
+<p>Using the <tt>DEBUG()</tt> macro instead of a home-brewed solution allows you
+to not have to create "yet another" command line option for the debug output for
+your pass. Note that <tt>DEBUG()</tt> macros are disabled for optimized builds,
+so they do not cause a performance impact at all (for the same reason, they
+should also not contain side-effects!).</p>
+
+<p>One additional nice thing about the <tt>DEBUG()</tt> macro is that you can
+enable or disable it directly in gdb. Just use "<tt>set DebugFlag=0</tt>" or
+"<tt>set DebugFlag=1</tt>" from the gdb if the program is running. If the
+program hasn't been started yet, you can always just run it with
+<tt>-debug</tt>.</p>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="DEBUG_TYPE">Fine grained debug info with <tt>DEBUG_TYPE</tt> and
+ the <tt>-debug-only</tt> option</a>
+</h4>
+
+<div>
+
+<p>Sometimes you may find yourself in a situation where enabling <tt>-debug</tt>
+just turns on <b>too much</b> information (such as when working on the code
+generator). If you want to enable debug information with more fine-grained
+control, you define the <tt>DEBUG_TYPE</tt> macro and use the <tt>-debug-only</tt>
+option as follows:</p>
+
+<div class="doc_code">
+<pre>
+#undef DEBUG_TYPE
+DEBUG(errs() &lt;&lt; "No debug type\n");
+#define DEBUG_TYPE "foo"
+DEBUG(errs() &lt;&lt; "'foo' debug type\n");
+#undef DEBUG_TYPE
+#define DEBUG_TYPE "bar"
+DEBUG(errs() &lt;&lt; "'bar' debug type\n"));
+#undef DEBUG_TYPE
+#define DEBUG_TYPE ""
+DEBUG(errs() &lt;&lt; "No debug type (2)\n");
+</pre>
+</div>
+
+<p>Then you can run your pass like this:</p>
+
+<div class="doc_code">
+<pre>
+$ opt &lt; a.bc &gt; /dev/null -mypass
+<i>&lt;no output&gt;</i>
+$ opt &lt; a.bc &gt; /dev/null -mypass -debug
+No debug type
+'foo' debug type
+'bar' debug type
+No debug type (2)
+$ opt &lt; a.bc &gt; /dev/null -mypass -debug-only=foo
+'foo' debug type
+$ opt &lt; a.bc &gt; /dev/null -mypass -debug-only=bar
+'bar' debug type
+</pre>
+</div>
+
+<p>Of course, in practice, you should only set <tt>DEBUG_TYPE</tt> at the top of
+a file, to specify the debug type for the entire module (if you do this before
+you <tt>#include "llvm/Support/Debug.h"</tt>, you don't have to insert the ugly
+<tt>#undef</tt>'s). Also, you should use names more meaningful than "foo" and
+"bar", because there is no system in place to ensure that names do not
+conflict. If two different modules use the same string, they will all be turned
+on when the name is specified. This allows, for example, all debug information
+for instruction scheduling to be enabled with <tt>-debug-only=InstrSched</tt>,
+even if the source lives in multiple files.</p>
+
+<p>The <tt>DEBUG_WITH_TYPE</tt> macro is also available for situations where you
+would like to set <tt>DEBUG_TYPE</tt>, but only for one specific <tt>DEBUG</tt>
+statement. It takes an additional first parameter, which is the type to use. For
+example, the preceding example could be written as:</p>
+
+
+<div class="doc_code">
+<pre>
+DEBUG_WITH_TYPE("", errs() &lt;&lt; "No debug type\n");
+DEBUG_WITH_TYPE("foo", errs() &lt;&lt; "'foo' debug type\n");
+DEBUG_WITH_TYPE("bar", errs() &lt;&lt; "'bar' debug type\n"));
+DEBUG_WITH_TYPE("", errs() &lt;&lt; "No debug type (2)\n");
+</pre>
+</div>
+
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="Statistic">The <tt>Statistic</tt> class &amp; <tt>-stats</tt>
+ option</a>
+</h3>
+
+<div>
+
+<p>The "<tt><a
+href="/doxygen/Statistic_8h-source.html">llvm/ADT/Statistic.h</a></tt>" file
+provides a class named <tt>Statistic</tt> that is used as a unified way to
+keep track of what the LLVM compiler is doing and how effective various
+optimizations are. It is useful to see what optimizations are contributing to
+making a particular program run faster.</p>
+
+<p>Often you may run your pass on some big program, and you're interested in
+seeing how many times it makes a certain transformation. Although you can do
+this with hand inspection or some ad-hoc method, this is a real pain and not
+very useful for big programs. Using the <tt>Statistic</tt> class makes it very
+easy to keep track of this information, and the gathered numbers are presented
+in a uniform manner alongside those of the other passes being executed.</p>
+
+<p>There are many examples of <tt>Statistic</tt> uses, but the basics of using
+it are as follows:</p>
+
+<ol>
+ <li><p>Define your statistic like this:</p>
+
+<div class="doc_code">
+<pre>
+#define <a href="#DEBUG_TYPE">DEBUG_TYPE</a> "mypassname" <i>// This goes before any #includes.</i>
+STATISTIC(NumXForms, "The # of times I did stuff");
+</pre>
+</div>
+
+ <p>The <tt>STATISTIC</tt> macro defines a static variable, whose name is
+ specified by the first argument. The pass name is taken from the DEBUG_TYPE
+ macro, and the description is taken from the second argument. The variable
+ defined ("NumXForms" in this case) acts like an unsigned integer.</p></li>
+
+ <li><p>Whenever you make a transformation, bump the counter:</p>
+
+<div class="doc_code">
+<pre>
+++NumXForms; // <i>I did stuff!</i>
+</pre>
+</div>
+
+ </li>
+ </ol>
+
+ <p>That's all you have to do. To get '<tt>opt</tt>' to print out the
+ statistics gathered, use the '<tt>-stats</tt>' option:</p>
+
+<div class="doc_code">
+<pre>
+$ opt -stats -mypassname &lt; program.bc &gt; /dev/null
+<i>... statistics output ...</i>
+</pre>
+</div>
+
+ <p> When running <tt>opt</tt> on a C file from the SPEC benchmark
+suite, it gives a report that looks like this:</p>
+
+<div class="doc_code">
+<pre>
+ 7646 bitcodewriter - Number of normal instructions
+ 725 bitcodewriter - Number of oversized instructions
+ 129996 bitcodewriter - Number of bitcode bytes written
+ 2817 raise - Number of insts DCEd or constprop'd
+ 3213 raise - Number of cast-of-self removed
+ 5046 raise - Number of expression trees converted
+ 75 raise - Number of other getelementptr's formed
+ 138 raise - Number of load/store peepholes
+ 42 deadtypeelim - Number of unused typenames removed from symtab
+ 392 funcresolve - Number of varargs functions resolved
+ 27 globaldce - Number of global variables removed
+ 2 adce - Number of basic blocks removed
+ 134 cee - Number of branches revectored
+ 49 cee - Number of setcc instruction eliminated
+ 532 gcse - Number of loads removed
+ 2919 gcse - Number of instructions removed
+ 86 indvars - Number of canonical indvars added
+ 87 indvars - Number of aux indvars removed
+ 25 instcombine - Number of dead inst eliminate
+ 434 instcombine - Number of insts combined
+ 248 licm - Number of load insts hoisted
+ 1298 licm - Number of insts hoisted to a loop pre-header
+ 3 licm - Number of insts hoisted to multiple loop preds (bad, no loop pre-header)
+ 75 mem2reg - Number of alloca's promoted
+ 1444 cfgsimplify - Number of blocks simplified
+</pre>
+</div>
+
+<p>Obviously, with so many optimizations, having a unified framework for this
+stuff is very nice. Making your pass fit well into the framework makes it more
+maintainable and useful.</p>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="ViewGraph">Viewing graphs while debugging code</a>
+</h3>
+
+<div>
+
+<p>Several of the important data structures in LLVM are graphs: for example
+CFGs made out of LLVM <a href="#BasicBlock">BasicBlock</a>s, CFGs made out of
+LLVM <a href="CodeGenerator.html#machinebasicblock">MachineBasicBlock</a>s, and
+<a href="CodeGenerator.html#selectiondag_intro">Instruction Selection
+DAGs</a>. In many cases, while debugging various parts of the compiler, it is
+nice to instantly visualize these graphs.</p>
+
+<p>LLVM provides several callbacks that are available in a debug build to do
+exactly that. If you call the <tt>Function::viewCFG()</tt> method, for example,
+the current LLVM tool will pop up a window containing the CFG for the function
+where each basic block is a node in the graph, and each node contains the
+instructions in the block. Similarly, there also exists
+<tt>Function::viewCFGOnly()</tt> (does not include the instructions), the
+<tt>MachineFunction::viewCFG()</tt> and <tt>MachineFunction::viewCFGOnly()</tt>,
+and the <tt>SelectionDAG::viewGraph()</tt> methods. Within GDB, for example,
+you can usually use something like <tt>call DAG.viewGraph()</tt> to pop
+up a window. Alternatively, you can sprinkle calls to these functions in your
+code in places you want to debug.</p>
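+
+<p>For example, a minimal sketch of sprinkling such a call into a pass (the
+<tt>SomethingLooksWrong</tt> condition below is hypothetical, not an LLVM
+API):</p>
+
+<div class="doc_code">
+<pre>
+// <i>Somewhere inside your pass, F is the Function being processed:</i>
+if (SomethingLooksWrong)   // <i>a condition you are debugging</i>
+  F.viewCFG();             // <i>pops up a window showing F's CFG</i>
+</pre>
+</div>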
+
+<p>Getting this to work requires a small amount of configuration. On Unix
+systems with X11, install the <a href="http://www.graphviz.org">graphviz</a>
+toolkit, and make sure 'dot' and 'gv' are in your path. If you are running on
+Mac OS X, download and install the Mac OS X <a
+href="http://www.pixelglow.com/graphviz/">Graphviz program</a>, and add
+<tt>/Applications/Graphviz.app/Contents/MacOS/</tt> (or wherever you install
+it) to your path. Once your system and path are set up, rerun the LLVM
+configure script and rebuild LLVM to enable this functionality.</p>
+
+<p><tt>SelectionDAG</tt> has been extended to make it easier to locate
+<i>interesting</i> nodes in large complex graphs. From gdb, if you
+<tt>call DAG.setGraphColor(<i>node</i>, "<i>color</i>")</tt>, then the
+next <tt>call DAG.viewGraph()</tt> would highlight the node in the
+specified color (choices of colors can be found at <a
+href="http://www.graphviz.org/doc/info/colors.html">colors</a>.) More
+complex node attributes can be provided with <tt>call
+DAG.setGraphAttrs(<i>node</i>, "<i>attributes</i>")</tt> (choices can be
+found at <a href="http://www.graphviz.org/doc/info/attrs.html">Graph
+Attributes</a>.) If you want to restart and clear all the current graph
+attributes, then you can <tt>call DAG.clearGraphAttrs()</tt>. </p>
+
+<p>Note that graph visualization features are compiled out of Release builds
+to reduce file size. This means that you need a Debug+Asserts or
+Release+Asserts build to use these features.</p>
+
+</div>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2>
+ <a name="datastructure">Picking the Right Data Structure for a Task</a>
+</h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>LLVM has a plethora of data structures in the <tt>llvm/ADT/</tt> directory,
+ and we commonly use STL data structures. This section describes the trade-offs
+ you should consider when you pick one.</p>
+
+<p>
+The first step is a choose your own adventure: do you want a sequential
+container, a set-like container, or a map-like container? The most important
+thing when choosing a container is the algorithmic properties of how you plan to
+access the container. Based on that, you should use:</p>
+
+<ul>
+<li>a <a href="#ds_map">map-like</a> container if you need efficient look-up
+    of a value based on another value. Map-like containers also support
+    efficient queries for containment (whether a key is in the map). Map-like
+    containers generally do not support efficient reverse mapping (values to
+    keys). If you need that, use two maps. Some map-like containers also
+    support efficient iteration through the keys in sorted order. Map-like
+    containers are the most expensive sort; only use them if you need one of
+    these capabilities.</li>
+
+<li>a <a href="#ds_set">set-like</a> container if you need to put a bunch of
+ stuff into a container that automatically eliminates duplicates. Some
+ set-like containers support efficient iteration through the elements in
+ sorted order. Set-like containers are more expensive than sequential
+ containers.
+</li>
+
+<li>a <a href="#ds_sequential">sequential</a> container if you need
+    the most efficient way to add elements while keeping track of the order in
+    which they are added to the collection. Sequential containers permit
+    duplicates and support efficient iteration, but do not support efficient
+    look-up based on a key.
+</li>
+
+<li>a <a href="#ds_string">string</a> container is a specialized sequential
+ container or reference structure that is used for character or byte
+ arrays.</li>
+
+<li>a <a href="#ds_bit">bit</a> container provides an efficient way to store and
+ perform set operations on sets of numeric id's, while automatically
+ eliminating duplicates. Bit containers require a maximum of 1 bit for each
+ identifier you want to store.
+</li>
+</ul>
+
+<p>
+Once the proper category of container is determined, you can fine tune the
+memory use, constant factors, and cache behaviors of access by intelligently
+picking a member of the category. Note that constant factors and cache behavior
+can be a big deal. If you have a vector that usually only contains a few
+elements (but could contain many), for example, it's much better to use
+<a href="#dss_smallvector">SmallVector</a> than <a href="#dss_vector">vector</a>.
+Doing so avoids (relatively) expensive malloc/free calls, which dwarf the
+cost of adding the elements to the container.</p>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="ds_sequential">Sequential Containers (std::vector, std::list, etc)</a>
+</h3>
+
+<div>
+There are a variety of sequential containers available for you, based on your
+needs. Pick the first in this section that will do what you want.
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="dss_arrayref">llvm/ADT/ArrayRef.h</a>
+</h4>
+
+<div>
+<p>The llvm::ArrayRef class is the preferred class to use in an interface that
+ accepts a sequential list of elements in memory and just reads from them. By
+ taking an ArrayRef, the API can be passed a fixed size array, an std::vector,
+ an llvm::SmallVector and anything else that is contiguous in memory.
+</p>
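+
+<p>For example, a hypothetical API (the <tt>sumValues</tt> function below is
+just an illustration, not an existing LLVM interface) might be written as:</p>
+
+<div class="doc_code">
+<pre>
+unsigned sumValues(ArrayRef&lt;unsigned&gt; Values) {
+  unsigned Sum = 0;
+  for (unsigned i = 0, e = Values.size(); i != e; ++i)
+    Sum += Values[i];
+  return Sum;
+}
+
+// <i>Callers can pass a fixed size array, a std::vector or a SmallVector:</i>
+unsigned Buffer[] = { 1, 2, 3 };
+SmallVector&lt;unsigned, 8&gt; Vec(Buffer, Buffer + 3);
+unsigned A = sumValues(Buffer);
+unsigned B = sumValues(Vec);
+</pre>
+</div>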
+</div>
+
+
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="dss_fixedarrays">Fixed Size Arrays</a>
+</h4>
+
+<div>
+<p>Fixed size arrays are very simple and very fast. They are good if you know
+exactly how many elements you have, or you have a (low) upper bound on how many
+you have.</p>
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="dss_heaparrays">Heap Allocated Arrays</a>
+</h4>
+
+<div>
+<p>Heap allocated arrays (new[] + delete[]) are also simple. They are good if
+the number of elements is variable, if you know how many elements you will need
+before the array is allocated, and if the array is usually large (if not,
+consider a <a href="#dss_smallvector">SmallVector</a>). The cost of a heap
+allocated array is the cost of the new/delete (aka malloc/free). Also note that
+if you are allocating an array of a type with a constructor, the constructor
+and destructor will be run for every element in the array (re-sizable vectors only
+construct those elements actually used).</p>
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="dss_tinyptrvector">"llvm/ADT/TinyPtrVector.h"</a>
+</h4>
+
+
+<div>
+<p><tt>TinyPtrVector&lt;Type&gt;</tt> is a highly specialized collection class
+that is optimized to avoid allocation in the case when a vector has zero or one
+elements. It has two major restrictions: 1) it can only hold values of pointer
+type, and 2) it cannot hold a null pointer.</p>
+
+<p>Since this container is highly specialized, it is rarely used.</p>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="dss_smallvector">"llvm/ADT/SmallVector.h"</a>
+</h4>
+
+<div>
+<p><tt>SmallVector&lt;Type, N&gt;</tt> is a simple class that looks and smells
+just like <tt>vector&lt;Type&gt;</tt>:
+it supports efficient iteration, lays out elements in memory order (so you can
+do pointer arithmetic between elements), supports efficient push_back/pop_back
+operations, supports efficient random access to its elements, etc.</p>
+
+<p>The advantage of SmallVector is that it allocates space for
+some number of elements (N) <b>in the object itself</b>. Because of this, if
+the SmallVector is dynamically smaller than N, no malloc is performed. This can
+be a big win in cases where the malloc/free call is far more expensive than the
+code that fiddles around with the elements.</p>
+
+<p>This is good for vectors that are "usually small" (e.g. the number of
+predecessors/successors of a block is usually less than 8). On the other hand,
+this makes the size of the SmallVector itself large, so you don't want to
+allocate lots of them (doing so will waste a lot of space). As such,
+SmallVectors are most useful when on the stack.</p>
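+
+<p>For example, a sketch of collecting a block's successors without touching
+the heap in the common case (this assumes <tt>BB</tt> is a
+<tt>BasicBlock*</tt> and that <tt>"llvm/Support/CFG.h"</tt> is included for
+the successor iterators):</p>
+
+<div class="doc_code">
+<pre>
+SmallVector&lt;BasicBlock*, 8&gt; Succs;
+for (succ_iterator SI = succ_begin(BB), E = succ_end(BB); SI != E; ++SI)
+  Succs.push_back(*SI);   // <i>no malloc unless there are more than 8 successors</i>
+</pre>
+</div>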
+
+<p>SmallVector also provides a nice portable and efficient replacement for
+<tt>alloca</tt>.</p>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="dss_vector">&lt;vector&gt;</a>
+</h4>
+
+<div>
+<p>
+std::vector is well loved and respected. It is useful when SmallVector isn't:
+when the size of the vector is often large (thus the small optimization will
+rarely be a benefit) or if you will be allocating many instances of the vector
+itself (which would waste space for elements that aren't in the container).
+vector is also useful when interfacing with code that expects vectors :).
+</p>
+
+<p>One worthwhile note about std::vector: avoid code like this:</p>
+
+<div class="doc_code">
+<pre>
+for ( ... ) {
+ std::vector&lt;foo&gt; V;
+ // make use of V.
+}
+</pre>
+</div>
+
+<p>Instead, write this as:</p>
+
+<div class="doc_code">
+<pre>
+std::vector&lt;foo&gt; V;
+for ( ... ) {
+ // make use of V.
+ V.clear();
+}
+</pre>
+</div>
+
+<p>Doing so will save (at least) one heap allocation and free per iteration of
+the loop.</p>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="dss_deque">&lt;deque&gt;</a>
+</h4>
+
+<div>
+<p>std::deque is, in some senses, a generalized version of std::vector. Like
+std::vector, it provides constant time random access and other similar
+properties, but it also provides efficient access to the front of the list. It
+does not guarantee contiguity of elements within memory.</p>
+
+<p>In exchange for this extra flexibility, std::deque has significantly higher
+constant factor costs than std::vector. If possible, use std::vector or
+something cheaper.</p>
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="dss_list">&lt;list&gt;</a>
+</h4>
+
+<div>
+<p>std::list is an extremely inefficient class that is rarely useful.
+It performs a heap allocation for every element inserted into it, thus having an
+extremely high constant factor, particularly for small data types. std::list
+also only supports bidirectional iteration, not random access iteration.</p>
+
+<p>In exchange for this high cost, std::list supports efficient access to both
+ends of the list (like std::deque, but unlike std::vector or SmallVector). In
+addition, the iterator invalidation characteristics of std::list are stronger
+than those of a vector class: inserting or removing an element from the list
+does not invalidate iterators or pointers to other elements in the list.</p>
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="dss_ilist">llvm/ADT/ilist.h</a>
+</h4>
+
+<div>
+<p><tt>ilist&lt;T&gt;</tt> implements an 'intrusive' doubly-linked list. It is
+intrusive, because it requires the element to store and provide access to the
+prev/next pointers for the list.</p>
+
+<p><tt>ilist</tt> has the same drawbacks as <tt>std::list</tt>, and additionally
+requires an <tt>ilist_traits</tt> implementation for the element type, but it
+provides some novel characteristics. In particular, it can efficiently store
+polymorphic objects, the traits class is informed when an element is inserted or
+removed from the list, and <tt>ilist</tt>s are guaranteed to support a
+constant-time splice operation.</p>
+
+<p>These properties are exactly what we want for things like
+<tt>Instruction</tt>s and basic blocks, which is why these are implemented with
+<tt>ilist</tt>s.</p>
+
+Related classes of interest are explained in the following subsections:
+ <ul>
+ <li><a href="#dss_ilist_traits">ilist_traits</a></li>
+ <li><a href="#dss_iplist">iplist</a></li>
+ <li><a href="#dss_ilist_node">llvm/ADT/ilist_node.h</a></li>
+ <li><a href="#dss_ilist_sentinel">Sentinels</a></li>
+ </ul>
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="dss_packedvector">llvm/ADT/PackedVector.h</a>
+</h4>
+
+<div>
+<p>
+Useful for storing a vector of values using only a small number of bits for each
+value. Apart from the standard operations of a vector-like container, it can
+also perform an 'or' set operation.
+</p>
+
+<p>For example:</p>
+
+<div class="doc_code">
+<pre>
+enum State {
+ None = 0x0,
+ FirstCondition = 0x1,
+ SecondCondition = 0x2,
+ Both = 0x3
+};
+
+State get() {
+ PackedVector&lt;State, 2&gt; Vec1;
+ Vec1.push_back(FirstCondition);
+
+ PackedVector&lt;State, 2&gt; Vec2;
+ Vec2.push_back(SecondCondition);
+
+ Vec1 |= Vec2;
+ return Vec1[0]; // returns 'Both'.
+}
+</pre>
+</div>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="dss_ilist_traits">ilist_traits</a>
+</h4>
+
+<div>
+<p><tt>ilist_traits&lt;T&gt;</tt> is <tt>ilist&lt;T&gt;</tt>'s customization
+mechanism. <tt>iplist&lt;T&gt;</tt> (and consequently <tt>ilist&lt;T&gt;</tt>)
+publicly derive from this traits class.</p>
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="dss_iplist">iplist</a>
+</h4>
+
+<div>
+<p><tt>iplist&lt;T&gt;</tt> is <tt>ilist&lt;T&gt;</tt>'s base and as such
+supports a slightly narrower interface. Notably, inserters from
+<tt>T&amp;</tt> are absent.</p>
+
+<p><tt>ilist_traits&lt;T&gt;</tt> is a public base of this class and can be
+used for a wide variety of customizations.</p>
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="dss_ilist_node">llvm/ADT/ilist_node.h</a>
+</h4>
+
+<div>
+<p><tt>ilist_node&lt;T&gt;</tt> implements the forward and backward links
+that are expected by the <tt>ilist&lt;T&gt;</tt> (and analogous containers)
+in the default manner.</p>
+
+<p><tt>ilist_node&lt;T&gt;</tt>s are meant to be embedded in the node type
+<tt>T</tt>; usually <tt>T</tt> publicly derives from
+<tt>ilist_node&lt;T&gt;</tt>.</p>
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="dss_ilist_sentinel">Sentinels</a>
+</h4>
+
+<div>
+<p><tt>ilist</tt>s have another specialty that must be considered. To be a good
+citizen in the C++ ecosystem, an <tt>ilist</tt> needs to support the standard
+container operations, such as <tt>begin</tt> and <tt>end</tt> iterators, etc.
+Also, <tt>operator--</tt> must work correctly on the <tt>end</tt> iterator in
+the case of non-empty <tt>ilist</tt>s.</p>
+
+<p>The only sensible solution to this problem is to allocate a so-called
+<i>sentinel</i> along with the intrusive list, which serves as the <tt>end</tt>
+iterator, providing the back-link to the last element. However, conforming to
+C++ convention, it is illegal to apply <tt>operator++</tt> beyond the sentinel,
+and the sentinel must not be dereferenced.</p>
+
+<p>These constraints allow the <tt>ilist</tt> some implementation freedom in how
+it allocates and stores the sentinel. The corresponding policy is dictated
+by <tt>ilist_traits&lt;T&gt;</tt>. By default, a <tt>T</tt> gets heap-allocated
+whenever the need for a sentinel arises.</p>
+
+<p>While the default policy is sufficient in most cases, it may break down when
+<tt>T</tt> does not provide a default constructor. Also, in the case of many
+instances of <tt>ilist</tt>s, the memory overhead of the associated sentinels
+is wasted. To alleviate the situation with numerous and voluminous
+<tt>T</tt>-sentinels, sometimes a trick is employed, leading to <i>ghostly
+sentinels</i>.</p>
+
+<p>Ghostly sentinels are obtained by specially-crafted <tt>ilist_traits&lt;T&gt;</tt>
+which superpose the sentinel with the <tt>ilist</tt> instance in memory. Pointer
+arithmetic is used to obtain the sentinel, which is relative to the
+<tt>ilist</tt>'s <tt>this</tt> pointer. The <tt>ilist</tt> is augmented by an
+extra pointer, which serves as the back-link of the sentinel. This is the only
+field in the ghostly sentinel which can be legally accessed.</p>
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="dss_other">Other Sequential Container options</a>
+</h4>
+
+<div>
+<p>Other STL containers are available, such as std::string.</p>
+
+<p>There are also various STL adapter classes such as std::queue,
+std::priority_queue, std::stack, etc. These provide simplified access to an
+underlying container but don't affect the cost of the container itself.</p>
+
+</div>
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="ds_string">String-like containers</a>
+</h3>
+
+<div>
+
+<p>
+There are a variety of ways to pass around and use strings in C and C++, and
+LLVM adds a few new options to choose from. Pick the first option on this list
+that will do what you need; they are ordered according to their relative cost.
+</p>
+<p>
+Note that it is generally preferred to <em>not</em> pass strings around as
+"<tt>const char*</tt>"'s. These have a number of problems, including the fact
+that they cannot represent embedded nul ("\0") characters, and do not have a
+length available efficiently. The general replacement for '<tt>const
+char*</tt>' is StringRef.
+</p>
+
+<p>For more information on choosing string containers for APIs, please see
+<a href="#string_apis">Passing strings</a>.</p>
+
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="dss_stringref">llvm/ADT/StringRef.h</a>
+</h4>
+
+<div>
+<p>
+The StringRef class is a simple value class that contains a pointer to a
+character and a length, and is quite related to the <a
+href="#dss_arrayref">ArrayRef</a> class (but specialized for arrays of
+characters). Because StringRef carries a length with it, it safely handles
+strings with embedded nul characters, getting the length does not require
+a strlen call, and it even has very convenient APIs for slicing and dicing the
+character range that it represents.
+</p>
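+
+<p>For example, a small sketch of the kind of slicing StringRef supports:</p>
+
+<div class="doc_code">
+<pre>
+StringRef Path("lib/Transforms/Scalar");
+StringRef Dir = Path.substr(0, Path.find('/'));   // <i>"lib"</i>
+bool IsScalar = Path.endswith("Scalar");          // <i>true</i>
+size_t Len = Path.size();                         // <i>no strlen call</i>
+</pre>
+</div>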
+
+<p>
+StringRef is ideal for passing simple strings around that are known to be live,
+either because they are C string literals, std::string, a C array, or a
+SmallVector. Each of these cases has an efficient implicit conversion to
+StringRef, which doesn't result in a dynamic strlen being executed.
+</p>
+
+<p>StringRef has a few major limitations which make more powerful string
+containers useful:</p>
+
+<ol>
+<li>You cannot directly convert a StringRef to a 'const char*' because there is
+no way to add a trailing nul (unlike the .c_str() method on various stronger
+classes).</li>
+
+
+<li>StringRef doesn't own or keep alive the underlying string bytes.
+As such it can easily lead to dangling pointers, and is not suitable for
+embedding in datastructures in most cases (instead, use an std::string or
+something like that).</li>
+
+<li>For the same reason, StringRef cannot be used as the return value of a
+method if the method "computes" the result string. Instead, use
+std::string.</li>
+
+<li>StringRef's do not allow you to mutate the pointed-to string bytes and it
+doesn't allow you to insert or remove bytes from the range. For editing
+operations like this, it interoperates with the <a
+href="#dss_twine">Twine</a> class.</li>
+</ol>
+
+<p>Because of its strengths and limitations, it is very common for a function to
+take a StringRef and for a method on an object to return a StringRef that
+points into some string that it owns.</p>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="dss_twine">llvm/ADT/Twine.h</a>
+</h4>
+
+<div>
+ <p>
+ The Twine class is used as an intermediary datatype for APIs that want to take
+ a string that can be constructed inline with a series of concatenations.
+ Twine works by forming recursive instances of the Twine datatype (a simple
+ value object) on the stack as temporary objects, linking them together into a
+ tree which is then linearized when the Twine is consumed. Twine is only safe
+ to use as the argument to a function, and should always be a const reference,
+ e.g.:
+ </p>
+
+ <pre>
+ void foo(const Twine &amp;T);
+ ...
+ StringRef X = ...
+ unsigned i = ...
+ foo(X + "." + Twine(i));
+ </pre>
+
+ <p>This example forms a string like "blarg.42" by concatenating the values
+ together, and does not form intermediate strings containing "blarg" or
+ "blarg.".
+ </p>
+
+ <p>Because Twine is constructed with temporary objects on the stack, and
+ because these instances are destroyed at the end of the current statement,
+ it is an inherently dangerous API. For example, this simple variant contains
+ undefined behavior and will probably crash:</p>
+
+ <pre>
+ void foo(const Twine &amp;T);
+ ...
+ StringRef X = ...
+ unsigned i = ...
+ const Twine &amp;Tmp = X + "." + Twine(i);
+ foo(Tmp);
+ </pre>
+
+ <p>... because the temporaries are destroyed before the call. That said,
+ Twine's are much more efficient than intermediate std::string temporaries, and
+ they work really well with StringRef. Just be aware of their limitations.</p>
+
+</div>
+
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="dss_smallstring">llvm/ADT/SmallString.h</a>
+</h4>
+
+<div>
+
+<p>SmallString is a subclass of <a href="#dss_smallvector">SmallVector</a> that
+adds some convenience APIs like += that takes StringRef's. SmallString avoids
+allocating memory in the case when the preallocated space is enough to hold its
+data, and it calls back to general heap allocation when required. Since it owns
+its data, it is very safe to use and supports full mutation of the string.</p>
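+
+<p>For example, a sketch of building up a name in a stack buffer
+(<tt>OutputDir</tt> and <tt>ModuleName</tt> are assumed to be StringRefs
+provided by the surrounding code):</p>
+
+<div class="doc_code">
+<pre>
+SmallString&lt;128&gt; Path;
+Path += OutputDir;       // <i>no heap allocation while the result fits in 128 bytes</i>
+Path.push_back('/');
+Path += ModuleName;
+StringRef Result = Path.str();   // <i>view the accumulated characters</i>
+</pre>
+</div>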
+
+<p>As with SmallVector, the big downside to SmallString is its sizeof. While it
+is optimized for small strings, it is itself not particularly small. This means
+that it works great for temporary scratch buffers on the stack, but should not
+generally be put into the heap: it is very rare to see a SmallString as the
+member of a frequently-allocated heap data structure or returned by-value.
+</p>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="dss_stdstring">std::string</a>
+</h4>
+
+<div>
+
+ <p>The standard C++ std::string class is a very general class that (like
+ SmallString) owns its underlying data. sizeof(std::string) is very reasonable
+ so it can be embedded into heap data structures and returned by-value.
+ On the other hand, std::string is highly inefficient for inline editing (e.g.
+ concatenating a bunch of stuff together) and because it is provided by the
+  standard library, its performance characteristics depend a lot on the host
+ standard library (e.g. libc++ and MSVC provide a highly optimized string
+ class, GCC contains a really slow implementation).
+ </p>
+
+ <p>The major disadvantage of std::string is that almost every operation that
+ makes them larger can allocate memory, which is slow. As such, it is better
+ to use SmallVector or Twine as a scratch buffer, but then use std::string to
+ persist the result.</p>
+
+
+</div>
+
+<!-- end of strings -->
+</div>
+
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="ds_set">Set-Like Containers (std::set, SmallSet, SetVector, etc)</a>
+</h3>
+
+<div>
+
+<p>Set-like containers are useful when you need to canonicalize multiple values
+into a single representation. There are several different choices for how to do
+this, providing various trade-offs.</p>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="dss_sortedvectorset">A sorted 'vector'</a>
+</h4>
+
+<div>
+
+<p>If you intend to insert a lot of elements, then do a lot of queries, a
+great approach is to use a vector (or other sequential container) with
+std::sort+std::unique to remove duplicates. This approach works really well if
+your usage pattern has these two distinct phases (insert then query), and can be
+coupled with a good choice of <a href="#ds_sequential">sequential container</a>.
+</p>
+
+<p>
+This combination provides several nice properties: the result data is
+contiguous in memory (good for cache locality), has few allocations, is easy to
+address (iterators in the final vector are just indices or pointers), and can be
+efficiently queried with a standard binary or radix search.</p>
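+
+<p>For illustration, a small sketch of this idiom (assuming
+<tt>&lt;algorithm&gt;</tt> is included and that <tt>V1</tt> and <tt>V2</tt> are
+<tt>Value*</tt>s from the surrounding code):</p>
+
+<div class="doc_code">
+<pre>
+std::vector&lt;Value*&gt; Seen;
+// <i>Phase 1: insert freely; duplicates are fine for now.</i>
+Seen.push_back(V1);
+Seen.push_back(V2);
+Seen.push_back(V1);
+// <i>Canonicalize once: sort, then erase the duplicates.</i>
+std::sort(Seen.begin(), Seen.end());
+Seen.erase(std::unique(Seen.begin(), Seen.end()), Seen.end());
+// <i>Phase 2: query with binary search.</i>
+bool HaveV1 = std::binary_search(Seen.begin(), Seen.end(), V1);
+</pre>
+</div>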
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="dss_smallset">"llvm/ADT/SmallSet.h"</a>
+</h4>
+
+<div>
+
+<p>If you have a set-like data structure that is usually small and whose elements
+are reasonably small, a <tt>SmallSet&lt;Type, N&gt;</tt> is a good choice. This set
+has space for N elements in place (thus, if the set is dynamically smaller than
+N, no malloc traffic is required) and accesses them with a simple linear search.
+When the set grows beyond 'N' elements, it allocates a more expensive representation that
+guarantees efficient access (for most types, it falls back to std::set, but for
+pointers it uses something far better, <a
+href="#dss_smallptrset">SmallPtrSet</a>).</p>
+
+<p>The magic of this class is that it handles small sets extremely efficiently,
+but gracefully handles extremely large sets without loss of efficiency. The
+drawback is that the interface is quite small: it supports insertion, queries
+and erasing, but does not support iteration.</p>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="dss_smallptrset">"llvm/ADT/SmallPtrSet.h"</a>
+</h4>
+
+<div>
+
+<p>SmallPtrSet has all the advantages of <tt>SmallSet</tt> (and a <tt>SmallSet</tt> of pointers is
+transparently implemented with a <tt>SmallPtrSet</tt>), but also supports iterators. If
+more than 'N' insertions are performed, a single quadratically
+probed hash table is allocated and grows as needed, providing extremely
+efficient access (constant time insertion/deletion/queries with low constant
+factors) and is very stingy with malloc traffic.</p>
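+
+<p>For example, a sketch of the common visited-set idiom (<tt>BB</tt> is
+assumed to be a <tt>BasicBlock*</tt> produced by the surrounding traversal):</p>
+
+<div class="doc_code">
+<pre>
+SmallPtrSet&lt;BasicBlock*, 16&gt; Visited;
+SmallVector&lt;BasicBlock*, 16&gt; Worklist;
+...
+if (!Visited.count(BB)) {   // <i>only queue each block once</i>
+  Visited.insert(BB);
+  Worklist.push_back(BB);
+}
+</pre>
+</div>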
+
+<p>Note that, unlike <tt>std::set</tt>, the iterators of <tt>SmallPtrSet</tt> are invalidated
+whenever an insertion occurs. Also, the values visited by the iterators are not
+visited in sorted order.</p>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="dss_denseset">"llvm/ADT/DenseSet.h"</a>
+</h4>
+
+<div>
+
+<p>
+DenseSet is a simple quadratically probed hash table. It excels at supporting
+small values: it uses a single allocation to hold all of the pairs that
+are currently inserted in the set. DenseSet is a great way to unique small
+values that are not simple pointers (use <a
+href="#dss_smallptrset">SmallPtrSet</a> for pointers). Note that DenseSet has
+the same requirements for the value type that <a
+href="#dss_densemap">DenseMap</a> has.
+</p>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="dss_sparseset">"llvm/ADT/SparseSet.h"</a>
+</h4>
+
+<div>
+
+<p>SparseSet holds a small number of objects identified by unsigned keys of
+moderate size. It uses a lot of memory, but provides operations that are
+almost as fast as a vector. Typical keys are physical registers, virtual
+registers, or numbered basic blocks.</p>
+
+<p>SparseSet is useful for algorithms that need very fast clear/find/insert/erase
+and fast iteration over small sets. It is not intended for building composite
+data structures.</p>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="dss_FoldingSet">"llvm/ADT/FoldingSet.h"</a>
+</h4>
+
+<div>
+
+<p>
+FoldingSet is an aggregate class that is really good at uniquing
+expensive-to-create or polymorphic objects. It is a combination of a chained
+hash table with intrusive links (uniqued objects are required to inherit from
+FoldingSetNode) that uses <a href="#dss_smallvector">SmallVector</a> as part of
+its ID process.</p>
+
+<p>Consider a case where you want to implement a "getOrCreateFoo" method for
+a complex object (for example, a node in the code generator). The client has a
+description of *what* it wants to generate (it knows the opcode and all the
+operands), but we don't want to 'new' a node, then try inserting it into a set
+only to find out it already exists, at which point we would have to delete it
+and return the node that already exists.
+</p>
+
+<p>To support this style of client, FoldingSet performs a query with a
+FoldingSetNodeID (which wraps SmallVector) that can be used to describe the
+element that we want to query for. The query either returns the element
+matching the ID or it returns an opaque ID that indicates where insertion should
+take place. Construction of the ID usually does not require heap traffic.</p>
+
+<p>Because FoldingSet uses intrusive links, it can support polymorphic objects
+in the set (for example, you can have SDNode instances mixed with LoadSDNodes).
+Because the elements are individually allocated, pointers to the elements are
+stable: inserting or removing elements does not invalidate any pointers to other
+elements.
+</p>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="dss_set">&lt;set&gt;</a>
+</h4>
+
+<div>
+
+<p><tt>std::set</tt> is a reasonable all-around set class, which is decent at
+many things but great at nothing. std::set allocates memory for each element
+inserted (thus it is very malloc intensive) and typically stores three pointers
+per element in the set (thus adding a large amount of per-element space
+overhead). It offers guaranteed log(n) performance, which is not particularly
+fast from a complexity standpoint (particularly if the elements of the set are
+expensive to compare, like strings), and has extremely high constant factors for
+lookup, insertion and removal.</p>
+
+<p>The advantages of std::set are that its iterators are stable (deleting or
+inserting an element from the set does not affect iterators or pointers to other
+elements) and that iteration over the set is guaranteed to be in sorted order.
+If the elements in the set are large, then the relative overhead of the pointers
+and malloc traffic is not a big deal, but if the elements of the set are small,
+std::set is almost never a good choice.</p>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="dss_setvector">"llvm/ADT/SetVector.h"</a>
+</h4>
+
+<div>
+<p>LLVM's SetVector&lt;Type&gt; is an adapter class that combines your choice of
+a set-like container along with a <a href="#ds_sequential">Sequential
+Container</a>. The important property
+that this provides is efficient insertion with uniquing (duplicate elements are
+ignored) with iteration support. It implements this by inserting elements into
+both a set-like container and the sequential container, using the set-like
+container for uniquing and the sequential container for iteration.
+</p>
+
+<p>The difference between SetVector and other sets is that the order of
+iteration is guaranteed to match the order of insertion into the SetVector.
+This property is really important for things like sets of pointers. Because
+pointer values are non-deterministic (e.g. they vary across runs of the program
+on different machines), iterating over the pointers in an ordinary set would
+not be in a well-defined order.</p>
+
+<p>
+The drawback of SetVector is that it requires twice as much space as a normal
+set and has the sum of constant factors from the set-like container and the
+sequential container that it uses. Use it *only* if you need to iterate over
+the elements in a deterministic order. SetVector is also expensive to delete
+elements out of (linear time), unless you use its "pop_back" method, which is
+faster.
+</p>
+
+<p><tt>SetVector</tt> is an adapter class that defaults to
+ using <tt>std::vector</tt> and a size 16 <tt>SmallSet</tt> for the underlying
+ containers, so it is quite expensive. However,
+ <tt>"llvm/ADT/SetVector.h"</tt> also provides a <tt>SmallSetVector</tt>
+ class, which defaults to using a <tt>SmallVector</tt> and <tt>SmallSet</tt>
+ of a specified size. If you use this, and if your sets are dynamically
+ smaller than <tt>N</tt>, you will save a lot of heap traffic.</p>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="dss_uniquevector">"llvm/ADT/UniqueVector.h"</a>
+</h4>
+
+<div>
+
+<p>
+UniqueVector is similar to <a href="#dss_setvector">SetVector</a>, but it
+retains a unique ID for each element inserted into the set. It internally
+contains a map and a vector, and it assigns a unique ID for each value inserted
+into the set.</p>
+
+<p>UniqueVector is very expensive: its cost is the sum of the cost of
+maintaining both the map and vector, it has high complexity, high constant
+factors, and produces a lot of malloc traffic. It should be avoided.</p>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="dss_immutableset">"llvm/ADT/ImmutableSet.h"</a>
+</h4>
+
+<div>
+
+<p>
+ImmutableSet is an immutable (functional) set implementation based on an AVL
+tree.
+Adding or removing elements is done through a Factory object and results in the
+creation of a new ImmutableSet object.
+If an ImmutableSet already exists with the given contents, then the existing one
+is returned; equality is compared with a FoldingSetNodeID.
+The time and space complexity of add or remove operations is logarithmic in the
+size of the original set.</p>
+
+<p>
+There is no method for returning an element of the set; you can only check for
+membership.</p>
+
+</div>
+
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="dss_otherset">Other Set-Like Container Options</a>
+</h4>
+
+<div>
+
+<p>
+The STL provides several other options, such as std::multiset and the various
+"hash_set" like containers (whether from C++ TR1 or from the SGI library). We
+never use hash_set and unordered_set because they are generally very expensive
+(each insertion requires a malloc) and very non-portable.
+</p>
+
+<p>std::multiset is useful if you're not interested in elimination of
+duplicates, but has all the drawbacks of std::set. A sorted vector (where you
+don't delete duplicate entries) or some other approach is almost always
+better.</p>
+
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="ds_map">Map-Like Containers (std::map, DenseMap, etc)</a>
+</h3>
+
+<div>
+Map-like containers are useful when you want to associate data with a key. As
+usual, there are a lot of different ways to do this. :)
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="dss_sortedvectormap">A sorted 'vector'</a>
+</h4>
+
+<div>
+
+<p>
+If your usage pattern follows a strict insert-then-query approach, you can
+trivially use the same approach as <a href="#dss_sortedvectorset">sorted vectors
+for set-like containers</a>. The only difference is that your query function
+(which uses std::lower_bound to get efficient log(n) lookup) should only compare
+the key, not both the key and value. This yields the same advantages as sorted
+vectors for sets.
+</p>
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="dss_stringmap">"llvm/ADT/StringMap.h"</a>
+</h4>
+
+<div>
+
+<p>
+Strings are commonly used as keys in maps, and they are difficult to support
+efficiently: they are variable length, inefficient to hash and compare when
+long, expensive to copy, etc. StringMap is a specialized container designed to
+cope with these issues. It supports mapping an arbitrary range of bytes to an
+arbitrary other object.</p>
+
+<p>The StringMap implementation uses a quadratically-probed hash table, where
+the buckets store a pointer to the heap allocated entries (and some other
+stuff). The entries in the map must be heap allocated because the strings are
+variable length. The string data (key) and the element object (value) are
+stored in the same allocation with the string data immediately after the element
+object. This container guarantees that "<tt>(char*)(&amp;Value+1)</tt>" points
+to the key string for a value.</p>
+
+<p>The StringMap is very fast for several reasons: quadratic probing is very
+cache efficient for lookups, the hash value of strings in buckets is not
+recomputed when looking up an element, StringMap rarely has to touch the
+memory for unrelated objects when looking up a value (even when hash collisions
+happen), hash table growth does not recompute the hash values for strings
+already in the table, and each pair in the map is stored in a single allocation
+(the string data is stored in the same allocation as the Value of a pair).</p>
+
+<p>StringMap also provides query methods that take byte ranges, so it only ever
+copies a string if a value is inserted into the table.</p>
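+
+<p>For example, a small sketch of querying a StringMap (the map is assumed to
+have been populated elsewhere, and <tt>Name</tt> is assumed to be a
+StringRef):</p>
+
+<div class="doc_code">
+<pre>
+StringMap&lt;unsigned&gt; Widths;
+// <i>... Widths is assumed to have been filled in elsewhere ...</i>
+if (Widths.count(Name))
+  errs() &lt;&lt; Name &lt;&lt; " has width " &lt;&lt; Widths.lookup(Name) &lt;&lt; "\n";
+</pre>
+</div>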
+
+<p>StringMap iteration order, however, is not guaranteed to be deterministic,
+so any uses which require that should instead use a std::map.</p>
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="dss_indexedmap">"llvm/ADT/IndexedMap.h"</a>
+</h4>
+
+<div>
+<p>
+IndexedMap is a specialized container for mapping small dense integers (or
+values that can be mapped to small dense integers) to some other type. It is
+internally implemented as a vector with a mapping function that maps the keys to
+the dense integer range.
+</p>
+
+<p>
+This is useful for cases like virtual registers in the LLVM code generator: they
+have a dense mapping that is offset by a compile-time constant (the first
+virtual register ID).</p>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="dss_densemap">"llvm/ADT/DenseMap.h"</a>
+</h4>
+
+<div>
+
+<p>
+DenseMap is a simple quadratically probed hash table. It excels at supporting
+small keys and values: it uses a single allocation to hold all of the pairs that
+are currently inserted in the map. DenseMap is a great way to map pointers to
+pointers, or map other small types to each other.
+</p>
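+
+<p>For example, a sketch of mapping original values to their replacements
+(<tt>OrigV</tt> and <tt>NewV</tt> are assumed to be <tt>Value*</tt>s provided
+by the surrounding code):</p>
+
+<div class="doc_code">
+<pre>
+DenseMap&lt;Value*, Value*&gt; ReplacementMap;
+ReplacementMap[OrigV] = NewV;   // <i>insert (or overwrite) a mapping</i>
+
+DenseMap&lt;Value*, Value*&gt;::iterator I = ReplacementMap.find(OrigV);
+if (I != ReplacementMap.end())
+  errs() &lt;&lt; "mapped to: " &lt;&lt; *I-&gt;second &lt;&lt; "\n";   // <i>look up without inserting</i>
+</pre>
+</div>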
+
+<p>
+There are several aspects of DenseMap that you should be aware of, however. The
+iterators in a DenseMap are invalidated whenever an insertion occurs, unlike
+std::map. Also, because DenseMap allocates space for a large number of key/value
+pairs (it starts with 64 by default), it will waste a lot of space if your keys
+or values are large. Finally, you must implement a partial specialization of
+DenseMapInfo for the key that you want, if it isn't already supported. This
+is required to tell DenseMap about two special marker values (which can never be
+inserted into the map) that it needs internally.</p>
+
+<p>
+DenseMap's find_as() method supports lookup operations using an alternate key
+type. This is useful in cases where the normal key type is expensive to
+construct, but cheap to compare against. The DenseMapInfo is responsible for
+defining the appropriate comparison and hashing methods for each alternate
+key type used.
+</p>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="dss_valuemap">"llvm/ADT/ValueMap.h"</a>
+</h4>
+
+<div>
+
+<p>
+ValueMap is a wrapper around a <a href="#dss_densemap">DenseMap</a> mapping
+Value*s (or subclasses) to another type. When a Value is deleted or RAUW'ed,
+ValueMap will update itself so the new version of the key is mapped to the same
+value, just as if the key were a WeakVH. You can configure exactly how this
+happens, and what else happens on these two events, by passing
+a <code>Config</code> parameter to the ValueMap template.</p>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="dss_intervalmap">"llvm/ADT/IntervalMap.h"</a>
+</h4>
+
+<div>
+
+<p> IntervalMap is a compact map for small keys and values. It maps key
+intervals instead of single keys, and it will automatically coalesce adjacent
+intervals. When the map only contains a few intervals, they are stored in the
+map object itself to avoid allocations.</p>
+
+<p> The IntervalMap iterators are quite big, so they should not be passed around
+as STL iterators. The heavyweight iterators allow a smaller data structure.</p>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="dss_map">&lt;map&gt;</a>
+</h4>
+
+<div>
+
+<p>
+std::map has similar characteristics to <a href="#dss_set">std::set</a>: it uses
+a single allocation per pair inserted into the map, it offers log(n) lookup with
+an extremely large constant factor, imposes a space penalty of 3 pointers per
+pair in the map, etc.</p>
+
+<p>std::map is most useful when your keys or values are very large, if you need
+to iterate over the collection in sorted order, or if you need stable iterators
+into the map (i.e. they don't get invalidated if an insertion or deletion of
+another element takes place).</p>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="dss_inteqclasses">"llvm/ADT/IntEqClasses.h"</a>
+</h4>
+
+<div>
+
+<p>IntEqClasses provides a compact representation of equivalence classes of
+small integers. Initially, each integer in the range 0..n-1 has its own
+equivalence class. Classes can be joined by passing two class representatives to
+the join(a, b) method. Two integers are in the same class when findLeader()
+returns the same representative.</p>
+
+<p>Once all equivalence classes are formed, the map can be compressed so each
+integer 0..n-1 maps to an equivalence class number in the range 0..m-1, where m
+is the total number of equivalence classes. The map must be uncompressed before
+it can be edited again.</p>
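+
+<p>For example, a small sketch of joining classes and then compressing
+them:</p>
+
+<div class="doc_code">
+<pre>
+IntEqClasses EC(10);       // <i>ten singleton classes: {0}, {1}, ..., {9}</i>
+EC.join(1, 4);
+EC.join(4, 7);             // <i>now 1, 4 and 7 share a class</i>
+bool Same = EC.findLeader(1) == EC.findLeader(7);   // <i>true</i>
+EC.compress();             // <i>classes renumbered 0..m-1; no more joins allowed</i>
+unsigned Class = EC[7];    // <i>compressed class number of 7</i>
+</pre>
+</div>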
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="dss_immutablemap">"llvm/ADT/ImmutableMap.h"</a>
+</h4>
+
+<div>
+
+<p>
+ImmutableMap is an immutable (functional) map implementation based on an AVL
+tree.
+Adding or removing elements is done through a Factory object and results in the
+creation of a new ImmutableMap object.
+If an ImmutableMap already exists with the given key set, then the existing one
+is returned; equality is compared with a FoldingSetNodeID.
+The time and space complexity of add or remove operations is logarithmic in the
+size of the original map.</p>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="dss_othermap">Other Map-Like Container Options</a>
+</h4>
+
+<div>
+
+<p>
+The STL provides several other options, such as std::multimap and the various
+"hash_map" like containers (whether from C++ TR1 or from the SGI library). We
+never use hash_map and unordered_map because they are generally very expensive
+(each insertion requires a malloc) and very non-portable.</p>
+
+<p>std::multimap is useful if you want to map a key to multiple values, but has
+all the drawbacks of std::map. A sorted vector or some other approach is almost
+always better.</p>
+
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="ds_bit">Bit storage containers (BitVector, SparseBitVector)</a>
+</h3>
+
+<div>
+<p>Unlike the other containers, there are only two bit storage containers, and
+choosing when to use each is relatively straightforward.</p>
+
+<p>One additional option is
+<tt>std::vector&lt;bool&gt;</tt>: we discourage its use for two reasons: 1) the
+implementation in many common compilers (e.g. commonly available versions of
+GCC) is extremely inefficient, and 2) the C++ standards committee is likely to
+deprecate this container and/or change it significantly somehow. In any case,
+please don't use it.</p>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="dss_bitvector">BitVector</a>
+</h4>
+
+<div>
+<p> The BitVector container provides a dynamic size set of bits for manipulation.
+It supports individual bit setting/testing, as well as set operations. The set
+operations take time O(size of bitvector), but operations are performed one word
+at a time, instead of one bit at a time. This makes the BitVector very fast for
+set operations compared to other containers. Use the BitVector when you expect
+the number of set bits to be high (i.e. a dense set).
+</p>
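+
+<p>For example, a sketch of tracking a dense set of register numbers
+(<tt>NumRegs</tt> and <tt>OtherLive</tt> are assumed to come from the
+surrounding code):</p>
+
+<div class="doc_code">
+<pre>
+BitVector Live(NumRegs);   // <i>NumRegs bits, all initially clear</i>
+Live.set(5);               // <i>mark register 5 live</i>
+if (Live.test(5)) {
+  // ...
+}
+Live |= OtherLive;         // <i>whole-set union, performed a word at a time</i>
+</pre>
+</div>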
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="dss_smallbitvector">SmallBitVector</a>
+</h4>
+
+<div>
+<p> The SmallBitVector container provides the same interface as BitVector, but
+it is optimized for the case where only a small number of bits, less than
+25 or so, are needed. It also transparently supports larger bit counts, but
+slightly less efficiently than a plain BitVector, so SmallBitVector should
+only be used when larger counts are rare.
+</p>
+
+<p>
+At this time, SmallBitVector does not support set operations (and, or, xor),
+and its operator[] does not provide an assignable lvalue.
+</p>
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="dss_sparsebitvector">SparseBitVector</a>
+</h4>
+
+<div>
+<p> The SparseBitVector container is much like BitVector, with one major
+difference: Only the bits that are set, are stored. This makes the
+SparseBitVector much more space efficient than BitVector when the set is sparse,
+as well as making set operations O(number of set bits) instead of O(size of
+universe). The downside to the SparseBitVector is that setting and testing of
+random bits is O(N), and on large SparseBitVectors this can be slower than
+BitVector. In our implementation, setting or testing bits in sorted order
+(either forwards or reverse) is O(1) worst case. Testing and setting bits
+within 128 bits (depends on size) of the current bit is also O(1). As a
+general statement, testing/setting bits in a SparseBitVector is O(distance
+away from last set bit).
+</p>
+</div>
+
+</div>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2>
+ <a name="common">Helpful Hints for Common Operations</a>
+</h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>This section describes how to perform some very simple transformations of
+LLVM code. This is meant to give examples of common idioms used, showing the
+practical side of LLVM transformations.</p>
+
+<p>Because this is a "how-to" section,
+you should also read about the main classes that you will be working with. The
+<a href="#coreclasses">Core LLVM Class Hierarchy Reference</a> contains details
+and descriptions of the main classes that you should know about.</p>
+
+<!-- NOTE: this section should be heavy on example code -->
+<!-- ======================================================================= -->
+<h3>
+ <a name="inspection">Basic Inspection and Traversal Routines</a>
+</h3>
+
+<div>
+
+<p>The LLVM compiler infrastructure has many different data structures that may
+be traversed. Following the example of the C++ standard template library, the
+techniques used to traverse these various data structures are all basically the
+same. For an enumerable sequence of values, the <tt>XXXbegin()</tt> function (or
+method) returns an iterator to the start of the sequence, the <tt>XXXend()</tt>
+function returns an iterator pointing to one past the last valid element of the
+sequence, and there is some <tt>XXXiterator</tt> data type that is common
+between the two operations.</p>
+
+<p>Because the pattern for iteration is common across many different aspects of
+the program representation, the standard template library algorithms may be used
+on them, and it is easier to remember how to iterate. First we show a few common
+examples of the data structures that need to be traversed. Other data
+structures are traversed in very similar ways.</p>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="iterate_function">Iterating over the </a><a
+ href="#BasicBlock"><tt>BasicBlock</tt></a>s in a <a
+ href="#Function"><tt>Function</tt></a>
+</h4>
+
+<div>
+
+<p>It's quite common to have a <tt>Function</tt> instance that you'd like to
+transform in some way; in particular, you'd like to manipulate its
+<tt>BasicBlock</tt>s. To facilitate this, you'll need to iterate over all of
+the <tt>BasicBlock</tt>s that constitute the <tt>Function</tt>. The following is
+an example that prints the name of a <tt>BasicBlock</tt> and the number of
+<tt>Instruction</tt>s it contains:</p>
+
+<div class="doc_code">
+<pre>
+// <i>func is a pointer to a Function instance</i>
+for (Function::iterator i = func-&gt;begin(), e = func-&gt;end(); i != e; ++i)
+ // <i>Print out the name of the basic block if it has one, and then the</i>
+ // <i>number of instructions that it contains</i>
+ errs() &lt;&lt; "Basic block (name=" &lt;&lt; i-&gt;getName() &lt;&lt; ") has "
+ &lt;&lt; i-&gt;size() &lt;&lt; " instructions.\n";
+</pre>
+</div>
+
+<p>Note that i can be used as if it were a pointer for the purposes of
+invoking member functions of the <tt>BasicBlock</tt> class. This is
+because the indirection operator is overloaded for the iterator
+classes. In the above code, the expression <tt>i-&gt;size()</tt> is
+exactly equivalent to <tt>(*i).size()</tt> just like you'd expect.</p>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="iterate_basicblock">Iterating over the </a><a
+ href="#Instruction"><tt>Instruction</tt></a>s in a <a
+ href="#BasicBlock"><tt>BasicBlock</tt></a>
+</h4>
+
+<div>
+
+<p>Just like when dealing with <tt>BasicBlock</tt>s in <tt>Function</tt>s, it's
+easy to iterate over the individual instructions that make up
+<tt>BasicBlock</tt>s. Here's a code snippet that prints out each instruction in
+a <tt>BasicBlock</tt>:</p>
+
+<div class="doc_code">
+<pre>
+// <i>blk is a pointer to a BasicBlock instance</i>
+for (BasicBlock::iterator i = blk-&gt;begin(), e = blk-&gt;end(); i != e; ++i)
+ // <i>The next statement works since operator&lt;&lt;(ostream&amp;,...)</i>
+ // <i>is overloaded for Instruction&amp;</i>
+ errs() &lt;&lt; *i &lt;&lt; "\n";
+</pre>
+</div>
+
+<p>However, this isn't really the best way to print out the contents of a
+<tt>BasicBlock</tt>! Since the ostream operators are overloaded for virtually
+anything you'll care about, you could have just invoked the print routine on the
+basic block itself: <tt>errs() &lt;&lt; *blk &lt;&lt; "\n";</tt>.</p>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="iterate_institer">Iterating over the </a><a
+ href="#Instruction"><tt>Instruction</tt></a>s in a <a
+ href="#Function"><tt>Function</tt></a>
+</h4>
+
+<div>
+
+<p>If you're finding that you commonly iterate over a <tt>Function</tt>'s
+<tt>BasicBlock</tt>s and then that <tt>BasicBlock</tt>'s <tt>Instruction</tt>s,
+<tt>InstIterator</tt> should be used instead. You'll need to include <a
+href="/doxygen/InstIterator_8h-source.html"><tt>llvm/Support/InstIterator.h</tt></a>,
+and then instantiate <tt>InstIterator</tt>s explicitly in your code. Here's a
+small example that shows how to dump all instructions in a function to the standard error stream:</p>
+
+<div class="doc_code">
+<pre>
+#include "<a href="/doxygen/InstIterator_8h-source.html">llvm/Support/InstIterator.h</a>"
+
+// <i>F is a pointer to a Function instance</i>
+for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I)
+ errs() &lt;&lt; *I &lt;&lt; "\n";
+</pre>
+</div>
+
+<p>Easy, isn't it? You can also use <tt>InstIterator</tt>s to fill a
+work list with its initial contents. For example, if you wanted to
+initialize a work list to contain all instructions in a <tt>Function</tt>
+F, all you would need to do is something like:</p>
+
+<div class="doc_code">
+<pre>
+std::set&lt;Instruction*&gt; worklist;
+// or better yet, SmallPtrSet&lt;Instruction*, 64&gt; worklist;
+
+for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I)
+ worklist.insert(&amp;*I);
+</pre>
+</div>
+
+<p>The STL set <tt>worklist</tt> would now contain all instructions in the
+<tt>Function</tt> pointed to by F.</p>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="iterate_convert">Turning an iterator into a class pointer (and
+ vice-versa)</a>
+</h4>
+
+<div>
+
+<p>Sometimes, it'll be useful to grab a reference (or pointer) to a class
+instance when all you've got at hand is an iterator. Well, extracting
+a reference or a pointer from an iterator is very straight-forward.
+Assuming that <tt>i</tt> is a <tt>BasicBlock::iterator</tt> and <tt>j</tt>
+is a <tt>BasicBlock::const_iterator</tt>:</p>
+
+<div class="doc_code">
+<pre>
+Instruction&amp; inst = *i;   // <i>Grab reference to instruction</i>
+Instruction* pinst = &amp;*i; // <i>Grab pointer to instruction</i>
+const Instruction&amp; inst = *j;
+</pre>
+</div>
+
+<p>However, the iterators you'll be working with in the LLVM framework are
+special: they will automatically convert to a ptr-to-instance type whenever they
+need to. Instead of dereferencing the iterator and then taking the address of
+the result, you can simply assign the iterator to the proper pointer type and
+you get the dereference and address-of operation as a result of the assignment
+(behind the scenes, this is a result of overloading casting mechanisms). Thus
+the last line of the last example,</p>
+
+<div class="doc_code">
+<pre>
+Instruction *pinst = &amp;*i;
+</pre>
+</div>
+
+<p>is semantically equivalent to</p>
+
+<div class="doc_code">
+<pre>
+Instruction *pinst = i;
+</pre>
+</div>
+
+<p>It's also possible to turn a class pointer into the corresponding iterator,
+and this is a constant time operation (very efficient). The following code
+snippet illustrates use of the conversion constructors provided by LLVM
+iterators. By using these, you can explicitly grab the iterator of something
+without actually obtaining it via iteration over some structure:</p>
+
+<div class="doc_code">
+<pre>
+void printNextInstruction(Instruction* inst) {
+ BasicBlock::iterator it(inst);
+ ++it; // <i>After this line, it refers to the instruction after *inst</i>
+ if (it != inst-&gt;getParent()-&gt;end()) errs() &lt;&lt; *it &lt;&lt; "\n";
+}
+</pre>
+</div>
+
+<p>Unfortunately, these implicit conversions come at a cost; they prevent
+these iterators from conforming to standard iterator conventions, and thus
+from being usable with standard algorithms and containers. For example, they
+prevent the following code, where <tt>B</tt> is a <tt>BasicBlock</tt>,
+from compiling:</p>
+
+<div class="doc_code">
+<pre>
+ llvm::SmallVector&lt;llvm::Instruction *, 16&gt;(B-&gt;begin(), B-&gt;end());
+</pre>
+</div>
+
+<p>Because of this, these implicit conversions may be removed some day,
+and <tt>operator*</tt> changed to return a pointer instead of a reference.</p>
+
+</div>
+
+<!--_______________________________________________________________________-->
+<h4>
+ <a name="iterate_complex">Finding call sites: a slightly more complex
+ example</a>
+</h4>
+
+<div>
+
+<p>Say that you're writing a <tt>FunctionPass</tt> and would like to count all the
+locations in the entire module (that is, across every <tt>Function</tt>) where a
+certain function (i.e., some <tt>Function</tt>*) is called. As you'll
+learn later, you may want to use an <tt>InstVisitor</tt> to accomplish this in a
+much more straight-forward manner, but this example will allow us to explore how
+you'd do it if you didn't have <tt>InstVisitor</tt> around. In pseudo-code, this
+is what we want to do:</p>
+
+<div class="doc_code">
+<pre>
+initialize callCounter to zero
+for each Function f in the Module
+ for each BasicBlock b in f
+ for each Instruction i in b
+ if (i is a CallInst and calls the given function)
+ increment callCounter
+</pre>
+</div>
+
+<p>And the actual code is (remember, because we're writing a
+<tt>FunctionPass</tt>, our <tt>FunctionPass</tt>-derived class simply has to
+override the <tt>runOnFunction</tt> method):</p>
+
+<div class="doc_code">
+<pre>
+Function* targetFunc = ...;
+
+class OurFunctionPass : public FunctionPass {
+ public:
+ OurFunctionPass(): callCounter(0) { }
+
+  virtual bool runOnFunction(Function&amp; F) {
+    for (Function::iterator b = F.begin(), be = F.end(); b != be; ++b) {
+      for (BasicBlock::iterator i = b-&gt;begin(), ie = b-&gt;end(); i != ie; ++i) {
+        if (<a href="#CallInst">CallInst</a>* callInst = <a href="#isa">dyn_cast</a>&lt;<a
+            href="#CallInst">CallInst</a>&gt;(&amp;*i)) {
+          // <i>We know we've encountered a call instruction, so we</i>
+          // <i>need to determine if it's a call to the</i>
+          // <i>function pointed to by targetFunc or not.</i>
+          if (callInst-&gt;getCalledFunction() == targetFunc)
+            ++callCounter;
+        }
+      }
+    }
+    return false;
+  }
+
+ private:
+ unsigned callCounter;
+};
+</pre>
+</div>
+
+</div>
+
+<!--_______________________________________________________________________-->
+<h4>
+ <a name="calls_and_invokes">Treating calls and invokes the same way</a>
+</h4>
+
+<div>
+
+<p>You may have noticed that the previous example was a bit oversimplified in
+that it did not deal with call sites generated by 'invoke' instructions. In
+this, and in other situations, you may find that you want to treat
+<tt>CallInst</tt>s and <tt>InvokeInst</tt>s the same way, even though their
+most-specific common base class is <tt>Instruction</tt>, which includes lots of
+less closely-related things. For these cases, LLVM provides a handy wrapper
+class called <a
+href="http://llvm.org/doxygen/classllvm_1_1CallSite.html"><tt>CallSite</tt></a>.
+It is essentially a wrapper around an <tt>Instruction</tt> pointer, with some
+methods that provide functionality common to <tt>CallInst</tt>s and
+<tt>InvokeInst</tt>s.</p>
+
+<p>This class has "value semantics": it should be passed by value, not by
+reference, and it should not be dynamically allocated or deallocated using
+<tt>operator new</tt> or <tt>operator delete</tt>. It is efficiently copyable,
+assignable and constructible, with costs equivalent to those of a bare pointer.
+If you look at its definition, it has only a single pointer member.</p>
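+
+<p>For example, the following helper (a sketch; the exact set of <tt>CallSite</tt>
+constructors and methods, as well as the header path, may vary between LLVM
+versions) treats calls and invokes uniformly when checking whether an
+instruction calls a particular function:</p>
+
+<div class="doc_code">
+<pre>
+#include "llvm/Support/CallSite.h"
+
+// <i>A sketch: returns true if I is a call or invoke of targetFunc.</i>
+static bool callsTarget(Instruction *I, Function *targetFunc) {
+  if (!isa&lt;CallInst&gt;(I) &amp;&amp; !isa&lt;InvokeInst&gt;(I))
+    return false;
+  CallSite CS(I);  // <i>wraps the call or invoke instruction</i>
+  return CS.getCalledFunction() == targetFunc;
+}
+</pre>
+</div>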
+
+</div>
+
+<!--_______________________________________________________________________-->
+<h4>
+ <a name="iterate_chains">Iterating over def-use &amp; use-def chains</a>
+</h4>
+
+<div>
+
+<p>Frequently, we might have an instance of the <a
+href="/doxygen/classllvm_1_1Value.html">Value Class</a> and we want to
+determine which <tt>User</tt>s use the <tt>Value</tt>. The list of all
+<tt>User</tt>s of a particular <tt>Value</tt> is called a <i>def-use</i> chain.
+For example, let's say we have a <tt>Function*</tt> named <tt>F</tt> that points
+to a particular function <tt>foo</tt>. Finding all of the instructions that
+<i>use</i> <tt>foo</tt> is as simple as iterating over the <i>def-use</i> chain
+of <tt>F</tt>:</p>
+
+<div class="doc_code">
+<pre>
+Function *F = ...;
+
+for (Value::use_iterator i = F-&gt;use_begin(), e = F-&gt;use_end(); i != e; ++i)
+ if (Instruction *Inst = dyn_cast&lt;Instruction&gt;(*i)) {
+ errs() &lt;&lt; "F is used in instruction:\n";
+ errs() &lt;&lt; *Inst &lt;&lt; "\n";
+ }
+</pre>
+</div>
+
+<p>Note that dereferencing a <tt>Value::use_iterator</tt> is not a very cheap
+operation. Instead of performing <tt>*i</tt> above several times, consider
+doing it only once in the loop body and reusing its result.</p>
+
+<p>Alternatively, it's common to have an instance of the <a
+href="/doxygen/classllvm_1_1User.html">User Class</a> and need to know what
+<tt>Value</tt>s are used by it. The list of all <tt>Value</tt>s used by a
+<tt>User</tt> is known as a <i>use-def</i> chain. Instances of class
+<tt>Instruction</tt> are common <tt>User</tt>s, so we might want to iterate over
+all of the values that a particular instruction uses (that is, the operands of
+the particular <tt>Instruction</tt>):</p>
+
+<div class="doc_code">
+<pre>
+Instruction *pi = ...;
+
+for (User::op_iterator i = pi-&gt;op_begin(), e = pi-&gt;op_end(); i != e; ++i) {
+ Value *v = *i;
+ // <i>...</i>
+}
+</pre>
+</div>
+
+<p>Declaring objects as <tt>const</tt> is an important tool for enforcing
+mutation-free algorithms (such as analyses). For this purpose, the above
+iterators come in constant flavors as <tt>Value::const_use_iterator</tt>
+and <tt>Value::const_op_iterator</tt>. They automatically arise when
+calling <tt>use/op_begin()</tt> on <tt>const Value*</tt>s or
+<tt>const User*</tt>s respectively. Upon dereferencing, they return
+<tt>const Use*</tt>s. Otherwise the above patterns remain unchanged.</p>
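+
+<p>For instance, a const traversal of the users of a value follows the same
+pattern as the earlier def-use example (a minimal sketch):</p>
+
+<div class="doc_code">
+<pre>
+const Function *F = ...;
+
+for (Value::const_use_iterator i = F-&gt;use_begin(), e = F-&gt;use_end();
+     i != e; ++i)
+  if (const Instruction *Inst = dyn_cast&lt;Instruction&gt;(*i)) {
+    // <i>Inst uses F; only const member functions may be called on it.</i>
+  }
+</pre>
+</div>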
+
+</div>
+
+<!--_______________________________________________________________________-->
+<h4>
+ <a name="iterate_preds">Iterating over predecessors &amp;
+successors of blocks</a>
+</h4>
+
+<div>
+
+<p>Iterating over the predecessors and successors of a block is quite easy
+with the routines defined in <tt>"llvm/Support/CFG.h"</tt>. Just use code like
+this to iterate over all predecessors of BB:</p>
+
+<div class="doc_code">
+<pre>
+#include "llvm/Support/CFG.h"
+BasicBlock *BB = ...;
+
+for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
+ BasicBlock *Pred = *PI;
+ // <i>...</i>
+}
+</pre>
+</div>
+
+<p>Similarly, to iterate over successors use
+succ_iterator/succ_begin/succ_end.</p>
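+
+<p>For example (a sketch mirroring the predecessor loop above):</p>
+
+<div class="doc_code">
+<pre>
+#include "llvm/Support/CFG.h"
+BasicBlock *BB = ...;
+
+for (succ_iterator SI = succ_begin(BB), E = succ_end(BB); SI != E; ++SI) {
+  BasicBlock *Succ = *SI;
+  // <i>...</i>
+}
+</pre>
+</div>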
+
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="simplechanges">Making simple changes</a>
+</h3>
+
+<div>
+
+<p>There are some primitive transformation operations present in the LLVM
+infrastructure that are worth knowing about. When performing
+transformations, it's fairly common to manipulate the contents of basic
+blocks. This section describes some of the common methods for doing so
+and gives example code.</p>
+
+<!--_______________________________________________________________________-->
+<h4>
+ <a name="schanges_creating">Creating and inserting new
+ <tt>Instruction</tt>s</a>
+</h4>
+
+<div>
+
+<p><i>Instantiating Instructions</i></p>
+
+<p>Creation of <tt>Instruction</tt>s is straight-forward: simply call the
+constructor for the kind of instruction to instantiate and provide the necessary
+parameters. For example, an <tt>AllocaInst</tt> only <i>requires</i> a
+(const-ptr-to) <tt>Type</tt>. Thus:</p>
+
+<div class="doc_code">
+<pre>
+AllocaInst* ai = new AllocaInst(Type::Int32Ty);
+</pre>
+</div>
+
+<p>will create an <tt>AllocaInst</tt> instance that represents the allocation of
+one integer in the current stack frame, at run time. Each <tt>Instruction</tt>
+subclass is likely to have varying default parameters which change the semantics
+of the instruction, so refer to the <a
+href="/doxygen/classllvm_1_1Instruction.html">doxygen documentation for the subclass of
+Instruction</a> that you're interested in instantiating.</p>
+
+<p><i>Naming values</i></p>
+
+<p>It is very useful to name the values of instructions when you're able to, as
+this facilitates the debugging of your transformations. If you end up looking
+at generated LLVM machine code, you definitely want to have logical names
+associated with the results of instructions! By supplying a value for the
+<tt>Name</tt> (default) parameter of the <tt>Instruction</tt> constructor, you
+associate a logical name with the result of the instruction's execution at
+run time. For example, say that I'm writing a transformation that dynamically
+allocates space for an integer on the stack, and that integer is going to be
+used as some kind of index by some other code. To accomplish this, I place an
+<tt>AllocaInst</tt> at the first point in the first <tt>BasicBlock</tt> of some
+<tt>Function</tt>, and I'm intending to use it within the same
+<tt>Function</tt>. I might do:</p>
+
+<div class="doc_code">
+<pre>
+AllocaInst* pa = new AllocaInst(Type::Int32Ty, 0, "indexLoc");
+</pre>
+</div>
+
+<p>where <tt>indexLoc</tt> is now the logical name of the instruction's
+execution value, which is a pointer to an integer on the run time stack.</p>
+
+<p><i>Inserting instructions</i></p>
+
+<p>There are essentially two ways to insert an <tt>Instruction</tt>
+into an existing sequence of instructions that form a <tt>BasicBlock</tt>:</p>
+
+<ul>
+ <li>Insertion into an explicit instruction list
+
+ <p>Given a <tt>BasicBlock* pb</tt>, an <tt>Instruction* pi</tt> within that
+ <tt>BasicBlock</tt>, and a newly-created instruction we wish to insert
+ before <tt>*pi</tt>, we do the following: </p>
+
+<div class="doc_code">
+<pre>
+BasicBlock *pb = ...;
+Instruction *pi = ...;
+Instruction *newInst = new Instruction(...);
+
+pb-&gt;getInstList().insert(pi, newInst); // <i>Inserts newInst before pi in pb</i>
+</pre>
+</div>
+
+ <p>Appending to the end of a <tt>BasicBlock</tt> is so common that
+ the <tt>Instruction</tt> class and <tt>Instruction</tt>-derived
+ classes provide constructors which take a pointer to a
+  <tt>BasicBlock</tt> to be appended to. For example, code that
+  looked like:</p>
+
+<div class="doc_code">
+<pre>
+BasicBlock *pb = ...;
+Instruction *newInst = new Instruction(...);
+
+pb-&gt;getInstList().push_back(newInst); // <i>Appends newInst to pb</i>
+</pre>
+</div>
+
+ <p>becomes: </p>
+
+<div class="doc_code">
+<pre>
+BasicBlock *pb = ...;
+Instruction *newInst = new Instruction(..., pb);
+</pre>
+</div>
+
+ <p>which is much cleaner, especially if you are creating
+ long instruction streams.</p></li>
+
+ <li>Insertion into an implicit instruction list
+
+ <p><tt>Instruction</tt> instances that are already in <tt>BasicBlock</tt>s
+ are implicitly associated with an existing instruction list: the instruction
+ list of the enclosing basic block. Thus, we could have accomplished the same
+ thing as the above code without being given a <tt>BasicBlock</tt> by doing:
+ </p>
+
+<div class="doc_code">
+<pre>
+Instruction *pi = ...;
+Instruction *newInst = new Instruction(...);
+
+pi-&gt;getParent()-&gt;getInstList().insert(pi, newInst);
+</pre>
+</div>
+
+ <p>In fact, this sequence of steps occurs so frequently that the
+ <tt>Instruction</tt> class and <tt>Instruction</tt>-derived classes provide
+ constructors which take (as a default parameter) a pointer to an
+ <tt>Instruction</tt> which the newly-created <tt>Instruction</tt> should
+ precede. That is, <tt>Instruction</tt> constructors are capable of
+ inserting the newly-created instance into the <tt>BasicBlock</tt> of a
+ provided instruction, immediately before that instruction. Using an
+  <tt>Instruction</tt> constructor with an <tt>insertBefore</tt> (default)
+ parameter, the above code becomes:</p>
+
+<div class="doc_code">
+<pre>
+Instruction* pi = ...;
+Instruction* newInst = new Instruction(..., pi);
+</pre>
+</div>
+
+ <p>which is much cleaner, especially if you're creating a lot of
+ instructions and adding them to <tt>BasicBlock</tt>s.</p></li>
+</ul>
+
+</div>
+
+<!--_______________________________________________________________________-->
+<h4>
+ <a name="schanges_deleting">Deleting <tt>Instruction</tt>s</a>
+</h4>
+
+<div>
+
+<p>Deleting an instruction from an existing sequence of instructions that form a
+<a href="#BasicBlock"><tt>BasicBlock</tt></a> is very straight-forward: just
+call the instruction's <tt>eraseFromParent()</tt> method. For example:</p>
+
+<div class="doc_code">
+<pre>
+<a href="#Instruction">Instruction</a> *I = .. ;
+I-&gt;eraseFromParent();
+</pre>
+</div>
+
+<p>This unlinks the instruction from its containing basic block and deletes
+it. If you'd just like to unlink the instruction from its containing basic
+block but not delete it, you can use the <tt>removeFromParent()</tt> method.</p>
+
+</div>
+
+<!--_______________________________________________________________________-->
+<h4>
+ <a name="schanges_replacing">Replacing an <tt>Instruction</tt> with another
+ <tt>Value</tt></a>
+</h4>
+
+<div>
+
+<h5><i>Replacing individual instructions</i></h5>
+
+<p>Including "<a href="/doxygen/BasicBlockUtils_8h-source.html">llvm/Transforms/Utils/BasicBlockUtils.h</a>"
+permits use of two very useful replace functions: <tt>ReplaceInstWithValue</tt>
+and <tt>ReplaceInstWithInst</tt>.</p>
+
+
+<div>
+<ul>
+ <li><tt>ReplaceInstWithValue</tt>
+
+ <p>This function replaces all uses of a given instruction with a value,
+ and then removes the original instruction. The following example
+ illustrates the replacement of the result of a particular
+ <tt>AllocaInst</tt> that allocates memory for a single integer with a null
+ pointer to an integer.</p>
+
+<div class="doc_code">
+<pre>
+AllocaInst* instToReplace = ...;
+BasicBlock::iterator ii(instToReplace);
+
+ReplaceInstWithValue(instToReplace-&gt;getParent()-&gt;getInstList(), ii,
+ Constant::getNullValue(PointerType::getUnqual(Type::Int32Ty)));
+</pre></div></li>
+
+ <li><tt>ReplaceInstWithInst</tt>
+
+ <p>This function replaces a particular instruction with another
+ instruction, inserting the new instruction into the basic block at the
+ location where the old instruction was, and replacing any uses of the old
+ instruction with the new instruction. The following example illustrates
+ the replacement of one <tt>AllocaInst</tt> with another.</p>
+
+<div class="doc_code">
+<pre>
+AllocaInst* instToReplace = ...;
+BasicBlock::iterator ii(instToReplace);
+
+ReplaceInstWithInst(instToReplace-&gt;getParent()-&gt;getInstList(), ii,
+ new AllocaInst(Type::Int32Ty, 0, "ptrToReplacedInt"));
+</pre></div></li>
+</ul>
+
+</div>
+
+<h5><i>Replacing multiple uses of <tt>User</tt>s and <tt>Value</tt>s</i></h5>
+
+<p>You can use <tt>Value::replaceAllUsesWith</tt> and
+<tt>User::replaceUsesOfWith</tt> to change more than one use at a time. See the
+doxygen documentation for the <a href="/doxygen/classllvm_1_1Value.html">Value Class</a>
+and <a href="/doxygen/classllvm_1_1User.html">User Class</a>, respectively, for more
+information.</p>
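+
+<p>As a brief sketch of how these might be used together (see the linked doxygen
+pages for the exact signatures):</p>
+
+<div class="doc_code">
+<pre>
+Instruction *Inst = ...;
+Value *OldVal = ...;
+Value *NewVal = ...;
+
+// <i>Make every user of Inst refer to NewVal instead.</i>
+Inst-&gt;replaceAllUsesWith(NewVal);
+
+// <i>Make Inst itself refer to NewVal wherever it currently uses OldVal.</i>
+Inst-&gt;replaceUsesOfWith(OldVal, NewVal);
+</pre>
+</div>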
+
+<!-- Value::replaceAllUsesWith User::replaceUsesOfWith Point out:
+include/llvm/Transforms/Utils/ especially BasicBlockUtils.h with:
+ReplaceInstWithValue, ReplaceInstWithInst -->
+
+</div>
+
+<!--_______________________________________________________________________-->
+<h4>
+ <a name="schanges_deletingGV">Deleting <tt>GlobalVariable</tt>s</a>
+</h4>
+
+<div>
+
+<p>Deleting a global variable from a module is just as easy as deleting an
+Instruction. First, you must have a pointer to the global variable that you wish
+ to delete. You use this pointer to erase it from its parent, the module.
+ For example:</p>
+
+<div class="doc_code">
+<pre>
+<a href="#GlobalVariable">GlobalVariable</a> *GV = .. ;
+
+GV-&gt;eraseFromParent();
+</pre>
+</div>
+
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="create_types">How to Create Types</a>
+</h3>
+
+<div>
+
+<p>In generating IR, you may need some complex types. If you know these types
+statically, you can use <tt>TypeBuilder&lt;...&gt;::get()</tt>, defined
+in <tt>llvm/Support/TypeBuilder.h</tt>, to retrieve them. <tt>TypeBuilder</tt>
+has two forms depending on whether you're building types for cross-compilation
+or native library use. <tt>TypeBuilder&lt;T, true&gt;</tt> requires
+that <tt>T</tt> be independent of the host environment, meaning that it's built
+out of types from
+the <a href="/doxygen/namespacellvm_1_1types.html"><tt>llvm::types</tt></a>
+namespace and pointers, functions, arrays, etc. built of
+those. <tt>TypeBuilder&lt;T, false&gt;</tt> additionally allows native C types
+whose size may depend on the host compiler. For example,</p>
+
+<div class="doc_code">
+<pre>
+FunctionType *ft = TypeBuilder&lt;types::i&lt;8&gt;(types::i&lt;32&gt;*), true&gt;::get();
+</pre>
+</div>
+
+<p>is easier to read and write than the equivalent</p>
+
+<div class="doc_code">
+<pre>
+std::vector&lt;const Type*&gt; params;
+params.push_back(PointerType::getUnqual(Type::Int32Ty));
+FunctionType *ft = FunctionType::get(Type::Int8Ty, params, false);
+</pre>
+</div>
+
+<p>See the <a href="/doxygen/TypeBuilder_8h-source.html#l00001">class
+comment</a> for more details.</p>
+
+</div>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2>
+ <a name="threading">Threads and LLVM</a>
+</h2>
+<!-- *********************************************************************** -->
+
+<div>
+<p>
+This section describes the interaction of the LLVM APIs with multithreading,
+both on the part of client applications and of the JIT, in the hosted
+application.
+</p>
+
+<p>
+Note that LLVM's support for multithreading is still relatively young. Up
+through version 2.5, the execution of threaded hosted applications was
+supported, but not threaded client access to the APIs. While this use case is
+now supported, clients <em>must</em> adhere to the guidelines specified below to
+ensure proper operation in multithreaded mode.
+</p>
+
+<p>
+Note that, on Unix-like platforms, LLVM requires the presence of GCC's atomic
+intrinsics in order to support threaded operation. If you need a
+multithreading-capable LLVM on a platform without a suitably modern system
+compiler, consider compiling LLVM and LLVM-GCC in single-threaded mode, and
+using the resultant compiler to build a copy of LLVM with multithreading
+support.
+</p>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="startmultithreaded">Entering and Exiting Multithreaded Mode</a>
+</h3>
+
+<div>
+
+<p>
+In order to properly protect its internal data structures while avoiding
+excessive locking overhead in the single-threaded case, LLVM must initialize
+certain data structures necessary to provide guards around its internals. To do
+so, the client program must invoke <tt>llvm_start_multithreaded()</tt> before
+making any concurrent LLVM API calls. To subsequently tear down these
+structures, use the <tt>llvm_stop_multithreaded()</tt> call. You can also use
+the <tt>llvm_is_multithreaded()</tt> call to check the status of multithreaded
+mode.
+</p>
+
+<p>
+Note that both of these calls must be made <em>in isolation</em>. That is to
+say that no other LLVM API calls may be executing at any time during the
+execution of <tt>llvm_start_multithreaded()</tt> or
+<tt>llvm_stop_multithreaded()</tt>. It is the client's responsibility to enforce
+this isolation.
+</p>
+
+<p>
+The return value of <tt>llvm_start_multithreaded()</tt> indicates the success or
+failure of the initialization. Failure typically indicates that your copy of
+LLVM was built without multithreading support, usually because GCC atomic
+intrinsics were not found in your system compiler. In this case, the LLVM API
+will not be safe for concurrent calls. However, it <em>will</em> be safe for
+hosting threaded applications in the JIT, though <a href="#jitthreading">care
+must be taken</a> to ensure that side exits and the like do not accidentally
+result in concurrent LLVM API calls.
+</p>
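+
+<p>
+A typical usage pattern might look like the following sketch, assuming the
+declarations live in <tt>llvm/Support/Threading.h</tt> and that these calls are
+made with no other LLVM activity in flight:
+</p>
+
+<div class="doc_code">
+<pre>
+#include "llvm/Support/Threading.h"
+
+int main() {
+  if (!llvm::llvm_start_multithreaded()) {
+    // <i>This build cannot protect LLVM's internals; restrict yourself to</i>
+    // <i>single-threaded use of the APIs.</i>
+  }
+
+  // <i>... spawn threads and make (properly isolated) LLVM API calls ...</i>
+
+  llvm::llvm_stop_multithreaded();  // <i>again, with no other API calls running</i>
+  return 0;
+}
+</pre>
+</div>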
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="shutdown">Ending Execution with <tt>llvm_shutdown()</tt></a>
+</h3>
+
+<div>
+<p>
+When you are done using the LLVM APIs, you should call <tt>llvm_shutdown()</tt>
+to deallocate memory used for internal structures. This will also invoke
+<tt>llvm_stop_multithreaded()</tt> if LLVM is operating in multithreaded mode.
+As such, <tt>llvm_shutdown()</tt> requires the same isolation guarantees as
+<tt>llvm_stop_multithreaded()</tt>.
+</p>
+
+<p>
+Note that, if you use scope-based shutdown, you can use the
+<tt>llvm_shutdown_obj</tt> class, which calls <tt>llvm_shutdown()</tt> in its
+destructor.
+</p>
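+
+<p>
+For example (a minimal sketch; <tt>llvm_shutdown_obj</tt> is declared in
+<tt>llvm/Support/ManagedStatic.h</tt>):
+</p>
+
+<div class="doc_code">
+<pre>
+#include "llvm/Support/ManagedStatic.h"
+
+int main(int argc, char **argv) {
+  llvm::llvm_shutdown_obj Y;  // <i>calls llvm_shutdown() when it goes out of scope</i>
+
+  // <i>... use the LLVM APIs ...</i>
+  return 0;
+}
+</pre>
+</div>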
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="managedstatic">Lazy Initialization with <tt>ManagedStatic</tt></a>
+</h3>
+
+<div>
+<p>
+<tt>ManagedStatic</tt> is a utility class in LLVM used to implement static
+initialization of static resources, such as the global type tables. Before the
+invocation of <tt>llvm_shutdown()</tt>, it implements a simple lazy
+initialization scheme. Once <tt>llvm_start_multithreaded()</tt> returns,
+however, it uses double-checked locking to implement thread-safe lazy
+initialization.
+</p>
+
+<p>
+Note that, because no other threads are allowed to issue LLVM API calls before
+<tt>llvm_start_multithreaded()</tt> returns, it is possible to have
+<tt>ManagedStatic</tt>s of <tt>llvm::sys::Mutex</tt>s.
+</p>
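+
+<p>
+Client code can rely on the same mechanism for its own lazily constructed
+globals. A minimal sketch (the <tt>Counters</tt> name is purely illustrative):
+</p>
+
+<div class="doc_code">
+<pre>
+#include "llvm/Support/ManagedStatic.h"
+#include &lt;vector&gt;
+
+// <i>Constructed on first use; destroyed by llvm_shutdown().</i>
+static llvm::ManagedStatic&lt;std::vector&lt;unsigned&gt; &gt; Counters;
+
+void recordCounter(unsigned c) {
+  Counters-&gt;push_back(c);  // <i>operator-&gt; triggers the lazy initialization</i>
+}
+</pre>
+</div>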
+
+<p>
+The <tt>llvm_acquire_global_lock()</tt> and <tt>llvm_release_global_lock</tt>
+APIs provide access to the global lock used to implement the double-checked
+locking for lazy initialization. These should only be used internally to LLVM,
+and only if you know what you're doing!
+</p>
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="llvmcontext">Achieving Isolation with <tt>LLVMContext</tt></a>
+</h3>
+
+<div>
+<p>
+<tt>LLVMContext</tt> is an opaque class in the LLVM API which clients can use
+to operate multiple, isolated instances of LLVM concurrently within the same
+address space. For instance, in a hypothetical compile-server, the compilation
+of an individual translation unit is conceptually independent from all the
+others, and it would be desirable to be able to compile incoming translation
+units concurrently on independent server threads. Fortunately,
+<tt>LLVMContext</tt> exists to enable just this kind of scenario!
+</p>
+
+<p>
+Conceptually, <tt>LLVMContext</tt> provides isolation. Every LLVM entity
+(<tt>Module</tt>s, <tt>Value</tt>s, <tt>Type</tt>s, <tt>Constant</tt>s, etc.)
+in LLVM's in-memory IR belongs to an <tt>LLVMContext</tt>. Entities in
+different contexts <em>cannot</em> interact with each other: <tt>Module</tt>s in
+different contexts cannot be linked together, <tt>Function</tt>s cannot be added
+to <tt>Module</tt>s in different contexts, etc. What this means is that it is
+safe to compile on multiple threads simultaneously, as long as no two threads
+operate on entities within the same context.
+</p>
+
+<p>
+In practice, very few places in the API require the explicit specification of an
+<tt>LLVMContext</tt>, other than the <tt>Type</tt> creation/lookup APIs.
+Because every <tt>Type</tt> carries a reference to its owning context, most
+other entities can determine what context they belong to by looking at their
+own <tt>Type</tt>. If you are adding new entities to LLVM IR, please try to
+maintain this interface design.
+</p>
+
+<p>
+For clients that do <em>not</em> require the benefits of isolation, LLVM
+provides a convenience API <tt>getGlobalContext()</tt>. This returns a global,
+lazily initialized <tt>LLVMContext</tt> that may be used in situations where
+isolation is not a concern.
+</p>
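+
+<p>
+For example, a sketch of creating a module in its own context versus in the
+global context (header paths are those of the LLVM version this document
+describes and may differ in later releases):
+</p>
+
+<div class="doc_code">
+<pre>
+#include "llvm/LLVMContext.h"
+#include "llvm/Module.h"
+
+// <i>An isolated context, suitable for use on its own thread.</i>
+llvm::LLVMContext MyContext;
+llvm::Module *MyModule = new llvm::Module("my_module", MyContext);
+
+// <i>Or, when isolation is not a concern:</i>
+llvm::Module *M = new llvm::Module("m", llvm::getGlobalContext());
+</pre>
+</div>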
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="jitthreading">Threads and the JIT</a>
+</h3>
+
+<div>
+<p>
+LLVM's "eager" JIT compiler is safe to use in threaded programs. Multiple
+threads can call <tt>ExecutionEngine::getPointerToFunction()</tt> or
+<tt>ExecutionEngine::runFunction()</tt> concurrently, and multiple threads can
+run code output by the JIT concurrently. The user must still ensure that only
+one thread accesses IR in a given <tt>LLVMContext</tt> while another thread
+might be modifying it. One way to do that is to always hold the JIT lock while
+accessing IR outside the JIT (the JIT <em>modifies</em> the IR by adding
+<tt>CallbackVH</tt>s). Another way is to only
+call <tt>getPointerToFunction()</tt> from the <tt>LLVMContext</tt>'s thread.
+</p>
+
+<p>When the JIT is configured to compile lazily (using
+<tt>ExecutionEngine::DisableLazyCompilation(false)</tt>), there is currently a
+<a href="http://llvm.org/bugs/show_bug.cgi?id=5184">race condition</a> in
+updating call sites after a function is lazily-jitted. It's still possible to
+use the lazy JIT in a threaded program if you ensure that only one thread at a
+time can call any particular lazy stub and that the JIT lock guards any IR
+access, but we suggest using only the eager JIT in threaded programs.
+</p>
+</div>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2>
+ <a name="advanced">Advanced Topics</a>
+</h2>
+<!-- *********************************************************************** -->
+
+<div>
+<p>
+This section describes some of the advanced or obscure APIs that most clients
+do not need to be aware of. These APIs tend to manage the inner workings of the
+LLVM system, and only need to be accessed in unusual circumstances.
+</p>
+
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="SymbolTable">The <tt>ValueSymbolTable</tt> class</a>
+</h3>
+
+<div>
+<p>The <tt><a href="http://llvm.org/doxygen/classllvm_1_1ValueSymbolTable.html">
+ValueSymbolTable</a></tt> class provides a symbol table that the <a
+href="#Function"><tt>Function</tt></a> and <a href="#Module">
+<tt>Module</tt></a> classes use for naming value definitions. The symbol table
+can provide a name for any <a href="#Value"><tt>Value</tt></a>.
+</p>
+
+<p>Note that the <tt>SymbolTable</tt> class should not be directly accessed
+by most clients. It should only be used when iteration over the symbol table
+names themselves is required, which is very special purpose. Note that not
+all LLVM
+<tt><a href="#Value">Value</a></tt>s have names, and those without names (i.e., with
+an empty name) do not exist in the symbol table.
+</p>
+
+<p>Symbol tables support iteration over the values in the symbol
+table with <tt>begin/end/iterator</tt> and support querying to see if a
+specific name is in the symbol table (with <tt>lookup</tt>). The
+<tt>ValueSymbolTable</tt> class exposes no public mutator methods; instead,
+simply call <tt>setName</tt> on a value, which will autoinsert it into the
+appropriate symbol table.</p>
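+
+<p>A sketch of looking up a value by name in a function's symbol table, assuming
+<tt>Function::getValueSymbolTable()</tt> is used to reach the table and that a
+value named <tt>indexLoc</tt> may exist:</p>
+
+<div class="doc_code">
+<pre>
+#include "llvm/ValueSymbolTable.h"
+
+Function *F = ...;
+ValueSymbolTable &amp;ST = F-&gt;getValueSymbolTable();
+
+// <i>lookup returns the named Value, or null if no value has that name.</i>
+if (Value *V = ST.lookup("indexLoc"))
+  errs() &lt;&lt; "found: " &lt;&lt; *V &lt;&lt; "\n";
+</pre>
+</div>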
+
+</div>
+
+
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="UserLayout">The <tt>User</tt> and owned <tt>Use</tt> classes' memory layout</a>
+</h3>
+
+<div>
+<p>The <tt><a href="http://llvm.org/doxygen/classllvm_1_1User.html">
+User</a></tt> class provides a basis for expressing the ownership of <tt>User</tt>
+towards other <tt><a href="http://llvm.org/doxygen/classllvm_1_1Value.html">
+Value</a></tt>s. The <tt><a href="http://llvm.org/doxygen/classllvm_1_1Use.html">
+Use</a></tt> helper class is employed to do the bookkeeping and to facilitate <i>O(1)</i>
+addition and removal.</p>
+
+<!-- ______________________________________________________________________ -->
+<h4>
+ <a name="Use2User">
+ Interaction and relationship between <tt>User</tt> and <tt>Use</tt> objects
+ </a>
+</h4>
+
+<div>
+<p>
+A subclass of <tt>User</tt> can choose between incorporating its <tt>Use</tt> objects
+or referring to them out-of-line by means of a pointer. A mixed variant
+(some <tt>Use</tt>s inline, others hung off) is impractical and breaks the invariant
+that the <tt>Use</tt> objects belonging to the same <tt>User</tt> form a contiguous array.
+</p>
+
+<p>
+We have 2 different layouts in the <tt>User</tt> (sub)classes:</p>
+<ul>
+<li><p>Layout a)
+The <tt>Use</tt> object(s) are inside (resp. at fixed offset) of the <tt>User</tt>
+object and there are a fixed number of them.</p>
+
+<li><p>Layout b)
+The <tt>Use</tt> object(s) are referenced by a pointer to an
+array from the <tt>User</tt> object and there may be a variable
+number of them.</p>
+</ul>
+<p>
+As of v2.4 each layout still possesses a direct pointer to the
+start of the array of <tt>Use</tt>s. Though not mandatory for layout a),
+we stick to this redundancy for the sake of simplicity.
+The <tt>User</tt> object also stores the number of <tt>Use</tt> objects it
+has. (Theoretically this information can also be calculated
+given the scheme presented below.)</p>
+<p>
+Special forms of allocation operators (<tt>operator new</tt>)
+enforce the following memory layouts:</p>
+
+<ul>
+<li><p>Layout a) is modelled by prepending the <tt>User</tt> object by the <tt>Use[]</tt> array.</p>
+
+<pre>
+...---.---.---.---.-------...
+ | P | P | P | P | User
+'''---'---'---'---'-------'''
+</pre>
+
+<li><p>Layout b) is modelled by pointing at the <tt>Use[]</tt> array.</p>
+<pre>
+.-------...
+| User
+'-------'''
+ |
+ v
+ .---.---.---.---...
+ | P | P | P | P |
+ '---'---'---'---'''
+</pre>
+</ul>
+<i>(In the above figures '<tt>P</tt>' stands for the <tt>Use**</tt> that
+ is stored in each <tt>Use</tt> object in the member <tt>Use::Prev</tt>)</i>
+
+</div>
+
+<!-- ______________________________________________________________________ -->
+<h4>
+ <a name="Waymarking">The waymarking algorithm</a>
+</h4>
+
+<div>
+<p>
+Since the <tt>Use</tt> objects are deprived of the direct (back)pointer to
+their <tt>User</tt> objects, there must be a fast and exact method to
+recover it. This is accomplished by the following scheme:</p>
+
+<p>A bit-encoding in the 2 LSBits (least significant bits) of <tt>Use::Prev</tt> makes
+it possible to find the start of the <tt>User</tt> object:</p>
+<ul>
+<li><tt>00</tt> &mdash;&gt; binary digit 0</li>
+<li><tt>01</tt> &mdash;&gt; binary digit 1</li>
+<li><tt>10</tt> &mdash;&gt; stop and calculate (<tt>s</tt>)</li>
+<li><tt>11</tt> &mdash;&gt; full stop (<tt>S</tt>)</li>
+</ul>
+<p>
+Given a <tt>Use*</tt>, all we have to do is walk until we reach
+a stop: either a <tt>User</tt> lies immediately behind it, or we
+walk on to the next stop, picking up digits
+and calculating the offset:</p>
+<pre>
+.---.---.---.---.---.---.---.---.---.---.---.---.---.---.---.---.----------------
+| 1 | s | 1 | 0 | 1 | 0 | s | 1 | 1 | 0 | s | 1 | 1 | s | 1 | S | User (or User*)
+'---'---'---'---'---'---'---'---'---'---'---'---'---'---'---'---'----------------
+ |+15 |+10 |+6 |+3 |+1
+ | | | | |__>
+ | | | |__________>
+ | | |______________________>
+ | |______________________________________>
+ |__________________________________________________________>
+</pre>
+<p>
+Only the significant number of bits need to be stored between the
+stops, so that the <i>worst case is 20 memory accesses</i> when there are
+1000 <tt>Use</tt> objects associated with a <tt>User</tt>.</p>
+
+</div>
+
+<!-- ______________________________________________________________________ -->
+<h4>
+ <a name="ReferenceImpl">Reference implementation</a>
+</h4>
+
+<div>
+<p>
+The following literate Haskell fragment demonstrates the concept:</p>
+
+<div class="doc_code">
+<pre>
+> import Test.QuickCheck
+>
+> digits :: Int -> [Char] -> [Char]
+> digits 0 acc = '0' : acc
+> digits 1 acc = '1' : acc
+> digits n acc = digits (n `div` 2) $ digits (n `mod` 2) acc
+>
+> dist :: Int -> [Char] -> [Char]
+> dist 0 [] = ['S']
+> dist 0 acc = acc
+> dist 1 acc = let r = dist 0 acc in 's' : digits (length r) r
+> dist n acc = dist (n - 1) $ dist 1 acc
+>
+> takeLast n ss = reverse $ take n $ reverse ss
+>
+> test = takeLast 40 $ dist 20 []
+>
+</pre>
+</div>
+<p>
+Printing &lt;test&gt; gives: <tt>"1s100000s11010s10100s1111s1010s110s11s1S"</tt></p>
+<p>
+The reverse algorithm computes the length of the string just by examining
+a certain prefix:</p>
+
+<div class="doc_code">
+<pre>
+> pref :: [Char] -> Int
+> pref "S" = 1
+> pref ('s':'1':rest) = decode 2 1 rest
+> pref (_:rest) = 1 + pref rest
+>
+> decode walk acc ('0':rest) = decode (walk + 1) (acc * 2) rest
+> decode walk acc ('1':rest) = decode (walk + 1) (acc * 2 + 1) rest
+> decode walk acc _ = walk + acc
+>
+</pre>
+</div>
+<p>
+Now, as expected, printing &lt;pref test&gt; gives <tt>40</tt>.</p>
+<p>
+We can <i>quickCheck</i> this with following property:</p>
+
+<div class="doc_code">
+<pre>
+> testcase = dist 2000 []
+> testcaseLength = length testcase
+>
+> identityProp n = n > 0 && n <= testcaseLength ==> length arr == pref arr
+> where arr = takeLast n testcase
+>
+</pre>
+</div>
+<p>
+As expected &lt;quickCheck identityProp&gt; gives:</p>
+
+<pre>
+*Main> quickCheck identityProp
+OK, passed 100 tests.
+</pre>
+<p>
+Let's be a bit more exhaustive:</p>
+
+<div class="doc_code">
+<pre>
+>
+> deepCheck p = check (defaultConfig { configMaxTest = 500 }) p
+>
+</pre>
+</div>
+<p>
+And here is the result of &lt;deepCheck identityProp&gt;:</p>
+
+<pre>
+*Main> deepCheck identityProp
+OK, passed 500 tests.
+</pre>
+
+</div>
+
+<!-- ______________________________________________________________________ -->
+<h4>
+ <a name="Tagging">Tagging considerations</a>
+</h4>
+
+<div>
+
+<p>
+To maintain the invariant that the 2 LSBits of each <tt>Use**</tt> in <tt>Use</tt>
+never change after being set up, setters of <tt>Use::Prev</tt> must re-tag the
+new <tt>Use**</tt> on every modification. Accordingly, getters must strip the
+tag bits.</p>
+<p>
+For layout b), instead of the <tt>User</tt> we find a pointer (a <tt>User*</tt> with the LSBit set).
+Following this pointer brings us to the <tt>User</tt>. A portable trick ensures
+that the first word of the <tt>User</tt> (if interpreted as a pointer) never has
+the LSBit set. (Portability relies on the fact that all known compilers place the
+<tt>vptr</tt> in the first word of their instances.)</p>
+
+</div>
+
+</div>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2>
+ <a name="coreclasses">The Core LLVM Class Hierarchy Reference </a>
+</h2>
+<!-- *********************************************************************** -->
+
+<div>
+<p><tt>#include "<a href="/doxygen/Type_8h-source.html">llvm/Type.h</a>"</tt>
+<br>doxygen info: <a href="/doxygen/classllvm_1_1Type.html">Type Class</a></p>
+
+<p>The Core LLVM classes are the primary means of representing the program
+being inspected or transformed. The core LLVM classes are defined in
+header files in the <tt>include/llvm/</tt> directory, and implemented in
+the <tt>lib/VMCore</tt> directory.</p>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="Type">The <tt>Type</tt> class and Derived Types</a>
+</h3>
+
+<div>
+
+ <p><tt>Type</tt> is a superclass of all type classes. Every <tt>Value</tt> has
+ a <tt>Type</tt>. <tt>Type</tt> cannot be instantiated directly but only
+ through its subclasses. Certain primitive types (<tt>VoidType</tt>,
+ <tt>LabelType</tt>, <tt>FloatType</tt> and <tt>DoubleType</tt>) have hidden
+ subclasses. They are hidden because they offer no useful functionality beyond
+ what the <tt>Type</tt> class offers except to distinguish themselves from
+ other subclasses of <tt>Type</tt>.</p>
+ <p>All other types are subclasses of <tt>DerivedType</tt>. Types can be
+ named, but this is not a requirement. There exists exactly
+ one instance of a given shape at any one time. This allows type equality to
+ be performed with address equality of the Type Instance. That is, given two
+ <tt>Type*</tt> values, the types are identical if the pointers are identical.
+ </p>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="m_Type">Important Public Methods</a>
+</h4>
+
+<div>
+
+<ul>
+ <li><tt>bool isIntegerTy() const</tt>: Returns true for any integer type.</li>
+
+ <li><tt>bool isFloatingPointTy()</tt>: Return true if this is one of the five
+ floating point types.</li>
+
+ <li><tt>bool isSized()</tt>: Return true if the type has known size. Things
+ that don't have a size are abstract types, labels and void.</li>
+
+</ul>
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="derivedtypes">Important Derived Types</a>
+</h4>
+<div>
+<dl>
+ <dt><tt>IntegerType</tt></dt>
+ <dd>Subclass of DerivedType that represents integer types of any bit width.
+ Any bit width between <tt>IntegerType::MIN_INT_BITS</tt> (1) and
+ <tt>IntegerType::MAX_INT_BITS</tt> (~8 million) can be represented.
+ <ul>
+ <li><tt>static const IntegerType* get(unsigned NumBits)</tt>: get an integer
+ type of a specific bit width.</li>
+ <li><tt>unsigned getBitWidth() const</tt>: Get the bit width of an integer
+ type.</li>
+ </ul>
+ </dd>
+ <dt><tt>SequentialType</tt></dt>
+ <dd>This is subclassed by ArrayType, PointerType and VectorType.
+ <ul>
+ <li><tt>const Type * getElementType() const</tt>: Returns the type of each
+ of the elements in the sequential type. </li>
+ </ul>
+ </dd>
+ <dt><tt>ArrayType</tt></dt>
+ <dd>This is a subclass of SequentialType and defines the interface for array
+ types.
+ <ul>
+ <li><tt>unsigned getNumElements() const</tt>: Returns the number of
+ elements in the array. </li>
+ </ul>
+ </dd>
+ <dt><tt>PointerType</tt></dt>
+ <dd>Subclass of SequentialType for pointer types.</dd>
+ <dt><tt>VectorType</tt></dt>
+ <dd>Subclass of SequentialType for vector types. A
+ vector type is similar to an ArrayType but is distinguished because it is
+ a first class type whereas ArrayType is not. Vector types are used for
+  vector operations and are usually small vectors of an integer or floating
+ point type.</dd>
+ <dt><tt>StructType</tt></dt>
+  <dd>Subclass of DerivedType for struct types.</dd>
+ <dt><tt><a name="FunctionType">FunctionType</a></tt></dt>
+  <dd>Subclass of DerivedType for function types.
+ <ul>
+ <li><tt>bool isVarArg() const</tt>: Returns true if it's a vararg
+ function</li>
+ <li><tt> const Type * getReturnType() const</tt>: Returns the
+ return type of the function.</li>
+ <li><tt>const Type * getParamType (unsigned i)</tt>: Returns
+ the type of the ith parameter.</li>
+ <li><tt> const unsigned getNumParams() const</tt>: Returns the
+ number of formal parameters.</li>
+ </ul>
+ </dd>
+</dl>
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="Module">The <tt>Module</tt> class</a>
+</h3>
+
+<div>
+
+<p><tt>#include "<a
+href="/doxygen/Module_8h-source.html">llvm/Module.h</a>"</tt><br> doxygen info:
+<a href="/doxygen/classllvm_1_1Module.html">Module Class</a></p>
+
+<p>The <tt>Module</tt> class represents the top level structure present in LLVM
+programs. An LLVM module is effectively either a translation unit of the
+original program or a combination of several translation units merged by the
+linker. The <tt>Module</tt> class keeps track of a list of <a
+href="#Function"><tt>Function</tt></a>s, a list of <a
+href="#GlobalVariable"><tt>GlobalVariable</tt></a>s, and a <a
+href="#SymbolTable"><tt>SymbolTable</tt></a>. Additionally, it contains a few
+helpful member functions that try to make common operations easy.</p>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="m_Module">Important Public Members of the <tt>Module</tt> class</a>
+</h4>
+
+<div>
+
+<ul>
+ <li><tt>Module::Module(std::string name = "")</tt>
+
+ <p>Constructing a <a href="#Module">Module</a> is easy. You can optionally
+provide a name for it (probably based on the name of the translation unit).</p>
+ </li>
+
+ <li><tt>Module::iterator</tt> - Typedef for function list iterator<br>
+ <tt>Module::const_iterator</tt> - Typedef for const_iterator.<br>
+
+ <tt>begin()</tt>, <tt>end()</tt>
+ <tt>size()</tt>, <tt>empty()</tt>
+
+ <p>These are forwarding methods that make it easy to access the contents of
+ a <tt>Module</tt> object's <a href="#Function"><tt>Function</tt></a>
+ list.</p></li>
+
+ <li><tt>Module::FunctionListType &amp;getFunctionList()</tt>
+
+ <p> Returns the list of <a href="#Function"><tt>Function</tt></a>s. This is
+ necessary to use when you need to update the list or perform a complex
+ action that doesn't have a forwarding method.</p>
+
+ <p><!-- Global Variable --></p></li>
+</ul>
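+
+<p>Using the forwarding methods above, iterating over all of the
+<tt>Function</tt>s in a <tt>Module</tt> looks like this (a minimal sketch):</p>
+
+<div class="doc_code">
+<pre>
+Module *M = ...;
+
+for (Module::iterator F = M-&gt;begin(), E = M-&gt;end(); F != E; ++F)
+  errs() &lt;&lt; "Function: " &lt;&lt; F-&gt;getName() &lt;&lt; "\n";
+</pre>
+</div>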
+
+<hr>
+
+<ul>
+ <li><tt>Module::global_iterator</tt> - Typedef for global variable list iterator<br>
+
+ <tt>Module::const_global_iterator</tt> - Typedef for const_iterator.<br>
+
+ <tt>global_begin()</tt>, <tt>global_end()</tt>
+ <tt>global_size()</tt>, <tt>global_empty()</tt>
+
+ <p> These are forwarding methods that make it easy to access the contents of
+ a <tt>Module</tt> object's <a
+ href="#GlobalVariable"><tt>GlobalVariable</tt></a> list.</p></li>
+
+ <li><tt>Module::GlobalListType &amp;getGlobalList()</tt>
+
+ <p>Returns the list of <a
+ href="#GlobalVariable"><tt>GlobalVariable</tt></a>s. This is necessary to
+ use when you need to update the list or perform a complex action that
+ doesn't have a forwarding method.</p>
+
+ <p><!-- Symbol table stuff --> </p></li>
+</ul>
+
+<hr>
+
+<ul>
+ <li><tt><a href="#SymbolTable">SymbolTable</a> *getSymbolTable()</tt>
+
+ <p>Return a reference to the <a href="#SymbolTable"><tt>SymbolTable</tt></a>
+ for this <tt>Module</tt>.</p>
+
+ <p><!-- Convenience methods --></p></li>
+</ul>
+
+<hr>
+
+<ul>
+
+ <li><tt><a href="#Function">Function</a> *getFunction(StringRef Name) const
+ </tt>
+
+ <p>Look up the specified function in the <tt>Module</tt> <a
+ href="#SymbolTable"><tt>SymbolTable</tt></a>. If it does not exist, return
+ <tt>null</tt>.</p></li>
+
+ <li><tt><a href="#Function">Function</a> *getOrInsertFunction(const
+ std::string &amp;Name, const <a href="#FunctionType">FunctionType</a> *T)</tt>
+
+ <p>Look up the specified function in the <tt>Module</tt> <a
+ href="#SymbolTable"><tt>SymbolTable</tt></a>. If it does not exist, add an
+ external declaration for the function and return it.</p></li>
+
+ <li><tt>std::string getTypeName(const <a href="#Type">Type</a> *Ty)</tt>
+
+ <p>If there is at least one entry in the <a
+ href="#SymbolTable"><tt>SymbolTable</tt></a> for the specified <a
+ href="#Type"><tt>Type</tt></a>, return it. Otherwise return the empty
+ string.</p></li>
+
+ <li><tt>bool addTypeName(const std::string &amp;Name, const <a
+ href="#Type">Type</a> *Ty)</tt>
+
+ <p>Insert an entry in the <a href="#SymbolTable"><tt>SymbolTable</tt></a>
+ mapping <tt>Name</tt> to <tt>Ty</tt>. If there is already an entry for this
+ name, true is returned and the <a
+ href="#SymbolTable"><tt>SymbolTable</tt></a> is not modified.</p></li>
+</ul>
+
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="Value">The <tt>Value</tt> class</a>
+</h3>
+
+<div>
+
+<p><tt>#include "<a href="/doxygen/Value_8h-source.html">llvm/Value.h</a>"</tt>
+<br>
+doxygen info: <a href="/doxygen/classllvm_1_1Value.html">Value Class</a></p>
+
+<p>The <tt>Value</tt> class is the most important class in the LLVM Source
+base. It represents a typed value that may be used (among other things) as an
+operand to an instruction. There are many different types of <tt>Value</tt>s,
+such as <a href="#Constant"><tt>Constant</tt></a>s and <a
+href="#Argument"><tt>Argument</tt></a>s. Even <a
+href="#Instruction"><tt>Instruction</tt></a>s and <a
+href="#Function"><tt>Function</tt></a>s are <tt>Value</tt>s.</p>
+
+<p>A particular <tt>Value</tt> may be used many times in the LLVM representation
+for a program. For example, an incoming argument to a function (represented
+with an instance of the <a href="#Argument">Argument</a> class) is "used" by
+every instruction in the function that references the argument. To keep track
+of this relationship, the <tt>Value</tt> class keeps a list of all of the <a
+href="#User"><tt>User</tt></a>s that is using it (the <a
+href="#User"><tt>User</tt></a> class is a base class for all nodes in the LLVM
+graph that can refer to <tt>Value</tt>s). This use list is how LLVM represents
+def-use information in the program, and is accessible through the <tt>use_</tt>*
+methods, shown below.</p>
+
+<p>Because LLVM is a typed representation, every LLVM <tt>Value</tt> is typed,
+and this <a href="#Type">Type</a> is available through the <tt>getType()</tt>
+method. In addition, all LLVM values can be named. The "name" of the
+<tt>Value</tt> is a symbolic string printed in the LLVM code:</p>
+
+<div class="doc_code">
+<pre>
+%<b>foo</b> = add i32 1, 2
+</pre>
+</div>
+
+<p><a name="nameWarning">The name of this instruction is "foo".</a> <b>NOTE</b>
+that the name of any value may be missing (an empty string), so names should
+<b>ONLY</b> be used for debugging (making the source code easier to read,
+debugging printouts), they should not be used to keep track of values or map
+between them. For this purpose, use a <tt>std::map</tt> of pointers to the
+<tt>Value</tt> itself instead.</p>
+
+<p>One important aspect of LLVM is that there is no distinction between an SSA
+variable and the operation that produces it. Because of this, any reference to
+the value produced by an instruction (or the value available as an incoming
+argument, for example) is represented as a direct pointer to the instance of
+the class that
+represents this value. Although this may take some getting used to, it
+simplifies the representation and makes it easier to manipulate.</p>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="m_Value">Important Public Members of the <tt>Value</tt> class</a>
+</h4>
+
+<div>
+
+<ul>
+ <li><tt>Value::use_iterator</tt> - Typedef for iterator over the
+use-list<br>
+ <tt>Value::const_use_iterator</tt> - Typedef for const_iterator over
+the use-list<br>
+ <tt>unsigned use_size()</tt> - Returns the number of users of the
+value.<br>
+ <tt>bool use_empty()</tt> - Returns true if there are no users.<br>
+ <tt>use_iterator use_begin()</tt> - Get an iterator to the start of
+the use-list.<br>
+ <tt>use_iterator use_end()</tt> - Get an iterator to the end of the
+use-list.<br>
+ <tt><a href="#User">User</a> *use_back()</tt> - Returns the last
+element in the list.
+ <p> These methods are the interface to access the def-use
+information in LLVM. As with all other iterators in LLVM, the naming
+conventions follow the conventions defined by the <a href="#stl">STL</a>.</p>
+ </li>
+ <li><tt><a href="#Type">Type</a> *getType() const</tt>
+ <p>This method returns the Type of the Value.</p>
+ </li>
+ <li><tt>bool hasName() const</tt><br>
+ <tt>std::string getName() const</tt><br>
+ <tt>void setName(const std::string &amp;Name)</tt>
+ <p> This family of methods is used to access and assign a name to a <tt>Value</tt>,
+be aware of the <a href="#nameWarning">precaution above</a>.</p>
+ </li>
+ <li><tt>void replaceAllUsesWith(Value *V)</tt>
+
+ <p>This method traverses the use list of a <tt>Value</tt> changing all <a
+ href="#User"><tt>User</tt>s</a> of the current value to refer to
+ "<tt>V</tt>" instead. For example, if you detect that an instruction always
+ produces a constant value (for example through constant folding), you can
+ replace all uses of the instruction with the constant like this:</p>
+
+<div class="doc_code">
+<pre>
+Inst-&gt;replaceAllUsesWith(ConstVal);
+</pre>
+</div>
+
+</ul>
+
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="User">The <tt>User</tt> class</a>
+</h3>
+
+<div>
+
+<p>
+<tt>#include "<a href="/doxygen/User_8h-source.html">llvm/User.h</a>"</tt><br>
+doxygen info: <a href="/doxygen/classllvm_1_1User.html">User Class</a><br>
+Superclass: <a href="#Value"><tt>Value</tt></a></p>
+
+<p>The <tt>User</tt> class is the common base class of all LLVM nodes that may
+refer to <a href="#Value"><tt>Value</tt></a>s. It exposes a list of "Operands"
+that are all of the <a href="#Value"><tt>Value</tt></a>s that the User is
+referring to. The <tt>User</tt> class itself is a subclass of
+<tt>Value</tt>.</p>
+
+<p>The operands of a <tt>User</tt> point directly to the LLVM <a
+href="#Value"><tt>Value</tt></a> that it refers to. Because LLVM uses Static
+Single Assignment (SSA) form, there can only be one definition referred to,
+allowing this direct connection. This connection provides the use-def
+information in LLVM.</p>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="m_User">Important Public Members of the <tt>User</tt> class</a>
+</h4>
+
+<div>
+
+<p>The <tt>User</tt> class exposes the operand list in two ways: through
+an index access interface and through an iterator based interface.</p>
+
+<ul>
+ <li><tt>Value *getOperand(unsigned i)</tt><br>
+ <tt>unsigned getNumOperands()</tt>
+ <p> These two methods expose the operands of the <tt>User</tt> in a
+convenient form for direct access.</p></li>
+
+ <li><tt>User::op_iterator</tt> - Typedef for iterator over the operand
+list<br>
+ <tt>op_iterator op_begin()</tt> - Get an iterator to the start of
+the operand list.<br>
+ <tt>op_iterator op_end()</tt> - Get an iterator to the end of the
+operand list.
+ <p> Together, these methods make up the iterator based interface to
+the operands of a <tt>User</tt>.</p></li>
+</ul>
+
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="Instruction">The <tt>Instruction</tt> class</a>
+</h3>
+
+<div>
+
+<p><tt>#include "</tt><tt><a
+href="/doxygen/Instruction_8h-source.html">llvm/Instruction.h</a>"</tt><br>
+doxygen info: <a href="/doxygen/classllvm_1_1Instruction.html">Instruction Class</a><br>
+Superclasses: <a href="#User"><tt>User</tt></a>, <a
+href="#Value"><tt>Value</tt></a></p>
+
+<p>The <tt>Instruction</tt> class is the common base class for all LLVM
+instructions. It provides only a few methods, but is a very commonly used
+class. The primary data tracked by the <tt>Instruction</tt> class itself is the
+opcode (instruction type) and the parent <a
+href="#BasicBlock"><tt>BasicBlock</tt></a> the <tt>Instruction</tt> is embedded
+into. To represent a specific type of instruction, one of many subclasses of
+<tt>Instruction</tt> are used.</p>
+
+<p> Because the <tt>Instruction</tt> class subclasses the <a
+href="#User"><tt>User</tt></a> class, its operands can be accessed in the same
+way as for other <a href="#User"><tt>User</tt></a>s (with the
+<tt>getOperand()</tt>/<tt>getNumOperands()</tt> and
+<tt>op_begin()</tt>/<tt>op_end()</tt> methods).</p> <p> An important file for
+the <tt>Instruction</tt> class is the <tt>llvm/Instruction.def</tt> file. This
+file contains some meta-data about the various different types of instructions
+in LLVM. It describes the enum values that are used as opcodes (for example
+<tt>Instruction::Add</tt> and <tt>Instruction::ICmp</tt>), as well as the
+concrete sub-classes of <tt>Instruction</tt> that implement the instruction (for
+example <tt><a href="#BinaryOperator">BinaryOperator</a></tt> and <tt><a
+href="#CmpInst">CmpInst</a></tt>). Unfortunately, the use of macros in
+this file confuses doxygen, so these enum values don't show up correctly in the
+<a href="/doxygen/classllvm_1_1Instruction.html">doxygen output</a>.</p>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="s_Instruction">
+ Important Subclasses of the <tt>Instruction</tt> class
+ </a>
+</h4>
+<div>
+ <ul>
+ <li><tt><a name="BinaryOperator">BinaryOperator</a></tt>
+    <p>This subclass represents all two-operand instructions whose operands
+ must be the same type, except for the comparison instructions.</p></li>
+ <li><tt><a name="CastInst">CastInst</a></tt>
+ <p>This subclass is the parent of the 12 casting instructions. It provides
+ common operations on cast instructions.</p>
+ <li><tt><a name="CmpInst">CmpInst</a></tt>
+    <p>This subclass represents the two comparison instructions,
+    <a href="LangRef.html#i_icmp">ICmpInst</a> (integer operands), and
+ <a href="LangRef.html#i_fcmp">FCmpInst</a> (floating point operands).</p>
+ <li><tt><a name="TerminatorInst">TerminatorInst</a></tt>
+ <p>This subclass is the parent of all terminator instructions (those which
+ can terminate a block).</p>
+ </ul>
+ </div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="m_Instruction">
+ Important Public Members of the <tt>Instruction</tt> class
+ </a>
+</h4>
+
+<div>
+
+<ul>
+ <li><tt><a href="#BasicBlock">BasicBlock</a> *getParent()</tt>
+ <p>Returns the <a href="#BasicBlock"><tt>BasicBlock</tt></a> that
+this <tt>Instruction</tt> is embedded into.</p></li>
+ <li><tt>bool mayWriteToMemory()</tt>
+ <p>Returns true if the instruction writes to memory, i.e. it is a
+  <tt>call</tt>, <tt>free</tt>, <tt>invoke</tt>, or <tt>store</tt>.</p></li>
+ <li><tt>unsigned getOpcode()</tt>
+ <p>Returns the opcode for the <tt>Instruction</tt>.</p></li>
+ <li><tt><a href="#Instruction">Instruction</a> *clone() const</tt>
+ <p>Returns another instance of the specified instruction, identical
+in all ways to the original except that the instruction has no parent
+(i.e., it is not embedded into a <a href="#BasicBlock"><tt>BasicBlock</tt></a>),
+and it has no name.</p></li>
+</ul>
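+
+<p>For example, the following sketch (a hypothetical helper) combines
+<tt>clone()</tt> with one of <tt>Instruction</tt>'s insertion methods to
+duplicate an instruction next to the original:</p>
+
+<div class="doc_code">
+<pre>
+#include "llvm/Instruction.h"
+#include "llvm/ADT/Twine.h"
+using namespace llvm;
+
+// Sketch: duplicate I and place the copy immediately after it.  clone()
+// returns a parentless, nameless copy; insertAfter() embeds it into I's
+// BasicBlock.
+Instruction *duplicateAfter(Instruction *I) {
+  Instruction *Copy = I-&gt;clone();
+  Copy-&gt;insertAfter(I);
+  if (I-&gt;hasName())
+    Copy-&gt;setName(I-&gt;getName() + ".dup");
+  return Copy;
+}
+</pre>
+</div>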
+
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="Constant">The <tt>Constant</tt> class and subclasses</a>
+</h3>
+
+<div>
+
+<p>Constant represents a base class for different types of constants. It
+is subclassed by ConstantInt, ConstantArray, etc. for representing
+the various types of Constants. <a href="#GlobalValue">GlobalValue</a> is also
+a subclass, which represents the address of a global variable or function.
+</p>
+
+<!-- _______________________________________________________________________ -->
+<h4>Important Subclasses of Constant</h4>
+<div>
+<ul>
+ <li>ConstantInt : This subclass of Constant represents an integer constant of
+ any width.
+ <ul>
+ <li><tt>const APInt&amp; getValue() const</tt>: Returns the underlying
+ value of this constant, an APInt value.</li>
+ <li><tt>int64_t getSExtValue() const</tt>: Converts the underlying APInt
+ value to an int64_t via sign extension. If the value (not the bit width)
+ of the APInt is too large to fit in an int64_t, an assertion will result.
+ For this reason, use of this method is discouraged.</li>
+ <li><tt>uint64_t getZExtValue() const</tt>: Converts the underlying APInt
+      value to a uint64_t via zero extension. If the value (not the bit width)
+ of the APInt is too large to fit in a uint64_t, an assertion will result.
+ For this reason, use of this method is discouraged.</li>
+ <li><tt>static ConstantInt* get(const APInt&amp; Val)</tt>: Returns the
+ ConstantInt object that represents the value provided by <tt>Val</tt>.
+ The type is implied as the IntegerType that corresponds to the bit width
+ of <tt>Val</tt>.</li>
+ <li><tt>static ConstantInt* get(const Type *Ty, uint64_t Val)</tt>:
+ Returns the ConstantInt object that represents the value provided by
+ <tt>Val</tt> for integer type <tt>Ty</tt>.</li>
+ </ul>
+ </li>
+ <li>ConstantFP : This class represents a floating point constant.
+ <ul>
+ <li><tt>double getValue() const</tt>: Returns the underlying value of
+ this constant. </li>
+ </ul>
+ </li>
+ <li>ConstantArray : This represents a constant array.
+ <ul>
+ <li><tt>const std::vector&lt;Use&gt; &amp;getValues() const</tt>: Returns
+      a vector of component constants that make up this array.</li>
+ </ul>
+ </li>
+ <li>ConstantStruct : This represents a constant struct.
+ <ul>
+ <li><tt>const std::vector&lt;Use&gt; &amp;getValues() const</tt>: Returns
+      a vector of component constants that make up this structure.</li>
+ </ul>
+ </li>
+ <li>GlobalValue : This represents either a global variable or a function. In
+ either case, the value is a constant fixed address (after linking).
+ </li>
+</ul>
+</div>
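+
+<p>As an illustration, the following sketch (the surrounding function is
+hypothetical, and header paths follow the 3.x source layout) creates a
+<tt>ConstantInt</tt> and reads it back through the accessors listed above:</p>
+
+<div class="doc_code">
+<pre>
+#include "llvm/Constants.h"
+#include "llvm/LLVMContext.h"
+#include "llvm/Type.h"
+using namespace llvm;
+
+// Sketch: build the 32-bit constant 42 and inspect it.
+void constantIntExample(LLVMContext &amp;Context) {
+  ConstantInt *CI = ConstantInt::get(Type::getInt32Ty(Context), 42);
+  uint64_t Z = CI-&gt;getZExtValue();   // 42, via zero extension
+  int64_t  S = CI-&gt;getSExtValue();   // 42, via sign extension
+  const APInt &amp;V = CI-&gt;getValue();   // the underlying arbitrary-precision value
+  (void)Z; (void)S; (void)V;
+}
+</pre>
+</div>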
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="GlobalValue">The <tt>GlobalValue</tt> class</a>
+</h3>
+
+<div>
+
+<p><tt>#include "<a
+href="/doxygen/GlobalValue_8h-source.html">llvm/GlobalValue.h</a>"</tt><br>
+doxygen info: <a href="/doxygen/classllvm_1_1GlobalValue.html">GlobalValue
+Class</a><br>
+Superclasses: <a href="#Constant"><tt>Constant</tt></a>,
+<a href="#User"><tt>User</tt></a>, <a href="#Value"><tt>Value</tt></a></p>
+
+<p>Global values (<a href="#GlobalVariable"><tt>GlobalVariable</tt></a>s or <a
+href="#Function"><tt>Function</tt></a>s) are the only LLVM values that are
+visible in the bodies of all <a href="#Function"><tt>Function</tt></a>s.
+Because they are visible at global scope, they are also subject to linking with
+other globals defined in different translation units. To control the linking
+process, <tt>GlobalValue</tt>s know their linkage rules. Specifically,
+<tt>GlobalValue</tt>s know whether they have internal or external linkage, as
+defined by the <tt>LinkageTypes</tt> enumeration.</p>
+
+<p>If a <tt>GlobalValue</tt> has internal linkage (equivalent to being
+<tt>static</tt> in C), it is not visible to code outside the current translation
+unit, and does not participate in linking. If it has external linkage, it is
+visible to external code, and does participate in linking. In addition to
+linkage information, <tt>GlobalValue</tt>s keep track of which <a
+href="#Module"><tt>Module</tt></a> they are currently part of.</p>
+
+<p>Because <tt>GlobalValue</tt>s are memory objects, they are always referred to
+by their <b>address</b>. As such, the <a href="#Type"><tt>Type</tt></a> of a
+global is always a pointer to its contents. It is important to remember this
+when using the <tt>GetElementPtrInst</tt> instruction because this pointer must
+be dereferenced first. For example, if you have a <tt>GlobalVariable</tt> (a
+subclass of <tt>GlobalValue</tt>) that is an array of 24 ints, type <tt>[24 x
+i32]</tt>, then the <tt>GlobalVariable</tt> is a pointer to that array. Although
+the address of the first element of this array and the value of the
+<tt>GlobalVariable</tt> are the same, they have different types. The
+<tt>GlobalVariable</tt>'s type is <tt>[24 x i32]</tt>. The first element's type
+is <tt>i32</tt>. Because of this, accessing a global value requires you to
+dereference the pointer with <tt>GetElementPtrInst</tt> first; only then can its
+elements be accessed. This is explained in the <a href="LangRef.html#globalvars">LLVM
+Language Reference Manual</a>.</p>
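+
+<p>For instance, to load one element of the hypothetical <tt>[24 x i32]</tt>
+global described above, a pass might emit a <tt>getelementptr</tt> followed by a
+load. The following sketch uses a 3.x-era <tt>IRBuilder</tt>; the helper and its
+names are illustrative only:</p>
+
+<div class="doc_code">
+<pre>
+#include "llvm/GlobalVariable.h"
+#include "llvm/IRBuilder.h"
+using namespace llvm;
+
+// Sketch: GV has pointer-to-array type ([24 x i32]*), so two indices are
+// needed: one to step through the pointer itself, one to select the element.
+Value *loadElement(IRBuilder&lt;&gt; &amp;Builder, GlobalVariable *GV, unsigned Idx) {
+  Value *Indices[] = { Builder.getInt32(0), Builder.getInt32(Idx) };
+  Value *EltPtr = Builder.CreateInBoundsGEP(GV, Indices, "eltptr");
+  return Builder.CreateLoad(EltPtr, "elt");
+}
+</pre>
+</div>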
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="m_GlobalValue">
+ Important Public Members of the <tt>GlobalValue</tt> class
+ </a>
+</h4>
+
+<div>
+
+<ul>
+ <li><tt>bool hasInternalLinkage() const</tt><br>
+ <tt>bool hasExternalLinkage() const</tt><br>
+ <tt>void setInternalLinkage(bool HasInternalLinkage)</tt>
+ <p> These methods manipulate the linkage characteristics of the <tt>GlobalValue</tt>.</p>
+ <p> </p>
+ </li>
+ <li><tt><a href="#Module">Module</a> *getParent()</tt>
+ <p> This returns the <a href="#Module"><tt>Module</tt></a> that the
+GlobalValue is currently embedded into.</p></li>
+</ul>
+
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="Function">The <tt>Function</tt> class</a>
+</h3>
+
+<div>
+
+<p><tt>#include "<a
+href="/doxygen/Function_8h-source.html">llvm/Function.h</a>"</tt><br> doxygen
+info: <a href="/doxygen/classllvm_1_1Function.html">Function Class</a><br>
+Superclasses: <a href="#GlobalValue"><tt>GlobalValue</tt></a>,
+<a href="#Constant"><tt>Constant</tt></a>,
+<a href="#User"><tt>User</tt></a>,
+<a href="#Value"><tt>Value</tt></a></p>
+
+<p>The <tt>Function</tt> class represents a single procedure in LLVM. It is
+actually one of the more complex classes in the LLVM hierarchy because it must
+keep track of a large amount of data. The <tt>Function</tt> class keeps track
+of a list of <a href="#BasicBlock"><tt>BasicBlock</tt></a>s, a list of formal
+<a href="#Argument"><tt>Argument</tt></a>s, and a
+<a href="#SymbolTable"><tt>SymbolTable</tt></a>.</p>
+
+<p>The list of <a href="#BasicBlock"><tt>BasicBlock</tt></a>s is the most
+commonly used part of <tt>Function</tt> objects. The list imposes an implicit
+ordering of the blocks in the function, which indicates how the code will be
+laid out by the backend. Additionally, the first <a
+href="#BasicBlock"><tt>BasicBlock</tt></a> is the implicit entry node for the
+<tt>Function</tt>. It is not legal in LLVM to explicitly branch to this initial
+block. There are no implicit exit nodes, and in fact there may be multiple exit
+nodes from a single <tt>Function</tt>. If the <a
+href="#BasicBlock"><tt>BasicBlock</tt></a> list is empty, this indicates that
+the <tt>Function</tt> is actually a function declaration: the actual body of the
+function hasn't been linked in yet.</p>
+
+<p>In addition to a list of <a href="#BasicBlock"><tt>BasicBlock</tt></a>s, the
+<tt>Function</tt> class also keeps track of the list of formal <a
+href="#Argument"><tt>Argument</tt></a>s that the function receives. This
+container manages the lifetime of the <a href="#Argument"><tt>Argument</tt></a>
+nodes, just like the <a href="#BasicBlock"><tt>BasicBlock</tt></a> list does for
+the <a href="#BasicBlock"><tt>BasicBlock</tt></a>s.</p>
+
+<p>The <a href="#SymbolTable"><tt>SymbolTable</tt></a> is a very rarely used
+LLVM feature that is only used when you have to look up a value by name. Aside
+from that, the <a href="#SymbolTable"><tt>SymbolTable</tt></a> is used
+internally to make sure that there are no conflicts between the names of <a
+href="#Instruction"><tt>Instruction</tt></a>s, <a
+href="#BasicBlock"><tt>BasicBlock</tt></a>s, or <a
+href="#Argument"><tt>Argument</tt></a>s in the function body.</p>
+
+<p>Note that <tt>Function</tt> is a <a href="#GlobalValue">GlobalValue</a>
+and therefore also a <a href="#Constant">Constant</a>. The value of the function
+is its address (after linking) which is guaranteed to be constant.</p>
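+
+<p>The following sketch (a hypothetical helper) walks the argument and basic
+block lists described above using the iterator interface:</p>
+
+<div class="doc_code">
+<pre>
+#include "llvm/Function.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+// Sketch: print each formal argument, then count the instructions in F.
+void summarizeFunction(const Function &amp;F) {
+  for (Function::const_arg_iterator A = F.arg_begin(), E = F.arg_end();
+       A != E; ++A)
+    errs() &lt;&lt; "argument: " &lt;&lt; *A &lt;&lt; "\n";
+
+  unsigned NumInsts = 0;
+  for (Function::const_iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
+    NumInsts += BB-&gt;size();
+  errs() &lt;&lt; F.getName() &lt;&lt; " contains " &lt;&lt; NumInsts &lt;&lt; " instructions\n";
+}
+</pre>
+</div>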
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="m_Function">
+ Important Public Members of the <tt>Function</tt> class
+ </a>
+</h4>
+
+<div>
+
+<ul>
+ <li><tt>Function(const </tt><tt><a href="#FunctionType">FunctionType</a>
+ *Ty, LinkageTypes Linkage, const std::string &amp;N = "", Module* Parent = 0)</tt>
+
+    <p>Constructor used when you need to create new <tt>Function</tt>s to add
+    to the program. The constructor must specify the type of the function to
+ create and what type of linkage the function should have. The <a
+ href="#FunctionType"><tt>FunctionType</tt></a> argument
+ specifies the formal arguments and return value for the function. The same
+ <a href="#FunctionType"><tt>FunctionType</tt></a> value can be used to
+ create multiple functions. The <tt>Parent</tt> argument specifies the Module
+ in which the function is defined. If this argument is provided, the function
+ will automatically be inserted into that module's list of
+ functions.</p></li>
+
+ <li><tt>bool isDeclaration()</tt>
+
+    <p>Returns true if the <tt>Function</tt> is a declaration and has no body. If the
+ function is "external", it does not have a body, and thus must be resolved
+ by linking with a function defined in a different translation unit.</p></li>
+
+ <li><tt>Function::iterator</tt> - Typedef for basic block list iterator<br>
+ <tt>Function::const_iterator</tt> - Typedef for const_iterator.<br>
+
+    <tt>begin()</tt>, <tt>end()</tt>,
+ <tt>size()</tt>, <tt>empty()</tt>
+
+ <p>These are forwarding methods that make it easy to access the contents of
+ a <tt>Function</tt> object's <a href="#BasicBlock"><tt>BasicBlock</tt></a>
+ list.</p></li>
+
+ <li><tt>Function::BasicBlockListType &amp;getBasicBlockList()</tt>
+
+ <p>Returns the list of <a href="#BasicBlock"><tt>BasicBlock</tt></a>s. This
+ is necessary to use when you need to update the list or perform a complex
+ action that doesn't have a forwarding method.</p></li>
+
+ <li><tt>Function::arg_iterator</tt> - Typedef for the argument list
+iterator<br>
+ <tt>Function::const_arg_iterator</tt> - Typedef for const_iterator.<br>
+
+    <tt>arg_begin()</tt>, <tt>arg_end()</tt>,
+ <tt>arg_size()</tt>, <tt>arg_empty()</tt>
+
+ <p>These are forwarding methods that make it easy to access the contents of
+ a <tt>Function</tt> object's <a href="#Argument"><tt>Argument</tt></a>
+ list.</p></li>
+
+ <li><tt>Function::ArgumentListType &amp;getArgumentList()</tt>
+
+ <p>Returns the list of <a href="#Argument"><tt>Argument</tt></a>s. This is
+ necessary to use when you need to update the list or perform a complex
+ action that doesn't have a forwarding method.</p></li>
+
+ <li><tt><a href="#BasicBlock">BasicBlock</a> &amp;getEntryBlock()</tt>
+
+ <p>Returns the entry <a href="#BasicBlock"><tt>BasicBlock</tt></a> for the
+ function. Because the entry block for the function is always the first
+ block, this returns the first block of the <tt>Function</tt>.</p></li>
+
+ <li><tt><a href="#Type">Type</a> *getReturnType()</tt><br>
+ <tt><a href="#FunctionType">FunctionType</a> *getFunctionType()</tt>
+
+ <p>This traverses the <a href="#Type"><tt>Type</tt></a> of the
+ <tt>Function</tt> and returns the return type of the function, or the <a
+ href="#FunctionType"><tt>FunctionType</tt></a> of the actual
+ function.</p></li>
+
+ <li><tt><a href="#SymbolTable">SymbolTable</a> *getSymbolTable()</tt>
+
+ <p> Return a pointer to the <a href="#SymbolTable"><tt>SymbolTable</tt></a>
+ for this <tt>Function</tt>.</p></li>
+</ul>
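+
+<p>Putting a few of these accessors together, a hypothetical helper might look
+like this sketch:</p>
+
+<div class="doc_code">
+<pre>
+#include "llvm/Function.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+// Sketch: report a few facts about F using the members listed above.
+void inspect(const Function &amp;F) {
+  if (F.isDeclaration()) {
+    errs() &lt;&lt; F.getName() &lt;&lt; " is only a declaration\n";
+    return;
+  }
+  errs() &lt;&lt; F.getName() &lt;&lt; " takes " &lt;&lt; F.arg_size() &lt;&lt; " argument(s)"
+         &lt;&lt; (F.getReturnType()-&gt;isVoidTy() ? " and returns void" : "")
+         &lt;&lt; "; its entry block holds " &lt;&lt; F.getEntryBlock().size()
+         &lt;&lt; " instruction(s)\n";
+}
+</pre>
+</div>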
+
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="GlobalVariable">The <tt>GlobalVariable</tt> class</a>
+</h3>
+
+<div>
+
+<p><tt>#include "<a
+href="/doxygen/GlobalVariable_8h-source.html">llvm/GlobalVariable.h</a>"</tt>
+<br>
+doxygen info: <a href="/doxygen/classllvm_1_1GlobalVariable.html">GlobalVariable
+ Class</a><br>
+Superclasses: <a href="#GlobalValue"><tt>GlobalValue</tt></a>,
+<a href="#Constant"><tt>Constant</tt></a>,
+<a href="#User"><tt>User</tt></a>,
+<a href="#Value"><tt>Value</tt></a></p>
+
+<p>Global variables are represented with the (surprise surprise)
+<tt>GlobalVariable</tt> class. Like functions, <tt>GlobalVariable</tt>s are also
+subclasses of <a href="#GlobalValue"><tt>GlobalValue</tt></a>, and as such are
+always referenced by their address (global values must live in memory, so their
+"name" refers to their constant address). See
+<a href="#GlobalValue"><tt>GlobalValue</tt></a> for more on this. Global
+variables may have an initial value (which must be a
+<a href="#Constant"><tt>Constant</tt></a>), and if they have an initializer,
+they may be marked as "constant" themselves (indicating that their contents
+never change at runtime).</p>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="m_GlobalVariable">
+ Important Public Members of the <tt>GlobalVariable</tt> class
+ </a>
+</h4>
+
+<div>
+
+<ul>
+ <li><tt>GlobalVariable(const </tt><tt><a href="#Type">Type</a> *Ty, bool
+ isConstant, LinkageTypes&amp; Linkage, <a href="#Constant">Constant</a>
+ *Initializer = 0, const std::string &amp;Name = "", Module* Parent = 0)</tt>
+
+ <p>Create a new global variable of the specified type. If
+ <tt>isConstant</tt> is true then the global variable will be marked as
+ unchanging for the program. The Linkage parameter specifies the type of
+ linkage (internal, external, weak, linkonce, appending) for the variable.
+ If the linkage is InternalLinkage, WeakAnyLinkage, WeakODRLinkage,
+  LinkOnceAnyLinkage or LinkOnceODRLinkage, then the resultant
+ global variable will have internal linkage. AppendingLinkage concatenates
+ together all instances (in different translation units) of the variable
+  into a single variable but is only applicable to arrays. See
+ the <a href="LangRef.html#modulestructure">LLVM Language Reference</a> for
+ further details on linkage types. Optionally an initializer, a name, and the
+ module to put the variable into may be specified for the global variable as
+ well.</p></li>
+
+ <li><tt>bool isConstant() const</tt>
+
+ <p>Returns true if this is a global variable that is known not to
+ be modified at runtime.</p></li>
+
+ <li><tt>bool hasInitializer()</tt>
+
+    <p>Returns true if this <tt>GlobalVariable</tt> has an initializer.</p></li>
+
+ <li><tt><a href="#Constant">Constant</a> *getInitializer()</tt>
+
+ <p>Returns the initial value for a <tt>GlobalVariable</tt>. It is not legal
+ to call this method if there is no initializer.</p></li>
+</ul>
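+
+<p>The following sketch (a hypothetical helper) walks a <a
+href="#Module"><tt>Module</tt></a>'s globals and queries them through the
+members listed above:</p>
+
+<div class="doc_code">
+<pre>
+#include "llvm/GlobalVariable.h"
+#include "llvm/Module.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+// Sketch: report every global variable in a Module.
+void reportGlobals(const Module &amp;M) {
+  for (Module::const_global_iterator GV = M.global_begin(),
+                                      E = M.global_end(); GV != E; ++GV)
+    errs() &lt;&lt; GV-&gt;getName()
+           &lt;&lt; (GV-&gt;isConstant() ? " (constant)" : " (mutable)")
+           &lt;&lt; (GV-&gt;hasInitializer() ? ", has an initializer\n"
+                                      : ", declaration only\n");
+}
+</pre>
+</div>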
+
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="BasicBlock">The <tt>BasicBlock</tt> class</a>
+</h3>
+
+<div>
+
+<p><tt>#include "<a
+href="/doxygen/BasicBlock_8h-source.html">llvm/BasicBlock.h</a>"</tt><br>
+doxygen info: <a href="/doxygen/classllvm_1_1BasicBlock.html">BasicBlock
+Class</a><br>
+Superclass: <a href="#Value"><tt>Value</tt></a></p>
+
+<p>This class represents a single entry single exit section of the code,
+commonly known as a basic block by the compiler community. The
+<tt>BasicBlock</tt> class maintains a list of <a
+href="#Instruction"><tt>Instruction</tt></a>s, which form the body of the block.
+Matching the language definition, the last element of this list of instructions
+is always a terminator instruction (a subclass of the <a
+href="#TerminatorInst"><tt>TerminatorInst</tt></a> class).</p>
+
+<p>In addition to tracking the list of instructions that make up the block, the
+<tt>BasicBlock</tt> class also keeps track of the <a
+href="#Function"><tt>Function</tt></a> that it is embedded into.</p>
+
+<p>Note that <tt>BasicBlock</tt>s themselves are <a
+href="#Value"><tt>Value</tt></a>s, because they are referenced by instructions
+like branches and can go in the switch tables. <tt>BasicBlock</tt>s have type
+<tt>label</tt>.</p>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="m_BasicBlock">
+ Important Public Members of the <tt>BasicBlock</tt> class
+ </a>
+</h4>
+
+<div>
+<ul>
+
+<li><tt>BasicBlock(const std::string &amp;Name = "", </tt><tt><a
+ href="#Function">Function</a> *Parent = 0)</tt>
+
+<p>The <tt>BasicBlock</tt> constructor is used to create new basic blocks for
+insertion into a function. The constructor optionally takes a name for the new
+block, and a <a href="#Function"><tt>Function</tt></a> to insert it into. If
+the <tt>Parent</tt> parameter is specified, the new <tt>BasicBlock</tt> is
+automatically inserted at the end of the specified <a
+href="#Function"><tt>Function</tt></a>; if not specified, the <tt>BasicBlock</tt> must be
+manually inserted into the <a href="#Function"><tt>Function</tt></a>.</p></li>
+
+<li><tt>BasicBlock::iterator</tt> - Typedef for instruction list iterator<br>
+<tt>BasicBlock::const_iterator</tt> - Typedef for const_iterator.<br>
+<tt>begin()</tt>, <tt>end()</tt>, <tt>front()</tt>, <tt>back()</tt>,
+<tt>size()</tt>, <tt>empty()</tt> -
+STL-style functions for accessing the instruction list.
+
+<p>These methods and typedefs are forwarding functions that have the same
+semantics as the standard library methods of the same names. These methods
+expose the underlying instruction list of a basic block in a way that is easy to
+manipulate. To get the full complement of container operations (including
+operations to update the list), you must use the <tt>getInstList()</tt>
+method.</p></li>
+
+<li><tt>BasicBlock::InstListType &amp;getInstList()</tt>
+
+<p>This method is used to get access to the underlying container that actually
+holds the Instructions. This method must be used when there isn't a forwarding
+function in the <tt>BasicBlock</tt> class for the operation that you would like
+to perform. Because there are no forwarding functions for "updating"
+operations, you need to use this if you want to update the contents of a
+<tt>BasicBlock</tt>.</p></li>
+
+<li><tt><a href="#Function">Function</a> *getParent()</tt>
+
+<p> Returns a pointer to the <a href="#Function"><tt>Function</tt></a> the block is
+embedded into, or a null pointer if it is homeless.</p></li>
+
+<li><tt><a href="#TerminatorInst">TerminatorInst</a> *getTerminator()</tt>
+
+<p> Returns a pointer to the terminator instruction that appears at the end of
+the <tt>BasicBlock</tt>. If there is no terminator instruction, or if the last
+instruction in the block is not a terminator, then a null pointer is
+returned.</p></li>
+
+</ul>
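+
+<p>For example, the following sketch (a hypothetical helper) uses these members
+to describe a block:</p>
+
+<div class="doc_code">
+<pre>
+#include "llvm/BasicBlock.h"
+#include "llvm/Function.h"
+#include "llvm/Instructions.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+// Sketch: print a block's size, its parent function, and its terminator.
+void describeBlock(const BasicBlock &amp;BB) {
+  errs() &lt;&lt; BB.getName() &lt;&lt; ": " &lt;&lt; BB.size() &lt;&lt; " instruction(s)\n";
+  if (const Function *F = BB.getParent())
+    errs() &lt;&lt; "  embedded in function " &lt;&lt; F-&gt;getName() &lt;&lt; "\n";
+  if (const TerminatorInst *T = BB.getTerminator())
+    errs() &lt;&lt; "  terminator: " &lt;&lt; *T &lt;&lt; "\n";
+}
+</pre>
+</div>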
+
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="Argument">The <tt>Argument</tt> class</a>
+</h3>
+
+<div>
+
+<p>This subclass of Value defines the interface for incoming formal
+arguments to a function. A Function maintains a list of its formal
+arguments. An argument has a pointer to the parent Function.</p>
+
+</div>
+
+</div>
+
+<!-- *********************************************************************** -->
+<hr>
+<address>
+ <a href="http://jigsaw.w3.org/css-validator/check/referer"><img
+ src="http://jigsaw.w3.org/css-validator/images/vcss-blue" alt="Valid CSS"></a>
+ <a href="http://validator.w3.org/check/referer"><img
+ src="http://www.w3.org/Icons/valid-html401" alt="Valid HTML 4.01 Strict"></a>
+
+ <a href="mailto:dhurjati@cs.uiuc.edu">Dinakar Dhurjati</a> and
+ <a href="mailto:sabre@nondot.org">Chris Lattner</a><br>
+ <a href="http://llvm.org/">The LLVM Compiler Infrastructure</a><br>
+ Last modified: $Date$
+</address>
+
+</body>
+</html>
diff --git a/docs/Projects.rst b/docs/Projects.rst
new file mode 100644
index 00000000000..63132887a59
--- /dev/null
+++ b/docs/Projects.rst
@@ -0,0 +1,327 @@
+.. _projects:
+
+========================
+Creating an LLVM Project
+========================
+
+.. contents::
+ :local:
+
+Overview
+========
+
+The LLVM build system is designed to facilitate the building of third party
+projects that use LLVM header files, libraries, and tools. In order to use
+these facilities, a ``Makefile`` from a project must do the following things:
+
+* Set ``make`` variables. There are several variables that a ``Makefile`` needs
+ to set to use the LLVM build system:
+
+ * ``PROJECT_NAME`` - The name by which your project is known.
+ * ``LLVM_SRC_ROOT`` - The root of the LLVM source tree.
+ * ``LLVM_OBJ_ROOT`` - The root of the LLVM object tree.
+ * ``PROJ_SRC_ROOT`` - The root of the project's source tree.
+ * ``PROJ_OBJ_ROOT`` - The root of the project's object tree.
+ * ``PROJ_INSTALL_ROOT`` - The root installation directory.
+ * ``LEVEL`` - The relative path from the current directory to the
+ project's root ``($PROJ_OBJ_ROOT)``.
+
+* Include ``Makefile.config`` from ``$(LLVM_OBJ_ROOT)``.
+
+* Include ``Makefile.rules`` from ``$(LLVM_SRC_ROOT)``.
+
+There are two ways that you can set all of these variables:
+
+* You can write your own ``Makefiles`` which hard-code these values.
+
+* You can use the pre-made LLVM sample project. This sample project includes
+ ``Makefiles``, a configure script that can be used to configure the location
+ of LLVM, and the ability to support multiple object directories from a single
+ source directory.
+
+This document assumes that you will base your project on the LLVM sample project
+found in ``llvm/projects/sample``. If you want to devise your own build system,
+studying the sample project and LLVM ``Makefiles`` will probably provide enough
+information on how to write your own ``Makefiles``.
+
+Create a Project from the Sample Project
+========================================
+
+Follow these simple steps to start your project:
+
+1. Copy the ``llvm/projects/sample`` directory to any place of your choosing.
+ You can place it anywhere you like. Rename the directory to match the name
+ of your project.
+
+2. If you downloaded LLVM using Subversion, remove all the directories named
+ ``.svn`` (and all the files therein) from your project's new source tree.
+ This will keep Subversion from thinking that your project is inside
+ ``llvm/trunk/projects/sample``.
+
+3. Add your source code and Makefiles to your source tree.
+
+4. If you want your project to be configured with the ``configure`` script then
+ you need to edit ``autoconf/configure.ac`` as follows:
+
+ * **AC_INIT** - Place the name of your project, its version number and a
+ contact email address for your project as the arguments to this macro
+
+ * **AC_CONFIG_AUX_DIR** - If your project isn't in the ``llvm/projects``
+ directory then you might need to adjust this so that it specifies a
+ relative path to the ``llvm/autoconf`` directory.
+
+ * **LLVM_CONFIG_PROJECT** - Just leave this alone.
+
+ * **AC_CONFIG_SRCDIR** - Specify a path to a file name that identifies your
+ project; or just leave it at ``Makefile.common.in``.
+
+ * **AC_CONFIG_FILES** - Do not change.
+
+ * **AC_CONFIG_MAKEFILE** - Use one of these macros for each Makefile that
+ your project uses. This macro arranges for your makefiles to be copied from
+ the source directory, unmodified, to the build directory.
+
+5. After updating ``autoconf/configure.ac``, regenerate the configure script
+ with these commands. (You must be using ``Autoconf`` version 2.59 or later
+ and your ``aclocal`` version should be 1.9 or later.)
+
+ .. code-block:: bash
+
+ % cd autoconf
+ % ./AutoRegen.sh
+
+6. Run ``configure`` in the directory in which you want to place object code.
+ Use the following options to tell your project where it can find LLVM:
+
+ ``--with-llvmsrc=<directory>``
+ Tell your project where the LLVM source tree is located.
+
+ ``--with-llvmobj=<directory>``
+ Tell your project where the LLVM object tree is located.
+
+ ``--prefix=<directory>``
+ Tell your project where it should get installed.
+
+That's it! Now all you have to do is type ``gmake`` (or ``make`` if you're on a
+GNU/Linux system) in the root of your object directory, and your project should
+build.
+
+Source Tree Layout
+==================
+
+In order to use the LLVM build system, you will want to organize your source
+code so that it can benefit from the build system's features. Mainly, you want
+your source tree layout to look similar to the LLVM source tree layout. The
+best way to do this is to just copy the project tree from
+``llvm/projects/sample`` and modify it to meet your needs, but you can certainly
+add to it if you want.
+
+Underneath your top level directory, you should have the following directories:
+
+**lib**
+
+ This subdirectory should contain all of your library source code. For each
+ library that you build, you will have one directory in **lib** that will
+ contain that library's source code.
+
+ Libraries can be object files, archives, or dynamic libraries. The **lib**
+ directory is just a convenient place for libraries as it places them all in
+ a directory from which they can be linked later.
+
+**include**
+
+ This subdirectory should contain any header files that are global to your
+ project. By global, we mean that they are used by more than one library or
+ executable of your project.
+
+ By placing your header files in **include**, they will be found
+ automatically by the LLVM build system. For example, if you have a file
+ **include/jazz/note.h**, then your source files can include it simply with
+ **#include "jazz/note.h"**.
+
+**tools**
+
+ This subdirectory should contain all of your source code for executables.
+ For each program that you build, you will have one directory in **tools**
+ that will contain that program's source code.
+
+**test**
+
+ This subdirectory should contain tests that verify that your code works
+ correctly. Automated tests are especially useful.
+
+ Currently, the LLVM build system provides basic support for tests. The LLVM
+ system provides the following:
+
+* LLVM provides a ``tcl`` procedure that is used by ``Dejagnu`` to run tests.
+ It can be found in ``llvm/lib/llvm-dg.exp``. This test procedure uses ``RUN``
+ lines in the actual test case to determine how to run the test. See the
+ `TestingGuide <TestingGuide.html>`_ for more details. You can easily write
+ Makefile support similar to the Makefiles in ``llvm/test`` to use ``Dejagnu``
+ to run your project's tests.
+
+* LLVM contains an optional package called ``llvm-test``, which provides
+ benchmarks and programs that are known to compile with the Clang front
+ end. You can use these programs to test your code, gather statistical
+ information, and compare it to the current LLVM performance statistics.
+
+ Currently, there is no way to hook your tests directly into the ``llvm/test``
+ testing harness. You will simply need to find a way to use the source
+ provided within that directory on your own.
+
+Typically, you will want to build your **lib** directory first followed by your
+**tools** directory.
+
+Writing LLVM Style Makefiles
+============================
+
+The LLVM build system provides a convenient way to build libraries and
+executables. Most of your project Makefiles will only need to define a few
+variables. Below is a list of the variables one can set and what they can
+do:
+
+Required Variables
+------------------
+
+``LEVEL``
+
+ This variable is the relative path from this ``Makefile`` to the top
+ directory of your project's source code. For example, if your source code
+ is in ``/tmp/src``, then the ``Makefile`` in ``/tmp/src/jump/high``
+ would set ``LEVEL`` to ``"../.."``.
+
+Variables for Building Subdirectories
+-------------------------------------
+
+``DIRS``
+
+ This is a space separated list of subdirectories that should be built. They
+ will be built, one at a time, in the order specified.
+
+``PARALLEL_DIRS``
+
+ This is a list of directories that can be built in parallel. These will be
+ built after the directories in DIRS have been built.
+
+``OPTIONAL_DIRS``
+
+ This is a list of directories that can be built if they exist, but will not
+ cause an error if they do not exist. They are built serially in the order
+ in which they are listed.
+
+Variables for Building Libraries
+--------------------------------
+
+``LIBRARYNAME``
+
+ This variable contains the base name of the library that will be built. For
+ example, to build a library named ``libsample.a``, ``LIBRARYNAME`` should
+ be set to ``sample``.
+
+``BUILD_ARCHIVE``
+
+ By default, a library is a ``.o`` file that is linked directly into a
+ program. To build an archive (also known as a static library), set the
+ ``BUILD_ARCHIVE`` variable.
+
+``SHARED_LIBRARY``
+
+ If ``SHARED_LIBRARY`` is defined in your Makefile, a shared (or dynamic)
+ library will be built.
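+
+For example, a minimal library ``Makefile`` might look like the following
+sketch. The paths and names are illustrative and assume the sample project
+layout, with this ``Makefile`` living in ``lib/sample``:
+
+.. code-block:: makefile
+
+   # lib/sample/Makefile -- builds the static archive libsample.a.
+   LEVEL := ../..
+   LIBRARYNAME := sample
+   BUILD_ARCHIVE := 1
+
+   include $(LEVEL)/Makefile.common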
+
+Variables for Building Programs
+-------------------------------
+
+``TOOLNAME``
+
+ This variable contains the name of the program that will be built. For
+ example, to build an executable named ``sample``, ``TOOLNAME`` should be set
+ to ``sample``.
+
+``USEDLIBS``
+
+ This variable holds a space separated list of libraries that should be
+ linked into the program. These libraries must be libraries that come from
+ your **lib** directory. The libraries must be specified without their
+ ``lib`` prefix. For example, to link ``libsample.a``, you would set
+ ``USEDLIBS`` to ``sample.a``.
+
+ Note that this works only for statically linked libraries.
+
+``LLVMLIBS``
+
+ This variable holds a space separated list of libraries that should be
+ linked into the program. These libraries must be LLVM libraries. The
+ libraries must be specified without their ``lib`` prefix. For example, to
+ link with a driver that performs an IR transformation you might set
+ ``LLVMLIBS`` to this minimal set of libraries ``LLVMSupport.a LLVMCore.a
+ LLVMBitReader.a LLVMAsmParser.a LLVMAnalysis.a LLVMTransformUtils.a
+ LLVMScalarOpts.a LLVMTarget.a``.
+
+ Note that this works only for statically linked libraries. LLVM is split
+ into a large number of static libraries, and the list of libraries you
+ require may be much longer than the list above. To see a full list of
+ libraries use: ``llvm-config --libs all``. Using ``LINK_COMPONENTS`` as
+  described below obviates the need to set ``LLVMLIBS``.
+
+``LINK_COMPONENTS``
+
+ This variable holds a space separated list of components that the LLVM
+ ``Makefiles`` pass to the ``llvm-config`` tool to generate a link line for
+ the program. For example, to link with all LLVM libraries use
+ ``LINK_COMPONENTS = all``.
+
+``LIBS``
+
+ To link dynamic libraries, add ``-l<library base name>`` to the ``LIBS``
+ variable. The LLVM build system will look in the same places for dynamic
+ libraries as it does for static libraries.
+
+ For example, to link ``libsample.so``, you would have the following line in
+ your ``Makefile``:
+
+ .. code-block:: makefile
+
+ LIBS += -lsample
+
+Note that ``LIBS`` must occur in the Makefile after the inclusion of
+``Makefile.common``.
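+
+Putting the program-related variables together, a tool ``Makefile`` might look
+like the following sketch (the names are illustrative; ``sample.a`` is assumed
+to be an archive built by your ``lib`` directory, and the components you list
+in ``LINK_COMPONENTS`` depend on which parts of LLVM your tool uses):
+
+.. code-block:: makefile
+
+   # tools/sample/Makefile -- links the `sample' executable.
+   LEVEL := ../..
+   TOOLNAME := sample
+   USEDLIBS := sample.a
+   LINK_COMPONENTS := core support
+
+   include $(LEVEL)/Makefile.common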
+
+Miscellaneous Variables
+-----------------------
+
+``CFLAGS`` & ``CPPFLAGS``
+
+  These variables can be used to add options to the C and C++ compilers,
+  respectively. They are typically used to add options that tell the compiler
+ the location of additional directories to search for header files.
+
+ It is highly suggested that you append to ``CFLAGS`` and ``CPPFLAGS`` as
+ opposed to overwriting them. The master ``Makefiles`` may already have
+ useful options in them that you may not want to overwrite.
+
+Placement of Object Code
+========================
+
+The final location of built libraries and executables will depend upon whether
+you do a ``Debug``, ``Release``, or ``Profile`` build.
+
+Libraries
+
+ All libraries (static and dynamic) will be stored in
+ ``PROJ_OBJ_ROOT/<type>/lib``, where *type* is ``Debug``, ``Release``, or
+ ``Profile`` for a debug, optimized, or profiled build, respectively.
+
+Executables
+
+ All executables will be stored in ``PROJ_OBJ_ROOT/<type>/bin``, where *type*
+ is ``Debug``, ``Release``, or ``Profile`` for a debug, optimized, or
+ profiled build, respectively.
+
+Further Help
+============
+
+If you have any questions or need any help creating an LLVM project, the LLVM
+team would be more than happy to help. You can always post your questions to
+the `LLVM Developers Mailing List
+<http://lists.cs.uiuc.edu/pipermail/llvmdev/>`_.
diff --git a/docs/README.txt b/docs/README.txt
new file mode 100644
index 00000000000..2fbbf987405
--- /dev/null
+++ b/docs/README.txt
@@ -0,0 +1,12 @@
+LLVM Documentation
+==================
+
+The LLVM documentation is currently written in two formats:
+
+ * Plain HTML documentation.
+
+ * reStructuredText documentation using the Sphinx documentation generator. It
+ is currently tested with Sphinx 1.1.3.
+
+ For more information, see the "Sphinx Introduction for LLVM Developers"
+ document.
diff --git a/docs/ReleaseNotes.html b/docs/ReleaseNotes.html
new file mode 100644
index 00000000000..75a6fd1ca10
--- /dev/null
+++ b/docs/ReleaseNotes.html
@@ -0,0 +1,755 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
+ "http://www.w3.org/TR/html4/strict.dtd">
+<html>
+<head>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
+ <link rel="stylesheet" href="_static/llvm.css" type="text/css">
+ <title>LLVM 3.2 Release Notes</title>
+</head>
+<body>
+
+<h1>LLVM 3.2 Release Notes</h1>
+
+<div>
+<img style="float:right" src="http://llvm.org/img/DragonSmall.png"
+ width="136" height="136" alt="LLVM Dragon Logo">
+</div>
+
+<ol>
+ <li><a href="#intro">Introduction</a></li>
+ <li><a href="#subproj">Sub-project Status Update</a></li>
+ <li><a href="#externalproj">External Projects Using LLVM 3.2</a></li>
+ <li><a href="#whatsnew">What's New in LLVM?</a></li>
+ <li><a href="GettingStarted.html">Installation Instructions</a></li>
+ <li><a href="#knownproblems">Known Problems</a></li>
+ <li><a href="#additionalinfo">Additional Information</a></li>
+</ol>
+
+<div class="doc_author">
+ <p>Written by the <a href="http://llvm.org/">LLVM Team</a></p>
+</div>
+
+<h1 style="color:red">These are in-progress notes for the upcoming LLVM 3.2
+release.<br>
+You may prefer the
+<a href="http://llvm.org/releases/3.1/docs/ReleaseNotes.html">LLVM 3.1
+Release Notes</a>.</h1>
+
+<!-- *********************************************************************** -->
+<h2>
+ <a name="intro">Introduction</a>
+</h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>This document contains the release notes for the LLVM Compiler
+ Infrastructure, release 3.2. Here we describe the status of LLVM, including
+ major improvements from the previous release, improvements in various
+ subprojects of LLVM, and some of the current users of the code. All LLVM
+ releases may be downloaded from the <a href="http://llvm.org/releases/">LLVM
+ releases web site</a>.</p>
+
+<p>For more information about LLVM, including information about the latest
+ release, please check out the <a href="http://llvm.org/">main LLVM web
+ site</a>. If you have questions or comments,
+ the <a href="http://lists.cs.uiuc.edu/mailman/listinfo/llvmdev">LLVM
+ Developer's Mailing List</a> is a good place to send them.</p>
+
+<p>Note that if you are reading this file from a Subversion checkout or the main
+ LLVM web page, this document applies to the <i>next</i> release, not the
+ current one. To see the release notes for a specific release, please see the
+ <a href="http://llvm.org/releases/">releases page</a>.</p>
+
+</div>
+
+
+<!-- *********************************************************************** -->
+<h2>
+ <a name="subproj">Sub-project Status Update</a>
+</h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>The LLVM 3.2 distribution currently consists of code from the core LLVM
+ repository, which roughly includes the LLVM optimizers, code generators and
+ supporting tools, and the Clang repository. In addition to this code, the
+ LLVM Project includes other sub-projects that are in development. Here we
+ include updates on these subprojects.</p>
+
+<!--=========================================================================-->
+<h3>
+<a name="clang">Clang: C/C++/Objective-C Frontend Toolkit</a>
+</h3>
+
+<div>
+
+<p><a href="http://clang.llvm.org/">Clang</a> is an LLVM front end for the C,
+ C++, and Objective-C languages. Clang aims to provide a better user
+ experience through expressive diagnostics, a high level of conformance to
+ language standards, fast compilation, and low memory use. Like LLVM, Clang
+ provides a modular, library-based architecture that makes it suitable for
+ creating or integrating with other development tools. Clang is considered a
+ production-quality compiler for C, Objective-C, C++ and Objective-C++ on x86
+ (32- and 64-bit), and for Darwin/ARM targets.</p>
+
+<p>In the LLVM 3.2 time-frame, the Clang team has made many improvements.
+ Highlights include:</p>
+<ul>
+ <li>...</li>
+</ul>
+
+<p>For more details about the changes to Clang since the 3.1 release, see the
+ <a href="http://clang.llvm.org/docs/ReleaseNotes.html">Clang release
+ notes.</a></p>
+
+<p>If Clang rejects your code but another compiler accepts it, please take a
+ look at the <a href="http://clang.llvm.org/compatibility.html">language
+ compatibility</a> guide to make sure this is not intentional or a known
+ issue.</p>
+
+</div>
+
+<!--=========================================================================-->
+<h3>
+<a name="dragonegg">DragonEgg: GCC front-ends, LLVM back-end</a>
+</h3>
+
+<div>
+
+<p><a href="http://dragonegg.llvm.org/">DragonEgg</a> is a
+ <a href="http://gcc.gnu.org/wiki/plugins">gcc plugin</a> that replaces GCC's
+ optimizers and code generators with LLVM's. It works with gcc-4.5 and gcc-4.6
+ (and partially with gcc-4.7), can target the x86-32/x86-64 and ARM processor
+ families, and has been successfully used on the Darwin, FreeBSD, KFreeBSD,
+ Linux and OpenBSD platforms. It fully supports Ada, C, C++ and Fortran. It
+ has partial support for Go, Java, Obj-C and Obj-C++.</p>
+
+<p>The 3.2 release has the following notable changes:</p>
+
+<ul>
+ <li>...</li>
+</ul>
+
+</div>
+
+<!--=========================================================================-->
+<h3>
+<a name="compiler-rt">compiler-rt: Compiler Runtime Library</a>
+</h3>
+
+<div>
+
+<p>The new LLVM <a href="http://compiler-rt.llvm.org/">compiler-rt project</a>
+ is a simple library that provides an implementation of the low-level
+ target-specific hooks required by code generation and other runtime
+ components. For example, when compiling for a 32-bit target, converting a
+ double to a 64-bit unsigned integer is compiled into a runtime call to the
+ <code>__fixunsdfdi</code> function. The compiler-rt library provides highly
+ optimized implementations of this and other low-level routines (some are 3x
+ faster than the equivalent libgcc routines).</p>
+
+<p>The 3.2 release has the following notable changes:</p>
+
+<ul>
+ <li>...</li>
+</ul>
+
+</div>
+
+<!--=========================================================================-->
+<h3>
+<a name="lldb">LLDB: Low Level Debugger</a>
+</h3>
+
+<div>
+
+<p><a href="http://lldb.llvm.org">LLDB</a> is a ground-up implementation of a
+ command line debugger, as well as a debugger API that can be used from other
+ applications. LLDB makes use of the Clang parser to provide high-fidelity
+ expression parsing (particularly for C++) and uses the LLVM JIT for target
+ support.</p>
+
+<p>The 3.2 release has the following notable changes:</p>
+
+<ul>
+ <li>...</li>
+</ul>
+
+</div>
+
+<!--=========================================================================-->
+<h3>
+<a name="libc++">libc++: C++ Standard Library</a>
+</h3>
+
+<div>
+
+<p>Like compiler-rt, libc++ is now <a href="DeveloperPolicy.html#license">dual
+ licensed</a> under the MIT and UIUC license, allowing it to be used more
+ permissively.</p>
+
+<p>Within the LLVM 3.2 time-frame there were the following highlights:</p>
+
+<ul>
+ <li>...</li>
+</ul>
+
+</div>
+
+<!--=========================================================================-->
+<h3>
+<a name="vmkit">VMKit</a>
+</h3>
+
+<div>
+
+<p>The <a href="http://vmkit.llvm.org/">VMKit project</a> is an implementation
+ of a Java Virtual Machine (Java VM or JVM) that uses LLVM for static and
+ just-in-time compilation.</p>
+
+<p>The 3.2 release has the following notable changes:</p>
+
+<ul>
+ <li>...</li>
+</ul>
+
+</div>
+
+
+<!--=========================================================================-->
+<h3>
+<a name="Polly">Polly: Polyhedral Optimizer</a>
+</h3>
+
+<div>
+
+<p><a href="http://polly.llvm.org/">Polly</a> is an <em>experimental</em>
+ optimizer for data locality and parallelism. It currently provides high-level
+ loop optimizations and automatic parallelisation (using the OpenMP run time).
+ Work in the area of automatic SIMD and accelerator code generation was
+ started.</p>
+
+<p>Within the LLVM 3.2 time-frame there were the following highlights:</p>
+
+<ul>
+ <li>...</li>
+</ul>
+
+</div>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2>
+ <a name="externalproj">External Open Source Projects Using LLVM 3.2</a>
+</h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>An exciting aspect of LLVM is that it is used as an enabling technology for
+ a lot of other language and tools projects. This section lists some of the
+ projects that have already been updated to work with LLVM 3.2.</p>
+
+<h3>Crack</h3>
+
+<div>
+
+<p><a href="http://code.google.com/p/crack-language/">Crack</a> aims to provide
+ the ease of development of a scripting language with the performance of a
+ compiled language. The language derives concepts from C++, Java and Python,
+ incorporating object-oriented programming, operator overloading and strong
+ typing.</p>
+
+</div>
+
+<h3>FAUST</h3>
+
+<div>
+
+<p><a href="http://faust.grame.fr/">FAUST</a> is a compiled language for
+ real-time audio signal processing. The name FAUST stands for Functional
+ AUdio STream. Its programming model combines two approaches: functional
+  programming and block diagram composition. In addition to the C, C++, Java,
+  and JavaScript output formats, the Faust compiler can generate LLVM bitcode, and
+ works with LLVM 2.7-3.1.</p>
+
+</div>
+
+<h3>Glasgow Haskell Compiler (GHC)</h3>
+
+<div>
+
+<p><a href="http://www.haskell.org/ghc/">GHC</a> is an open source compiler and
+ programming suite for Haskell, a lazy functional programming language. It
+ includes an optimizing static compiler generating good code for a variety of
+ platforms, together with an interactive system for convenient, quick
+ development.</p>
+
+<p>GHC 7.0 and onwards include an LLVM code generator, supporting LLVM 2.8 and
+ later.</p>
+
+</div>
+
+<h3>Julia</h3>
+
+<div>
+
+<p><a href="https://github.com/JuliaLang/julia">Julia</a> is a high-level,
+ high-performance dynamic language for technical computing. It provides a
+ sophisticated compiler, distributed parallel execution, numerical accuracy,
+ and an extensive mathematical function library. The compiler uses type
+ inference to generate fast code without any type declarations, and uses
+ LLVM's optimization passes and JIT compiler. The
+ <a href="http://julialang.org/"> Julia Language</a> is designed
+ around multiple dispatch, giving programs a large degree of flexibility. It
+ is ready for use on many kinds of problems.</p>
+
+</div>
+
+<h3>LLVM D Compiler</h3>
+
+<div>
+
+<p><a href="https://github.com/ldc-developers/ldc">LLVM D Compiler</a> (LDC) is
+ a compiler for the D programming Language. It is based on the DMD frontend
+ and uses LLVM as backend.</p>
+
+</div>
+
+<h3>Open Shading Language</h3>
+
+<div>
+
+<p><a href="https://github.com/imageworks/OpenShadingLanguage/">Open Shading
+ Language (OSL)</a> is a small but rich language for programmable shading in
+ advanced global illumination renderers and other applications, ideal for
+ describing materials, lights, displacement, and pattern generation. It uses
+ LLVM to JIT complex shader networks to x86 code at runtime.</p>
+
+<p>OSL was developed by Sony Pictures Imageworks for use in its in-house
+ renderer used for feature film animation and visual effects, and is
+ distributed as open source software with the "New BSD" license.</p>
+
+</div>
+
+<h3>Portable OpenCL (pocl)</h3>
+
+<div>
+
+<p>In addition to producing an easily portable open source OpenCL
+ implementation, another major goal of <a href="http://pocl.sourceforge.net/">
+ pocl</a> is improving performance portability of OpenCL programs with
+ compiler optimizations, reducing the need for target-dependent manual
+ optimizations. An important part of pocl is a set of LLVM passes used to
+ statically parallelize multiple work-items with the kernel compiler, even in
+ the presence of work-group barriers. This enables static parallelization of
+ the fine-grained static concurrency in the work groups in multiple ways
+ (SIMD, VLIW, superscalar,...).</p>
+
+</div>
+
+<h3>Pure</h3>
+
+<div>
+
+<p><a href="http://pure-lang.googlecode.com/">Pure</a> is an
+ algebraic/functional programming language based on term rewriting. Programs
+ are collections of equations which are used to evaluate expressions in a
+ symbolic fashion. The interpreter uses LLVM as a backend to JIT-compile Pure
+ programs to fast native code. Pure offers dynamic typing, eager and lazy
+ evaluation, lexical closures, a hygienic macro system (also based on term
+ rewriting), built-in list and matrix support (including list and matrix
+ comprehensions) and an easy-to-use interface to C and other programming
+ languages (including the ability to load LLVM bitcode modules, and inline C,
+ C++, Fortran and Faust code in Pure programs if the corresponding
+ LLVM-enabled compilers are installed).</p>
+
+<p>Pure version 0.54 has been tested and is known to work with LLVM 3.1 (and
+ continues to work with older LLVM releases >= 2.5).</p>
+
+</div>
+
+<h3>TTA-based Co-design Environment (TCE)</h3>
+
+<div>
+
+<p><a href="http://tce.cs.tut.fi/">TCE</a> is a toolset for designing
+ application-specific processors (ASP) based on the Transport triggered
+ architecture (TTA). The toolset provides a complete co-design flow from C/C++
+ programs down to synthesizable VHDL/Verilog and parallel program binaries.
+ Processor customization points include the register files, function units,
+ supported operations, and the interconnection network.</p>
+
+<p>TCE uses Clang and LLVM for C/C++ language support, target independent
+ optimizations and also for parts of code generation. It generates new
+ LLVM-based code generators "on the fly" for the designed TTA processors and
+  loads them into the compiler backend as runtime libraries to avoid
+ per-target recompilation of larger parts of the compiler chain.</p>
+
+</div>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2>
+ <a name="whatsnew">What's New in LLVM 3.2?</a>
+</h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>This release includes a huge number of bug fixes, performance tweaks and
+ minor improvements. Some of the major improvements and new features are
+ listed in this section.</p>
+
+<!--=========================================================================-->
+<h3>
+<a name="majorfeatures">Major New Features</a>
+</h3>
+
+<div>
+
+ <!-- Features that need text if they're finished for 3.2:
+ ARM EHABI
+ combiner-aa?
+ strong phi elim
+ loop dependence analysis
+ CorrelatedValuePropagation
+ lib/Transforms/IPO/MergeFunctions.cpp => consider for 3.2.
+ Integrated assembler on by default for arm/thumb?
+
+ -->
+
+ <!-- Near dead:
+ Analysis/RegionInfo.h + Dom Frontiers
+ SparseBitVector: used in LiveVar.
+ llvm/lib/Archive - replace with lib object?
+ -->
+
+<p>LLVM 3.2 includes several major changes and big features:</p>
+
+<ul>
+ <li>...</li>
+</ul>
+
+</div>
+
+
+<!--=========================================================================-->
+<h3>
+<a name="coreimprovements">LLVM IR and Core Improvements</a>
+</h3>
+
+<div>
+
+<p>LLVM IR has several new features for better support of new targets and that
+ expose new optimization opportunities:</p>
+
+<ul>
+ <li>Thread local variables may have a specified TLS model. See the
+ <a href="LangRef.html#globalvars">Language Reference Manual</a>.</li>
+ <li>...</li>
+</ul>
+
+</div>
+
+<!--=========================================================================-->
+<h3>
+<a name="optimizer">Optimizer Improvements</a>
+</h3>
+
+<div>
+
+<p>In addition to many minor performance tweaks and bug fixes, this release
+ includes a few major enhancements and additions to the optimizers:</p>
+
+<ul>
+ <li>...</li>
+</ul>
+
+</div>
+
+<!--=========================================================================-->
+<h3>
+<a name="mc">MC Level Improvements</a>
+</h3>
+
+<div>
+
+<p>The LLVM Machine Code (aka MC) subsystem was created to solve a number of
+ problems in the realm of assembly, disassembly, object file format handling,
+ and a number of other related areas that CPU instruction-set level tools work
+ in. For more information, please see the
+ <a href="http://blog.llvm.org/2010/04/intro-to-llvm-mc-project.html">Intro
+ to the LLVM MC Project Blog Post</a>.</p>
+
+<ul>
+ <li>...</li>
+</ul>
+
+</div>
+
+<!--=========================================================================-->
+<h3>
+<a name="codegen">Target Independent Code Generator Improvements</a>
+</h3>
+
+<div>
+
+<p>Stack Coloring - We have implemented a new optimization pass
+  to merge stack objects which are used in disjoint areas of the code.
+ This optimization reduces the required stack space significantly, in cases
+ where it is clear to the optimizer that the stack slot is not shared.
+ We use the lifetime markers to tell the codegen that a certain alloca
+ is used within a region.</p>
+
+<p>We have put a significant amount of work into the code generator
+ infrastructure, which allows us to implement more aggressive algorithms and
+ make it run faster:</p>
+
+<ul>
+ <li>...</li>
+</ul>
+
+<p> We added new TableGen infrastructure to support bundling for
+ Very Long Instruction Word (VLIW) architectures. TableGen can now
+ automatically generate a deterministic finite automaton from a VLIW
+ target's schedule description which can be queried to determine
+ legal groupings of instructions in a bundle.</p>
+
+<p> We have added a new target independent VLIW packetizer based on the
+ DFA infrastructure to group machine instructions into bundles.</p>
+
+</div>
+
+<h4>
+<a name="blockplacement">Basic Block Placement</a>
+</h4>
+
+<div>
+
+<p>A probability based block placement and code layout algorithm was added to
+ LLVM's code generator. This layout pass supports probabilities derived from
+ static heuristics as well as source code annotations such as
+ <code>__builtin_expect</code>.</p>
+
+</div>
+
+<!--=========================================================================-->
+<h3>
+<a name="x86">X86-32 and X86-64 Target Improvements</a>
+</h3>
+
+<div>
+
+<p>New features and major changes in the X86 target include:</p>
+
+<ul>
+ <li>...</li>
+</ul>
+
+</div>
+
+<!--=========================================================================-->
+<h3>
+<a name="ARM">ARM Target Improvements</a>
+</h3>
+
+<div>
+
+<p>New features of the ARM target include:</p>
+
+<ul>
+ <li>...</li>
+</ul>
+
+<!--_________________________________________________________________________-->
+
+<h4>
+<a name="armintegratedassembler">ARM Integrated Assembler</a>
+</h4>
+
+<div>
+
+<p>The ARM target now includes a full featured macro assembler, including
+ direct-to-object module support for clang. The assembler is currently enabled
+ by default for Darwin only pending testing and any additional necessary
+ platform specific support for Linux.</p>
+
+<p>Full support is included for Thumb1, Thumb2 and ARM modes, along with
+ subtarget and CPU specific extensions for VFP2, VFP3 and NEON.</p>
+
+<p>The assembler is Unified Syntax only (see the ARM Architectural Reference Manual
+  for details). While there is some, and growing, support for pre-unified
+ (divided) syntax, there are still significant gaps in that support.</p>
+
+</div>
+
+</div>
+
+<!--=========================================================================-->
+<h3>
+<a name="MIPS">MIPS Target Improvements</a>
+</h3>
+
+<div>
+
+<p>New features and major changes in the MIPS target include:</p>
+
+<ul>
+ <li>...</li>
+</ul>
+
+</div>
+
+<!--=========================================================================-->
+<h3>
+<a name="OtherTS">Other Target Specific Improvements</a>
+</h3>
+
+<div>
+
+<ul>
+ <li>...</li>
+</ul>
+
+</div>
+
+<!--=========================================================================-->
+<h3>
+<a name="changes">Major Changes and Removed Features</a>
+</h3>
+
+<div>
+
+<p>If you're already an LLVM user or developer with out-of-tree changes based on
+ LLVM 3.2, this section lists some "gotchas" that you may run into upgrading
+ from the previous release.</p>
+
+<ul>
+ <li>...</li>
+</ul>
+
+</div>
+
+<!--=========================================================================-->
+<h3>
+<a name="api_changes">Internal API Changes</a>
+</h3>
+
+<div>
+
+<p>In addition, many APIs have changed in this release. Some of the major
+ LLVM API changes are:</p>
+
+<ul>
+ <li>...</li>
+</ul>
+
+</div>
+
+<!--=========================================================================-->
+<h3>
+<a name="tools_changes">Tools Changes</a>
+</h3>
+
+<div>
+
+<p>In addition, some tools have changed in this release. Some of the changes
+ are:</p>
+
+<ul>
+ <li>...</li>
+</ul>
+
+</div>
+
+
+<!--=========================================================================-->
+<h3>
+<a name="python">Python Bindings</a>
+</h3>
+
+<div>
+
+<p>Officially supported Python bindings have been added! Feature support is far
+ from complete. The current bindings support interfaces to:</p>
+
+<ul>
+ <li>...</li>
+</ul>
+
+</div>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2>
+ <a name="knownproblems">Known Problems</a>
+</h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>LLVM is generally a production quality compiler, and is used by a broad range
+  of applications and ships in many products. That said, not every
+ subsystem is as mature as the aggregate, particularly the more obscure
+ targets. If you run into a problem, please check
+ the <a href="http://llvm.org/bugs/">LLVM bug database</a> and submit a bug if
+ there isn't already one or ask on
+ the <a href="http://lists.cs.uiuc.edu/mailman/listinfo/llvmdev">LLVMdev
+ list</a>.</p>
+
+ <p>Known problem areas include:</p>
+
+<ul>
+ <li>The CellSPU, MSP430, PTX and XCore backends are experimental.</li>
+
+  <li>The integrated assembler, disassembler, and JIT are not supported by
+ several targets. If an integrated assembler is not supported, then a
+ system assembler is required. For more details, see the <a
+ href="CodeGenerator.html#targetfeatures">Target Features Matrix</a>.
+ </li>
+</ul>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2>
+ <a name="additionalinfo">Additional Information</a>
+</h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>A wide variety of additional information is available on
+ the <a href="http://llvm.org/">LLVM web page</a>, in particular in
+ the <a href="http://llvm.org/docs/">documentation</a> section. The web page
+ also contains versions of the API documentation which is up-to-date with the
+ Subversion version of the source code. You can access versions of these
+ documents specific to this release by going into the "<tt>llvm/doc/</tt>"
+ directory in the LLVM tree.</p>
+
+<p>If you have any questions or comments about LLVM, please feel free to contact
+ us via the <a href="http://llvm.org/docs/#maillist"> mailing lists</a>.</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+
+<hr>
+<address>
+ <a href="http://jigsaw.w3.org/css-validator/check/referer"><img
+ src="http://jigsaw.w3.org/css-validator/images/vcss-blue" alt="Valid CSS"></a>
+ <a href="http://validator.w3.org/check/referer"><img
+ src="http://www.w3.org/Icons/valid-html401-blue" alt="Valid HTML 4.01"></a>
+
+ <a href="http://llvm.org/">LLVM Compiler Infrastructure</a><br>
+ Last modified: $Date$
+</address>
+
+</body>
+</html>
diff --git a/docs/SegmentedStacks.rst b/docs/SegmentedStacks.rst
new file mode 100644
index 00000000000..f97d62abda0
--- /dev/null
+++ b/docs/SegmentedStacks.rst
@@ -0,0 +1,80 @@
+.. _segmented_stacks:
+
+========================
+Segmented Stacks in LLVM
+========================
+
+.. contents::
+ :local:
+
+Introduction
+============
+
+Segmented stacks allow stack space to be allocated incrementally rather than as
+a monolithic chunk (of some worst case size) at thread initialization. This is
+done by allocating stack blocks (henceforth called *stacklets*) and linking them
+into a doubly linked list. The function prologue is responsible for checking if
+the current stacklet has enough space for the function to execute; if not, it
+calls into the libgcc runtime to allocate more stack space. When using ``llc``,
+segmented stacks can be enabled by adding ``-segmented-stacks`` to the command
+line.
+
+The runtime functionality is `already there in libgcc
+<http://gcc.gnu.org/wiki/SplitStacks>`_.
+
+Implementation Details
+======================
+
+.. _allocating stacklets:
+
+Allocating Stacklets
+--------------------
+
+As mentioned above, the function prologue checks if the current stacklet has
+enough space. The current approach is to use a slot in the TCB to store the
+current stack limit (minus the amount of space needed to allocate a new block) -
+this slot's offset is again dictated by ``libgcc``. The generated
+assembly looks like this on x86-64:
+
+.. code-block:: nasm
+
+ leaq -8(%rsp), %r10
+ cmpq %fs:112, %r10
+ jg .LBB0_2
+
+ # More stack space needs to be allocated
+ movabsq $8, %r10 # The amount of space needed
+ movabsq $0, %r11 # The total size of arguments passed on stack
+ callq __morestack
+ ret # The reason for this extra return is explained below
+ .LBB0_2:
+ # Usual prologue continues here
+
+The size of function arguments on the stack needs to be passed to
+``__morestack`` (this function is implemented in ``libgcc``) since that number
+of bytes has to be copied from the previous stacklet to the current one. This is
+so that SP (and FP) relative addressing of function arguments works as expected.
+
+The unusual ``ret`` is needed to have the function which made a call to
+``__morestack`` return correctly. ``__morestack``, instead of returning, calls
+into ``.LBB0_2``. This is possible since both the size of the ``ret``
+instruction and the PC of call to ``__morestack`` are known. When the function
+body returns, control is transferred back to ``__morestack``. ``__morestack``
+then de-allocates the new stacklet, restores the correct SP value, and does a
+second return, which returns control to the correct caller.
+
+Variable Sized Allocas
+----------------------
+
+The section on `allocating stacklets`_ assumes that every stack frame will be
+of fixed size. However, LLVM allows the use of the ``llvm.alloca``
+intrinsic to allocate dynamically sized blocks of memory on the stack. When
+faced with such a variable-sized alloca, code is generated to:
+
+* Check if the current stacklet has enough space. If yes, just bump the SP, like
+ in the normal case.
+* If not, generate a call to ``libgcc``, which allocates the memory from the
+ heap.
+
+The memory allocated from the heap is linked into a list in the current
+stacklet, and freed when the stacklet itself is freed, preventing a memory leak.
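+
+As a rough illustration (the function and value names here are hypothetical),
+a dynamically sized alloca that takes this code path looks like the following
+LLVM IR:
+
+.. code-block:: llvm
+
+  define void @use_dynamic_buffer(i32 %n) {
+  entry:
+    ; %n is only known at run time, so the static check in the prologue cannot
+    ; account for this allocation; the generated code performs the
+    ; check-then-heap-allocate sequence described above instead.
+    %buf = alloca i8, i32 %n, align 4
+    ret void
+  }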
diff --git a/docs/SourceLevelDebugging.html b/docs/SourceLevelDebugging.html
new file mode 100644
index 00000000000..918383bc213
--- /dev/null
+++ b/docs/SourceLevelDebugging.html
@@ -0,0 +1,2858 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
+ "http://www.w3.org/TR/html4/strict.dtd">
+<html>
+<head>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
+ <title>Source Level Debugging with LLVM</title>
+ <link rel="stylesheet" href="_static/llvm.css" type="text/css">
+</head>
+<body>
+
+<h1>Source Level Debugging with LLVM</h1>
+
+<table class="layout" style="width:100%">
+ <tr class="layout">
+ <td class="left">
+<ul>
+ <li><a href="#introduction">Introduction</a>
+ <ol>
+ <li><a href="#phil">Philosophy behind LLVM debugging information</a></li>
+ <li><a href="#consumers">Debug information consumers</a></li>
+ <li><a href="#debugopt">Debugging optimized code</a></li>
+ </ol></li>
+ <li><a href="#format">Debugging information format</a>
+ <ol>
+ <li><a href="#debug_info_descriptors">Debug information descriptors</a>
+ <ul>
+ <li><a href="#format_compile_units">Compile unit descriptors</a></li>
+ <li><a href="#format_files">File descriptors</a></li>
+ <li><a href="#format_global_variables">Global variable descriptors</a></li>
+ <li><a href="#format_subprograms">Subprogram descriptors</a></li>
+ <li><a href="#format_blocks">Block descriptors</a></li>
+ <li><a href="#format_basic_type">Basic type descriptors</a></li>
+ <li><a href="#format_derived_type">Derived type descriptors</a></li>
+ <li><a href="#format_composite_type">Composite type descriptors</a></li>
+ <li><a href="#format_subrange">Subrange descriptors</a></li>
+ <li><a href="#format_enumeration">Enumerator descriptors</a></li>
+ <li><a href="#format_variables">Local variables</a></li>
+ </ul></li>
+ <li><a href="#format_common_intrinsics">Debugger intrinsic functions</a>
+ <ul>
+ <li><a href="#format_common_declare">llvm.dbg.declare</a></li>
+ <li><a href="#format_common_value">llvm.dbg.value</a></li>
+ </ul></li>
+ </ol></li>
+ <li><a href="#format_common_lifetime">Object lifetimes and scoping</a></li>
+ <li><a href="#ccxx_frontend">C/C++ front-end specific debug information</a>
+ <ol>
+ <li><a href="#ccxx_compile_units">C/C++ source file information</a></li>
+ <li><a href="#ccxx_global_variable">C/C++ global variable information</a></li>
+ <li><a href="#ccxx_subprogram">C/C++ function information</a></li>
+ <li><a href="#ccxx_basic_types">C/C++ basic types</a></li>
+ <li><a href="#ccxx_derived_types">C/C++ derived types</a></li>
+ <li><a href="#ccxx_composite_types">C/C++ struct/union types</a></li>
+ <li><a href="#ccxx_enumeration_types">C/C++ enumeration types</a></li>
+ </ol></li>
+ <li><a href="#llvmdwarfextension">LLVM Dwarf Extensions</a>
+ <ol>
+ <li><a href="#objcproperty">Debugging Information Extension
+ for Objective C Properties</a>
+ <ul>
+ <li><a href="#objcpropertyintroduction">Introduction</a></li>
+ <li><a href="#objcpropertyproposal">Proposal</a></li>
+ <li><a href="#objcpropertynewattributes">New DWARF Attributes</a></li>
+ <li><a href="#objcpropertynewconstants">New DWARF Constants</a></li>
+ </ul>
+ </li>
+ <li><a href="#acceltable">Name Accelerator Tables</a>
+ <ul>
+ <li><a href="#acceltableintroduction">Introduction</a></li>
+ <li><a href="#acceltablehashes">Hash Tables</a></li>
+ <li><a href="#acceltabledetails">Details</a></li>
+ <li><a href="#acceltablecontents">Contents</a></li>
+ <li><a href="#acceltableextensions">Language Extensions and File Format Changes</a></li>
+ </ul>
+ </li>
+ </ol>
+ </li>
+</ul>
+</td>
+</tr></table>
+
+<div class="doc_author">
+ <p>Written by <a href="mailto:sabre@nondot.org">Chris Lattner</a>
+ and <a href="mailto:jlaskey@mac.com">Jim Laskey</a></p>
+</div>
+
+
+<!-- *********************************************************************** -->
+<h2><a name="introduction">Introduction</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>This document is the central repository for all information pertaining to
+ debug information in LLVM. It describes the <a href="#format">actual format
+ that the LLVM debug information</a> takes, which is useful for those
+ interested in creating front-ends or dealing directly with the information.
+ Further, this document provides specific examples of what debug information
+ for C/C++ looks like.</p>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="phil">Philosophy behind LLVM debugging information</a>
+</h3>
+
+<div>
+
+<p>The idea of the LLVM debugging information is to capture how the important
+ pieces of the source-language's Abstract Syntax Tree map onto LLVM code.
+ Several design aspects have shaped the solution that appears here. The
+ important ones are:</p>
+
+<ul>
+ <li>Debugging information should have very little impact on the rest of the
+ compiler. No transformations, analyses, or code generators should need to
+ be modified because of debugging information.</li>
+
+ <li>LLVM optimizations should interact in <a href="#debugopt">well-defined and
+ easily described ways</a> with the debugging information.</li>
+
+ <li>Because LLVM is designed to support arbitrary programming languages,
+ LLVM-to-LLVM tools should not need to know anything about the semantics of
+ the source-level-language.</li>
+
+ <li>Source-level languages are often <b>widely</b> different from one another.
+      LLVM should not put any restrictions on the flavor of the source-language,
+ and the debugging information should work with any language.</li>
+
+ <li>With code generator support, it should be possible to use an LLVM compiler
+ to compile a program to native machine code and standard debugging
+ formats. This allows compatibility with traditional machine-code level
+ debuggers, like GDB or DBX.</li>
+</ul>
+
+<p>The approach used by the LLVM implementation is to use a small set
+ of <a href="#format_common_intrinsics">intrinsic functions</a> to define a
+ mapping between LLVM program objects and the source-level objects. The
+ description of the source-level program is maintained in LLVM metadata
+ in an <a href="#ccxx_frontend">implementation-defined format</a>
+ (the C/C++ front-end currently uses working draft 7 of
+ the <a href="http://www.eagercon.com/dwarf/dwarf3std.htm">DWARF 3
+ standard</a>).</p>
+
+<p>When a program is being debugged, a debugger interacts with the user and
+ turns the stored debug information into source-language specific information.
+ As such, a debugger must be aware of the source-language, and is thus tied to
+ a specific language or family of languages.</p>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="consumers">Debug information consumers</a>
+</h3>
+
+<div>
+
+<p>The role of debug information is to provide meta information normally
+   stripped away during the compilation process. This meta information provides
+   an LLVM user with a relationship between generated code and the original
+   program source code.</p>
+
+<p>Currently, debug information is consumed by DwarfDebug to produce DWARF
+   information used by the GDB debugger. Other targets could use the same
+   information to produce stabs or other debug forms.</p>
+
+<p>It would also be reasonable to use debug information to feed profiling tools
+   for analysis of generated code, or tools for reconstructing the original
+   source from generated code.</p>
+
+<p>TODO - expound a bit more.</p>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="debugopt">Debugging optimized code</a>
+</h3>
+
+<div>
+
+<p>An extremely high priority of LLVM debugging information is to make it
+ interact well with optimizations and analysis. In particular, the LLVM debug
+ information provides the following guarantees:</p>
+
+<ul>
+ <li>LLVM debug information <b>always provides information to accurately read
+ the source-level state of the program</b>, regardless of which LLVM
+ optimizations have been run, and without any modification to the
+ optimizations themselves. However, some optimizations may impact the
+ ability to modify the current state of the program with a debugger, such
+ as setting program variables, or calling functions that have been
+ deleted.</li>
+
+ <li>As desired, LLVM optimizations can be upgraded to be aware of the LLVM
+ debugging information, allowing them to update the debugging information
+ as they perform aggressive optimizations. This means that, with effort,
+ the LLVM optimizers could optimize debug code just as well as non-debug
+ code.</li>
+
+ <li>LLVM debug information does not prevent optimizations from
+ happening (for example inlining, basic block reordering/merging/cleanup,
+ tail duplication, etc).</li>
+
+ <li>LLVM debug information is automatically optimized along with the rest of
+ the program, using existing facilities. For example, duplicate
+ information is automatically merged by the linker, and unused information
+ is automatically removed.</li>
+</ul>
+
+<p>Basically, the debug information allows you to compile a program with
+ "<tt>-O0 -g</tt>" and get full debug information, allowing you to arbitrarily
+ modify the program as it executes from a debugger. Compiling a program with
+ "<tt>-O3 -g</tt>" gives you full debug information that is always available
+ and accurate for reading (e.g., you get accurate stack traces despite tail
+ call elimination and inlining), but you might lose the ability to modify the
+   program and call functions which were optimized out of the program, or
+ inlined away completely.</p>
+
+<p><a href="TestingGuide.html#quicktestsuite">LLVM test suite</a> provides a
+ framework to test optimizer's handling of debugging information. It can be
+ run like this:</p>
+
+<div class="doc_code">
+<pre>
+% cd llvm/projects/test-suite/MultiSource/Benchmarks # or some other level
+% make TEST=dbgopt
+</pre>
+</div>
+
+<p>This will test the impact of debugging information on optimization passes. If
+   debugging information influences an optimization pass then it will be
+   reported as a failure. See <a href="TestingGuide.html">TestingGuide</a> for more
+ information on LLVM test infrastructure and how to run various tests.</p>
+
+</div>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2>
+ <a name="format">Debugging information format</a>
+</h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>LLVM debugging information has been carefully designed to make it possible
+ for the optimizer to optimize the program and debugging information without
+ necessarily having to know anything about debugging information. In
+ particular, the use of metadata avoids duplicated debugging information from
+ the beginning, and the global dead code elimination pass automatically
+ deletes debugging information for a function if it decides to delete the
+ function. </p>
+
+<p>To do this, most of the debugging information (descriptors for types,
+ variables, functions, source files, etc) is inserted by the language
+ front-end in the form of LLVM metadata. </p>
+
+<p>Debug information is designed to be agnostic about the target debugger and
+ debugging information representation (e.g. DWARF/Stabs/etc). It uses a
+ generic pass to decode the information that represents variables, types,
+ functions, namespaces, etc: this allows for arbitrary source-language
+ semantics and type-systems to be used, as long as there is a module
+ written for the target debugger to interpret the information. </p>
+
+<p>To provide basic functionality, the LLVM debugger does have to make some
+ assumptions about the source-level language being debugged, though it keeps
+ these to a minimum. The only common features that the LLVM debugger assumes
+ exist are <a href="#format_files">source files</a>,
+ and <a href="#format_global_variables">program objects</a>. These abstract
+ objects are used by a debugger to form stack traces, show information about
+ local variables, etc.</p>
+
+<p>This section of the documentation first describes the representation aspects
+ common to any source-language. The <a href="#ccxx_frontend">next section</a>
+ describes the data layout conventions used by the C and C++ front-ends.</p>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="debug_info_descriptors">Debug information descriptors</a>
+</h3>
+
+<div>
+
+<p>In consideration of the complexity and volume of debug information, LLVM
+   provides a specification for well-formed debug descriptors.</p>
+
+<p>Consumers of LLVM debug information expect the descriptors for program
+ objects to start in a canonical format, but the descriptors can include
+ additional information appended at the end that is source-language
+ specific. All LLVM debugging information is versioned, allowing backwards
+ compatibility in the case that the core structures need to change in some
+   way. Also, each debugging information object starts with a tag to indicate
+   what type of object it is. The source-language is allowed to define its own
+   objects by using unreserved tag numbers. We recommend using tags in the
+   range 0x1000 through 0x2000 (there is a defined enum DW_TAG_user_base =
+   0x1000).</p>
+
+<p>The fields of debug descriptors used internally by LLVM
+ are restricted to only the simple data types <tt>i32</tt>, <tt>i1</tt>,
+ <tt>float</tt>, <tt>double</tt>, <tt>mdstring</tt> and <tt>mdnode</tt>. </p>
+
+<div class="doc_code">
+<pre>
+!1 = metadata !{
+ i32, ;; A tag
+ ...
+}
+</pre>
+</div>
+
+<p><a name="LLVMDebugVersion">The first field of a descriptor is always an
+ <tt>i32</tt> containing a tag value identifying the content of the
+ descriptor. The remaining fields are specific to the descriptor. The values
+ of tags are loosely bound to the tag values of DWARF information entries.
+ However, that does not restrict the use of the information supplied to DWARF
+ targets. To facilitate versioning of debug information, the tag is augmented
+ with the current debug version (LLVMDebugVersion = 8 &lt;&lt; 16 or
+ 0x80000 or 524288.)</a></p>
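+
+<p>For example, a compile unit descriptor (<tt>DW_TAG_compile_unit</tt> = 17)
+   therefore begins with an <tt>i32</tt> tag value of 17 + 524288 = 524305:</p>
+
+<div class="doc_code">
+<pre>
+!0 = metadata !{
+  i32 524305,  ;; Tag = 17 (DW_TAG_compile_unit) + LLVMDebugVersion (0x80000)
+  ...
+}
+</pre>
+</div>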
+
+<p>The details of the various descriptors follow.</p>
+
+<!-- ======================================================================= -->
+<h4>
+ <a name="format_compile_units">Compile unit descriptors</a>
+</h4>
+
+<div>
+
+<div class="doc_code">
+<pre>
+!0 = metadata !{
+ i32, ;; Tag = 17 + <a href="#LLVMDebugVersion">LLVMDebugVersion</a>
+ ;; (DW_TAG_compile_unit)
+ i32, ;; Unused field.
+ i32, ;; DWARF language identifier (ex. DW_LANG_C89)
+ metadata, ;; Source file name
+ metadata, ;; Source file directory (includes trailing slash)
+  metadata, ;; Producer (ex. "4.0.1 LLVM (LLVM research group)")
+  i1,       ;; True if this is a main compile unit.
+  i1,       ;; True if this is optimized.
+  metadata, ;; Flags
+  i32,      ;; Runtime version
+  metadata, ;; List of enums types
+  metadata, ;; List of retained types
+  metadata, ;; List of subprograms
+  metadata  ;; List of global variables
+}
+</pre>
+</div>
+
+<p>These descriptors contain a source language ID for the file (we use the DWARF
+ 3.0 ID numbers, such as <tt>DW_LANG_C89</tt>, <tt>DW_LANG_C_plus_plus</tt>,
+ <tt>DW_LANG_Cobol74</tt>, etc), three strings describing the filename,
+ working directory of the compiler, and an identifier string for the compiler
+ that produced it.</p>
+
+<p>Compile unit descriptors provide the root context for objects declared in a
+   specific compilation unit. File descriptors are defined using this context.
+   These descriptors are collected by a named metadata
+   <tt>!llvm.dbg.cu</tt>. The compile unit descriptor keeps track of
+   subprograms, global variables and type information.</p>
+
+</div>
+
+<!-- ======================================================================= -->
+<h4>
+ <a name="format_files">File descriptors</a>
+</h4>
+
+<div>
+
+<div class="doc_code">
+<pre>
+!0 = metadata !{
+ i32, ;; Tag = 41 + <a href="#LLVMDebugVersion">LLVMDebugVersion</a>
+ ;; (DW_TAG_file_type)
+ metadata, ;; Source file name
+ metadata, ;; Source file directory (includes trailing slash)
+ metadata ;; Unused
+}
+</pre>
+</div>
+
+<p>These descriptors contain information for a file. Global variables and top
+   level functions would be defined using this context. File descriptors also
+   provide context for source line correspondence.</p>
+
+<p>Each input file is encoded as a separate file descriptor in LLVM debugging
+ information output. </p>
+
+</div>
+
+<!-- ======================================================================= -->
+<h4>
+ <a name="format_global_variables">Global variable descriptors</a>
+</h4>
+
+<div>
+
+<div class="doc_code">
+<pre>
+!1 = metadata !{
+ i32, ;; Tag = 52 + <a href="#LLVMDebugVersion">LLVMDebugVersion</a>
+ ;; (DW_TAG_variable)
+ i32, ;; Unused field.
+ metadata, ;; Reference to context descriptor
+ metadata, ;; Name
+ metadata, ;; Display name (fully qualified C++ name)
+ metadata, ;; MIPS linkage name (for C++)
+ metadata, ;; Reference to file where defined
+ i32, ;; Line number where defined
+ metadata, ;; Reference to type descriptor
+ i1, ;; True if the global is local to compile unit (static)
+ i1, ;; True if the global is defined in the compile unit (not extern)
+ {}* ;; Reference to the global variable
+}
+</pre>
+</div>
+
+<p>These descriptors provide debug information about global variables. They
+   provide details such as name, type and where the variable is defined. All
+   global variables are collected inside the named metadata
+   <tt>!llvm.dbg.cu</tt>.</p>
+
+</div>
+
+<!-- ======================================================================= -->
+<h4>
+ <a name="format_subprograms">Subprogram descriptors</a>
+</h4>
+
+<div>
+
+<div class="doc_code">
+<pre>
+!2 = metadata !{
+ i32, ;; Tag = 46 + <a href="#LLVMDebugVersion">LLVMDebugVersion</a>
+ ;; (DW_TAG_subprogram)
+ i32, ;; Unused field.
+ metadata, ;; Reference to context descriptor
+ metadata, ;; Name
+ metadata, ;; Display name (fully qualified C++ name)
+ metadata, ;; MIPS linkage name (for C++)
+ metadata, ;; Reference to file where defined
+ i32, ;; Line number where defined
+ metadata, ;; Reference to type descriptor
+ i1, ;; True if the global is local to compile unit (static)
+ i1, ;; True if the global is defined in the compile unit (not extern)
+ i32, ;; Line number where the scope of the subprogram begins
+  i32,      ;; Virtuality, e.g. dwarf::DW_VIRTUALITY_virtual
+  i32,      ;; Index into a virtual function table
+  metadata, ;; Indicates which base type contains the vtable pointer for the
+            ;; derived class
+  i32,      ;; Flags - Artificial, Private, Protected, Explicit, Prototyped.
+ i1, ;; isOptimized
+ Function *,;; Pointer to LLVM function
+ metadata, ;; Lists function template parameters
+  metadata, ;; Function declaration descriptor
+  metadata  ;; List of function variables
+}
+</pre>
+</div>
+
+<p>These descriptors provide debug information about functions, methods and
+ subprograms. They provide details such as name, return types and the source
+ location where the subprogram is defined.
+</p>
+
+</div>
+
+<!-- ======================================================================= -->
+<h4>
+ <a name="format_blocks">Block descriptors</a>
+</h4>
+
+<div>
+
+<div class="doc_code">
+<pre>
+!3 = metadata !{
+ i32, ;; Tag = 11 + <a href="#LLVMDebugVersion">LLVMDebugVersion</a> (DW_TAG_lexical_block)
+ metadata,;; Reference to context descriptor
+ i32, ;; Line number
+ i32, ;; Column number
+ metadata,;; Reference to source file
+ i32 ;; Unique ID to identify blocks from a template function
+}
+</pre>
+</div>
+
+<p>This descriptor provides debug information about nested blocks within a
+   subprogram. The line number and column numbers are used to distinguish
+   two lexical blocks at the same depth.</p>
+
+<div class="doc_code">
+<pre>
+!3 = metadata !{
+ i32, ;; Tag = 11 + <a href="#LLVMDebugVersion">LLVMDebugVersion</a> (DW_TAG_lexical_block)
+  metadata,;; Reference to the scope we're annotating with a file change
+  metadata ;; Reference to the file the scope is enclosed in.
+}
+</pre>
+</div>
+
+<p>This descriptor provides a wrapper around a lexical scope to handle file
+ changes in the middle of a lexical block.</p>
+
+</div>
+
+<!-- ======================================================================= -->
+<h4>
+ <a name="format_basic_type">Basic type descriptors</a>
+</h4>
+
+<div>
+
+<div class="doc_code">
+<pre>
+!4 = metadata !{
+ i32, ;; Tag = 36 + <a href="#LLVMDebugVersion">LLVMDebugVersion</a>
+ ;; (DW_TAG_base_type)
+ metadata, ;; Reference to context
+ metadata, ;; Name (may be "" for anonymous types)
+ metadata, ;; Reference to file where defined (may be NULL)
+ i32, ;; Line number where defined (may be 0)
+ i64, ;; Size in bits
+ i64, ;; Alignment in bits
+ i64, ;; Offset in bits
+ i32, ;; Flags
+ i32 ;; DWARF type encoding
+}
+</pre>
+</div>
+
+<p>These descriptors define primitive types used in the code, for example
+   <tt>int</tt>, <tt>bool</tt> and <tt>float</tt>. The context provides the
+   scope of the type, which is usually the top level. Since basic types are not
+   usually user defined, the context and line number can be left as NULL and 0.
+   The size, alignment and offset are expressed in bits and can be 64 bit
+   values. The alignment is used to round the offset when embedded in a
+   <a href="#format_composite_type">composite type</a> (for example, to keep
+   doubles on 64 bit boundaries). The offset is the bit offset if embedded in
+   a <a href="#format_composite_type">composite type</a>.</p>
+
+<p>The type encoding provides the details of the type. The values are typically
+ one of the following:</p>
+
+<div class="doc_code">
+<pre>
+DW_ATE_address = 1
+DW_ATE_boolean = 2
+DW_ATE_float = 4
+DW_ATE_signed = 5
+DW_ATE_signed_char = 6
+DW_ATE_unsigned = 7
+DW_ATE_unsigned_char = 8
+</pre>
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h4>
+ <a name="format_derived_type">Derived type descriptors</a>
+</h4>
+
+<div>
+
+<div class="doc_code">
+<pre>
+!5 = metadata !{
+ i32, ;; Tag (see below)
+ metadata, ;; Reference to context
+ metadata, ;; Name (may be "" for anonymous types)
+ metadata, ;; Reference to file where defined (may be NULL)
+ i32, ;; Line number where defined (may be 0)
+ i64, ;; Size in bits
+ i64, ;; Alignment in bits
+ i64, ;; Offset in bits
+ i32, ;; Flags to encode attributes, e.g. private
+ metadata, ;; Reference to type derived from
+  metadata, ;; (optional) Name of the Objective C property associated with
+            ;; the Objective-C ivar
+ metadata, ;; (optional) Name of the Objective C property getter selector.
+ metadata, ;; (optional) Name of the Objective C property setter selector.
+ i32 ;; (optional) Objective C property attributes.
+}
+</pre>
+</div>
+
+<p>These descriptors are used to define types derived from other types. The
+value of the tag varies depending on the meaning. The following are possible
+tag values:</p>
+
+<div class="doc_code">
+<pre>
+DW_TAG_formal_parameter = 5
+DW_TAG_member = 13
+DW_TAG_pointer_type = 15
+DW_TAG_reference_type = 16
+DW_TAG_typedef = 22
+DW_TAG_const_type = 38
+DW_TAG_volatile_type = 53
+DW_TAG_restrict_type = 55
+</pre>
+</div>
+
+<p><tt>DW_TAG_member</tt> is used to define a member of
+ a <a href="#format_composite_type">composite type</a>
+ or <a href="#format_subprograms">subprogram</a>. The type of the member is
+ the <a href="#format_derived_type">derived
+ type</a>. <tt>DW_TAG_formal_parameter</tt> is used to define a member which
+ is a formal argument of a subprogram.</p>
+
+<p><tt>DW_TAG_typedef</tt> is used to provide a name for the derived type.</p>
+
+<p><tt>DW_TAG_pointer_type</tt>, <tt>DW_TAG_reference_type</tt>,
+ <tt>DW_TAG_const_type</tt>, <tt>DW_TAG_volatile_type</tt> and
+ <tt>DW_TAG_restrict_type</tt> are used to qualify
+ the <a href="#format_derived_type">derived type</a>. </p>
+
+<p><a href="#format_derived_type">Derived type</a> location can be determined
+ from the context and line number. The size, alignment and offset are
+ expressed in bits and can be 64 bit values. The alignment is used to round
+ the offset when embedded in a <a href="#format_composite_type">composite
+ type</a> (example to keep float doubles on 64 bit boundaries.) The offset is
+ the bit offset if embedded in a <a href="#format_composite_type">composite
+ type</a>.</p>
+
+<p>Note that the <tt>void *</tt> type is expressed as a type derived from NULL.
+</p>
+
+</div>
+
+<!-- ======================================================================= -->
+<h4>
+ <a name="format_composite_type">Composite type descriptors</a>
+</h4>
+
+<div>
+
+<div class="doc_code">
+<pre>
+!6 = metadata !{
+ i32, ;; Tag (see below)
+ metadata, ;; Reference to context
+ metadata, ;; Name (may be "" for anonymous types)
+ metadata, ;; Reference to file where defined (may be NULL)
+ i32, ;; Line number where defined (may be 0)
+ i64, ;; Size in bits
+ i64, ;; Alignment in bits
+ i64, ;; Offset in bits
+ i32, ;; Flags
+ metadata, ;; Reference to type derived from
+ metadata, ;; Reference to array of member descriptors
+ i32 ;; Runtime languages
+}
+</pre>
+</div>
+
+<p>These descriptors are used to define types that are composed of 0 or more
+elements. The value of the tag varies depending on the meaning. The following
+are possible tag values:</p>
+
+<div class="doc_code">
+<pre>
+DW_TAG_array_type = 1
+DW_TAG_enumeration_type = 4
+DW_TAG_structure_type = 19
+DW_TAG_union_type = 23
+DW_TAG_vector_type = 259
+DW_TAG_subroutine_type = 21
+DW_TAG_inheritance = 28
+</pre>
+</div>
+
+<p>The vector flag indicates that an array type is a native packed vector.</p>
+
+<p>The members of array types (tag = <tt>DW_TAG_array_type</tt>) or vector types
+ (tag = <tt>DW_TAG_vector_type</tt>) are <a href="#format_subrange">subrange
+ descriptors</a>, each representing the range of subscripts at that level of
+ indexing.</p>
+
+<p>The members of enumeration types (tag = <tt>DW_TAG_enumeration_type</tt>) are
+ <a href="#format_enumeration">enumerator descriptors</a>, each representing
+ the definition of enumeration value for the set. All enumeration type
+ descriptors are collected inside the named metadata
+ <tt>!llvm.dbg.cu</tt>.</p>
+
+<p>The members of structure (tag = <tt>DW_TAG_structure_type</tt>) or union (tag
+ = <tt>DW_TAG_union_type</tt>) types are any one of
+ the <a href="#format_basic_type">basic</a>,
+ <a href="#format_derived_type">derived</a>
+ or <a href="#format_composite_type">composite</a> type descriptors, each
+ representing a field member of the structure or union.</p>
+
+<p>For C++ classes (tag = <tt>DW_TAG_structure_type</tt>), member descriptors
+ provide information about base classes, static members and member
+ functions. If a member is a <a href="#format_derived_type">derived type
+ descriptor</a> and has a tag of <tt>DW_TAG_inheritance</tt>, then the type
+   represents a base class. If the member is
+   a <a href="#format_global_variables">global variable descriptor</a> then it
+   represents a static member. And, if the member is
+   a <a href="#format_subprograms">subprogram descriptor</a> then it represents
+   a member function. For static members and member
+   functions, <tt>getName()</tt> returns the member's linkage name or the C++
+   mangled name, and <tt>getDisplayName()</tt> returns the simplified version of the name.</p>
+
+<p>The first member of subroutine (tag = <tt>DW_TAG_subroutine_type</tt>) type
+ elements is the return type for the subroutine. The remaining elements are
+ the formal arguments to the subroutine.</p>
+
+<p><a href="#format_composite_type">Composite type</a> location can be
+ determined from the context and line number. The size, alignment and
+ offset are expressed in bits and can be 64 bit values. The alignment is used
+   to round the offset when embedded in
+   a <a href="#format_composite_type">composite type</a> (as an example, to keep
+   doubles on 64 bit boundaries). The offset is the bit offset if embedded
+   in a <a href="#format_composite_type">composite type</a>.</p>
+
+</div>
+
+<!-- ======================================================================= -->
+<h4>
+ <a name="format_subrange">Subrange descriptors</a>
+</h4>
+
+<div>
+
+<div class="doc_code">
+<pre>
+!42 = metadata !{
+ i32, ;; Tag = 33 + <a href="#LLVMDebugVersion">LLVMDebugVersion</a> (DW_TAG_subrange_type)
+ i64, ;; Low value
+ i64 ;; High value
+}
+</pre>
+</div>
+
+<p>These descriptors are used to define ranges of array subscripts for an array
+   <a href="#format_composite_type">composite type</a>. The low value defines
+   the lower bound, typically zero for C/C++. The high value is the upper
+   bound. Values are 64 bit. High - low + 1 is the size of the array. If low
+   &gt; high, the array bounds are not included in the generated debugging
+   information.</p>
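+
+<p>For instance, assuming <a href="#LLVMDebugVersion">LLVMDebugVersion</a> is
+   0x80000 as described above, a single C array dimension such as
+   <tt>int A[10]</tt> could be described by a subrange covering subscripts 0
+   through 9 (this is an illustrative sketch, not output copied from a
+   front-end):</p>
+
+<div class="doc_code">
+<pre>
+!42 = metadata !{
+  i32 524321,  ;; Tag = 33 (DW_TAG_subrange_type) + LLVMDebugVersion
+  i64 0,       ;; Low value
+  i64 9        ;; High value (10 elements: 9 - 0 + 1)
+}
+</pre>
+</div>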
+
+</div>
+
+<!-- ======================================================================= -->
+<h4>
+ <a name="format_enumeration">Enumerator descriptors</a>
+</h4>
+
+<div>
+
+<div class="doc_code">
+<pre>
+!6 = metadata !{
+ i32, ;; Tag = 40 + <a href="#LLVMDebugVersion">LLVMDebugVersion</a>
+ ;; (DW_TAG_enumerator)
+ metadata, ;; Name
+ i64 ;; Value
+}
+</pre>
+</div>
+
+<p>These descriptors are used to define the members of an
+   enumeration <a href="#format_composite_type">composite type</a>; each one
+   associates a name with a value.</p>
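+
+<p>As an illustrative sketch (again assuming
+   <a href="#LLVMDebugVersion">LLVMDebugVersion</a> = 0x80000), an enumerator
+   such as <tt>Spruce = 100</tt> from the <a href="#ccxx_enumeration_types">enum
+   example later in this document</a> would be described as:</p>
+
+<div class="doc_code">
+<pre>
+!8 = metadata !{
+  i32 524328,         ;; Tag = 40 (DW_TAG_enumerator) + LLVMDebugVersion
+  metadata !"Spruce", ;; Name
+  i64 100             ;; Value
+}
+</pre>
+</div>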
+
+</div>
+
+<!-- ======================================================================= -->
+<h4>
+ <a name="format_variables">Local variables</a>
+</h4>
+
+<div>
+
+<div class="doc_code">
+<pre>
+!7 = metadata !{
+ i32, ;; Tag (see below)
+ metadata, ;; Context
+ metadata, ;; Name
+ metadata, ;; Reference to file where defined
+ i32, ;; 24 bit - Line number where defined
+ ;; 8 bit - Argument number. 1 indicates 1st argument.
+ metadata, ;; Type descriptor
+ i32, ;; flags
+ metadata ;; (optional) Reference to inline location
+}
+</pre>
+</div>
+
+<p>These descriptors are used to define variables local to a sub program. The
+ value of the tag depends on the usage of the variable:</p>
+
+<div class="doc_code">
+<pre>
+DW_TAG_auto_variable = 256
+DW_TAG_arg_variable = 257
+DW_TAG_return_variable = 258
+</pre>
+</div>
+
+<p>An auto variable is any variable declared in the body of the function. An
+ argument variable is any variable that appears as a formal argument to the
+ function. A return variable is used to track the result of a function and
+ has no source correspondent.</p>
+
+<p>The context is either the subprogram or block where the variable is defined.
+   Name is the source variable name. Context and line indicate where the
+   variable was defined. Type descriptor defines the declared type of the
+   variable.</p>
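+
+<p>The line number and argument number share a single <tt>i32</tt> field. As a
+   small worked example (assuming, as the field layout above suggests, that the
+   line number occupies the low 24 bits and the argument number the high 8
+   bits), the second formal argument of a function declared on line 7 would be
+   encoded as:</p>
+
+<div class="doc_code">
+<pre>
+;; (2 &lt;&lt; 24) | 7 = 33554432 + 7 = 33554439
+i32 33554439  ;; Line 7, argument number 2
+</pre>
+</div>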
+
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="format_common_intrinsics">Debugger intrinsic functions</a>
+</h3>
+
+<div>
+
+<p>LLVM uses several intrinsic functions (name prefixed with "llvm.dbg") to
+ provide debug information at various points in generated code.</p>
+
+<!-- ======================================================================= -->
+<h4>
+ <a name="format_common_declare">llvm.dbg.declare</a>
+</h4>
+
+<div>
+<pre>
+ void %<a href="#format_common_declare">llvm.dbg.declare</a>(metadata, metadata)
+</pre>
+
+<p>This intrinsic provides information about a local element (e.g., variable). The
+ first argument is metadata holding the alloca for the variable. The
+ second argument is metadata containing a description of the variable.</p>
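+
+<p>For example, in the complete example in
+   the <a href="#format_common_lifetime">Object lifetimes and scoping</a>
+   section below, the local variable <tt>X</tt> is described by a call of the
+   form:</p>
+
+<div class="doc_code">
+<pre>
+call void @llvm.dbg.declare(metadata !{i32 * %X}, metadata !0), !dbg !7
+</pre>
+</div>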
+</div>
+
+<!-- ======================================================================= -->
+<h4>
+ <a name="format_common_value">llvm.dbg.value</a>
+</h4>
+
+<div>
+<pre>
+ void %<a href="#format_common_value">llvm.dbg.value</a>(metadata, i64, metadata)
+</pre>
+
+<p>This intrinsic provides information when a user source variable is set to a
+ new value. The first argument is the new value (wrapped as metadata). The
+ second argument is the offset in the user source variable where the new value
+ is written. The third argument is metadata containing a description of the
+ user source variable.</p>
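+
+<p>As an illustrative sketch (the value, offset and descriptor names here are
+   hypothetical), an update of a source variable described by <tt>!10</tt> to
+   the value currently held in <tt>%new_value</tt> could be recorded as:</p>
+
+<div class="doc_code">
+<pre>
+call void @llvm.dbg.value(metadata !{i32 %new_value}, i64 0, metadata !10)
+</pre>
+</div>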
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="format_common_lifetime">Object lifetimes and scoping</a>
+</h3>
+
+<div>
+<p>In many languages, the local variables in functions can have their lifetimes
+ or scopes limited to a subset of a function. In the C family of languages,
+ for example, variables are only live (readable and writable) within the
+ source block that they are defined in. In functional languages, values are
+ only readable after they have been defined. Though this is a very obvious
+ concept, it is non-trivial to model in LLVM, because it has no notion of
+ scoping in this sense, and does not want to be tied to a language's scoping
+ rules.</p>
+
+<p>In order to handle this, the LLVM debug format uses the metadata attached to
+ llvm instructions to encode line number and scoping information. Consider
+ the following C fragment, for example:</p>
+
+<div class="doc_code">
+<pre>
+1. void foo() {
+2. int X = 21;
+3. int Y = 22;
+4. {
+5. int Z = 23;
+6. Z = X;
+7. }
+8. X = Y;
+9. }
+</pre>
+</div>
+
+<p>Compiled to LLVM, this function would be represented like this:</p>
+
+<div class="doc_code">
+<pre>
+define void @foo() nounwind ssp {
+entry:
+ %X = alloca i32, align 4 ; &lt;i32*&gt; [#uses=4]
+ %Y = alloca i32, align 4 ; &lt;i32*&gt; [#uses=4]
+ %Z = alloca i32, align 4 ; &lt;i32*&gt; [#uses=3]
+ %0 = bitcast i32* %X to {}* ; &lt;{}*&gt; [#uses=1]
+ call void @llvm.dbg.declare(metadata !{i32 * %X}, metadata !0), !dbg !7
+ store i32 21, i32* %X, !dbg !8
+ %1 = bitcast i32* %Y to {}* ; &lt;{}*&gt; [#uses=1]
+ call void @llvm.dbg.declare(metadata !{i32 * %Y}, metadata !9), !dbg !10
+ store i32 22, i32* %Y, !dbg !11
+ %2 = bitcast i32* %Z to {}* ; &lt;{}*&gt; [#uses=1]
+ call void @llvm.dbg.declare(metadata !{i32 * %Z}, metadata !12), !dbg !14
+ store i32 23, i32* %Z, !dbg !15
+ %tmp = load i32* %X, !dbg !16 ; &lt;i32&gt; [#uses=1]
+ %tmp1 = load i32* %Y, !dbg !16 ; &lt;i32&gt; [#uses=1]
+ %add = add nsw i32 %tmp, %tmp1, !dbg !16 ; &lt;i32&gt; [#uses=1]
+ store i32 %add, i32* %Z, !dbg !16
+ %tmp2 = load i32* %Y, !dbg !17 ; &lt;i32&gt; [#uses=1]
+ store i32 %tmp2, i32* %X, !dbg !17
+ ret void, !dbg !18
+}
+
+declare void @llvm.dbg.declare(metadata, metadata) nounwind readnone
+
+!0 = metadata !{i32 459008, metadata !1, metadata !"X",
+ metadata !3, i32 2, metadata !6}; [ DW_TAG_auto_variable ]
+!1 = metadata !{i32 458763, metadata !2}; [DW_TAG_lexical_block ]
+!2 = metadata !{i32 458798, i32 0, metadata !3, metadata !"foo", metadata !"foo",
+ metadata !"foo", metadata !3, i32 1, metadata !4,
+ i1 false, i1 true}; [DW_TAG_subprogram ]
+!3 = metadata !{i32 458769, i32 0, i32 12, metadata !"foo.c",
+ metadata !"/private/tmp", metadata !"clang 1.1", i1 true,
+ i1 false, metadata !"", i32 0}; [DW_TAG_compile_unit ]
+!4 = metadata !{i32 458773, metadata !3, metadata !"", null, i32 0, i64 0, i64 0,
+ i64 0, i32 0, null, metadata !5, i32 0}; [DW_TAG_subroutine_type ]
+!5 = metadata !{null}
+!6 = metadata !{i32 458788, metadata !3, metadata !"int", metadata !3, i32 0,
+ i64 32, i64 32, i64 0, i32 0, i32 5}; [DW_TAG_base_type ]
+!7 = metadata !{i32 2, i32 7, metadata !1, null}
+!8 = metadata !{i32 2, i32 3, metadata !1, null}
+!9 = metadata !{i32 459008, metadata !1, metadata !"Y", metadata !3, i32 3,
+ metadata !6}; [ DW_TAG_auto_variable ]
+!10 = metadata !{i32 3, i32 7, metadata !1, null}
+!11 = metadata !{i32 3, i32 3, metadata !1, null}
+!12 = metadata !{i32 459008, metadata !13, metadata !"Z", metadata !3, i32 5,
+ metadata !6}; [ DW_TAG_auto_variable ]
+!13 = metadata !{i32 458763, metadata !1}; [DW_TAG_lexical_block ]
+!14 = metadata !{i32 5, i32 9, metadata !13, null}
+!15 = metadata !{i32 5, i32 5, metadata !13, null}
+!16 = metadata !{i32 6, i32 5, metadata !13, null}
+!17 = metadata !{i32 8, i32 3, metadata !1, null}
+!18 = metadata !{i32 9, i32 1, metadata !2, null}
+</pre>
+</div>
+
+<p>This example illustrates a few important details about LLVM debugging
+ information. In particular, it shows how the <tt>llvm.dbg.declare</tt>
+ intrinsic and location information, which are attached to an instruction,
+ are applied together to allow a debugger to analyze the relationship between
+ statements, variable definitions, and the code used to implement the
+ function.</p>
+
+<div class="doc_code">
+<pre>
+call void @llvm.dbg.declare(metadata, metadata !0), !dbg !7
+</pre>
+</div>
+
+<p>The first intrinsic
+ <tt>%<a href="#format_common_declare">llvm.dbg.declare</a></tt>
+ encodes debugging information for the variable <tt>X</tt>. The metadata
+ <tt>!dbg !7</tt> attached to the intrinsic provides scope information for the
+ variable <tt>X</tt>.</p>
+
+<div class="doc_code">
+<pre>
+!7 = metadata !{i32 2, i32 7, metadata !1, null}
+!1 = metadata !{i32 458763, metadata !2}; [DW_TAG_lexical_block ]
+!2 = metadata !{i32 458798, i32 0, metadata !3, metadata !"foo",
+ metadata !"foo", metadata !"foo", metadata !3, i32 1,
+ metadata !4, i1 false, i1 true}; [DW_TAG_subprogram ]
+</pre>
+</div>
+
+<p>Here <tt>!7</tt> is metadata providing location information. It has four
+ fields: line number, column number, scope, and original scope. The original
+ scope represents inline location if this instruction is inlined inside a
+ caller, and is null otherwise. In this example, scope is encoded by
+ <tt>!1</tt>. <tt>!1</tt> represents a lexical block inside the scope
+ <tt>!2</tt>, where <tt>!2</tt> is a
+ <a href="#format_subprograms">subprogram descriptor</a>. This way the
+ location information attached to the intrinsics indicates that the
+ variable <tt>X</tt> is declared at line number 2 at a function level scope in
+ function <tt>foo</tt>.</p>
+
+<p>Now let's take another example.</p>
+
+<div class="doc_code">
+<pre>
+call void @llvm.dbg.declare(metadata, metadata !12), !dbg !14
+</pre>
+</div>
+
+<p>The second intrinsic
+ <tt>%<a href="#format_common_declare">llvm.dbg.declare</a></tt>
+ encodes debugging information for variable <tt>Z</tt>. The metadata
+ <tt>!dbg !14</tt> attached to the intrinsic provides scope information for
+ the variable <tt>Z</tt>.</p>
+
+<div class="doc_code">
+<pre>
+!13 = metadata !{i32 458763, metadata !1}; [DW_TAG_lexical_block ]
+!14 = metadata !{i32 5, i32 9, metadata !13, null}
+</pre>
+</div>
+
+<p>Here <tt>!14</tt> indicates that <tt>Z</tt> is declared at line number 5 and
+ column number 9 inside of lexical scope <tt>!13</tt>. The lexical scope
+ itself resides inside of lexical scope <tt>!1</tt> described above.</p>
+
+<p>The scope information attached with each instruction provides a
+ straightforward way to find instructions covered by a scope.</p>
+
+</div>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2>
+ <a name="ccxx_frontend">C/C++ front-end specific debug information</a>
+</h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>The C and C++ front-ends represent information about the program in a format
+ that is effectively identical
+ to <a href="http://www.eagercon.com/dwarf/dwarf3std.htm">DWARF 3.0</a> in
+ terms of information content. This allows code generators to trivially
+ support native debuggers by generating standard dwarf information, and
+ contains enough information for non-dwarf targets to translate it as
+ needed.</p>
+
+<p>This section describes the forms used to represent C and C++ programs. Other
+ languages could pattern themselves after this (which itself is tuned to
+ representing programs in the same way that DWARF 3 does), or they could
+ choose to provide completely different forms if they don't fit into the DWARF
+ model. As support for debugging information gets added to the various LLVM
+ source-language front-ends, the information used should be documented
+ here.</p>
+
+<p>The following sections provide examples of various C/C++ constructs and the
+ debug information that would best describe those constructs.</p>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="ccxx_compile_units">C/C++ source file information</a>
+</h3>
+
+<div>
+
+<p>Given the source files <tt>MySource.cpp</tt> and <tt>MyHeader.h</tt> located
+ in the directory <tt>/Users/mine/sources</tt>, the following code:</p>
+
+<div class="doc_code">
+<pre>
+#include "MyHeader.h"
+
+int main(int argc, char *argv[]) {
+ return 0;
+}
+</pre>
+</div>
+
+<p>a C/C++ front-end would generate the following descriptors:</p>
+
+<div class="doc_code">
+<pre>
+...
+;;
+;; Define the compile unit for the main source file "/Users/mine/sources/MySource.cpp".
+;;
+!2 = metadata !{
+ i32 524305, ;; Tag
+ i32 0, ;; Unused
+ i32 4, ;; Language Id
+ metadata !"MySource.cpp",
+ metadata !"/Users/mine/sources",
+ metadata !"4.2.1 (Based on Apple Inc. build 5649) (LLVM build 00)",
+ i1 true, ;; Main Compile Unit
+ i1 false, ;; Optimized compile unit
+ metadata !"", ;; Compiler flags
+ i32 0} ;; Runtime version
+
+;;
+;; Define the file for the file "/Users/mine/sources/MySource.cpp".
+;;
+!1 = metadata !{
+ i32 524329, ;; Tag
+ metadata !"MySource.cpp",
+ metadata !"/Users/mine/sources",
+ metadata !2 ;; Compile unit
+}
+
+;;
+;; Define the file for the file "/Users/mine/sources/MyHeader.h"
+;;
+!3 = metadata !{
+ i32 524329, ;; Tag
+  metadata !"MyHeader.h",
+ metadata !"/Users/mine/sources",
+ metadata !2 ;; Compile unit
+}
+
+...
+</pre>
+</div>
+
+<p><tt>llvm::Instruction</tt> provides easy access to the metadata attached to
+   an instruction. One can extract line number information encoded in LLVM IR
+   using <tt>Instruction::getMetadata()</tt> and
+   <tt>DILocation::getLineNumber()</tt>.</p>
+<pre>
+ if (MDNode *N = I->getMetadata("dbg")) { // Here I is an LLVM instruction
+ DILocation Loc(N); // DILocation is in DebugInfo.h
+ unsigned Line = Loc.getLineNumber();
+ StringRef File = Loc.getFilename();
+ StringRef Dir = Loc.getDirectory();
+ }
+</pre>
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="ccxx_global_variable">C/C++ global variable information</a>
+</h3>
+
+<div>
+
+<p>Given an integer global variable declared as follows:</p>
+
+<div class="doc_code">
+<pre>
+int MyGlobal = 100;
+</pre>
+</div>
+
+<p>a C/C++ front-end would generate the following descriptors:</p>
+
+<div class="doc_code">
+<pre>
+;;
+;; Define the global itself.
+;;
+%MyGlobal = global int 100
+...
+;;
+;; List of debug info of globals
+;;
+!llvm.dbg.cu = !{!0}
+
+;; Define the compile unit.
+!0 = metadata !{
+ i32 786449, ;; Tag
+ i32 0, ;; Context
+ i32 4, ;; Language
+ metadata !"foo.cpp", ;; File
+ metadata !"/Volumes/Data/tmp", ;; Directory
+ metadata !"clang version 3.1 ", ;; Producer
+ i1 true, ;; Deprecated field
+ i1 false, ;; "isOptimized"?
+ metadata !"", ;; Flags
+ i32 0, ;; Runtime Version
+ metadata !1, ;; Enum Types
+ metadata !1, ;; Retained Types
+ metadata !1, ;; Subprograms
+ metadata !3 ;; Global Variables
+} ; [ DW_TAG_compile_unit ]
+
+;; The Array of Global Variables
+!3 = metadata !{
+ metadata !4
+}
+
+!4 = metadata !{
+ metadata !5
+}
+
+;;
+;; Define the global variable itself.
+;;
+!5 = metadata !{
+ i32 786484, ;; Tag
+ i32 0, ;; Unused
+ null, ;; Unused
+ metadata !"MyGlobal", ;; Name
+ metadata !"MyGlobal", ;; Display Name
+ metadata !"", ;; Linkage Name
+ metadata !6, ;; File
+ i32 1, ;; Line
+ metadata !7, ;; Type
+ i32 0, ;; IsLocalToUnit
+ i32 1, ;; IsDefinition
+ i32* @MyGlobal ;; LLVM-IR Value
+} ; [ DW_TAG_variable ]
+
+;;
+;; Define the file
+;;
+!6 = metadata !{
+ i32 786473, ;; Tag
+ metadata !"foo.cpp", ;; File
+ metadata !"/Volumes/Data/tmp", ;; Directory
+ null ;; Unused
+} ; [ DW_TAG_file_type ]
+
+;;
+;; Define the type
+;;
+!7 = metadata !{
+ i32 786468, ;; Tag
+ null, ;; Unused
+ metadata !"int", ;; Name
+ null, ;; Unused
+ i32 0, ;; Line
+ i64 32, ;; Size in Bits
+ i64 32, ;; Align in Bits
+ i64 0, ;; Offset
+ i32 0, ;; Flags
+ i32 5 ;; Encoding
+} ; [ DW_TAG_base_type ]
+
+</pre>
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="ccxx_subprogram">C/C++ function information</a>
+</h3>
+
+<div>
+
+<p>Given a function declared as follows:</p>
+
+<div class="doc_code">
+<pre>
+int main(int argc, char *argv[]) {
+ return 0;
+}
+</pre>
+</div>
+
+<p>a C/C++ front-end would generate the following descriptors:</p>
+
+<div class="doc_code">
+<pre>
+;;
+;; Define the descriptor for the subprogram. Note that the tag of the
+;; descriptor below is 524334, which is 46 (the tag for subprograms,
+;; DW_TAG_subprogram) plus LLVMDebugVersion.
+;;
+!6 = metadata !{
+ i32 524334, ;; Tag
+ i32 0, ;; Unused
+ metadata !1, ;; Context
+ metadata !"main", ;; Name
+ metadata !"main", ;; Display name
+ metadata !"main", ;; Linkage name
+ metadata !1, ;; File
+ i32 1, ;; Line number
+ metadata !4, ;; Type
+ i1 false, ;; Is local
+ i1 true, ;; Is definition
+ i32 0, ;; Virtuality attribute, e.g. pure virtual function
+ i32 0, ;; Index into virtual table for C++ methods
+ i32 0, ;; Type that holds virtual table.
+ i32 0, ;; Flags
+ i1 false, ;; True if this function is optimized
+ Function *, ;; Pointer to llvm::Function
+ null ;; Function template parameters
+}
+;;
+;; Define the subprogram itself.
+;;
+define i32 @main(i32 %argc, i8** %argv) {
+...
+}
+</pre>
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="ccxx_basic_types">C/C++ basic types</a>
+</h3>
+
+<div>
+
+<p>The following are the basic type descriptors for C/C++ core types:</p>
+
+<!-- ======================================================================= -->
+<h4>
+ <a name="ccxx_basic_type_bool">bool</a>
+</h4>
+
+<div>
+
+<div class="doc_code">
+<pre>
+!2 = metadata !{
+ i32 524324, ;; Tag
+ metadata !1, ;; Context
+ metadata !"bool", ;; Name
+ metadata !1, ;; File
+ i32 0, ;; Line number
+ i64 8, ;; Size in Bits
+ i64 8, ;; Align in Bits
+ i64 0, ;; Offset in Bits
+ i32 0, ;; Flags
+ i32 2 ;; Encoding
+}
+</pre>
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h4>
+ <a name="ccxx_basic_char">char</a>
+</h4>
+
+<div>
+
+<div class="doc_code">
+<pre>
+!2 = metadata !{
+ i32 524324, ;; Tag
+ metadata !1, ;; Context
+ metadata !"char", ;; Name
+ metadata !1, ;; File
+ i32 0, ;; Line number
+ i64 8, ;; Size in Bits
+ i64 8, ;; Align in Bits
+ i64 0, ;; Offset in Bits
+ i32 0, ;; Flags
+ i32 6 ;; Encoding
+}
+</pre>
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h4>
+ <a name="ccxx_basic_unsigned_char">unsigned char</a>
+</h4>
+
+<div>
+
+<div class="doc_code">
+<pre>
+!2 = metadata !{
+ i32 524324, ;; Tag
+ metadata !1, ;; Context
+ metadata !"unsigned char",
+ metadata !1, ;; File
+ i32 0, ;; Line number
+ i64 8, ;; Size in Bits
+ i64 8, ;; Align in Bits
+ i64 0, ;; Offset in Bits
+ i32 0, ;; Flags
+ i32 8 ;; Encoding
+}
+</pre>
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h4>
+ <a name="ccxx_basic_short">short</a>
+</h4>
+
+<div>
+
+<div class="doc_code">
+<pre>
+!2 = metadata !{
+ i32 524324, ;; Tag
+ metadata !1, ;; Context
+ metadata !"short int",
+ metadata !1, ;; File
+ i32 0, ;; Line number
+ i64 16, ;; Size in Bits
+ i64 16, ;; Align in Bits
+ i64 0, ;; Offset in Bits
+ i32 0, ;; Flags
+ i32 5 ;; Encoding
+}
+</pre>
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h4>
+ <a name="ccxx_basic_unsigned_short">unsigned short</a>
+</h4>
+
+<div>
+
+<div class="doc_code">
+<pre>
+!2 = metadata !{
+ i32 524324, ;; Tag
+ metadata !1, ;; Context
+ metadata !"short unsigned int",
+ metadata !1, ;; File
+ i32 0, ;; Line number
+ i64 16, ;; Size in Bits
+ i64 16, ;; Align in Bits
+ i64 0, ;; Offset in Bits
+ i32 0, ;; Flags
+ i32 7 ;; Encoding
+}
+</pre>
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h4>
+ <a name="ccxx_basic_int">int</a>
+</h4>
+
+<div>
+
+<div class="doc_code">
+<pre>
+!2 = metadata !{
+ i32 524324, ;; Tag
+ metadata !1, ;; Context
+ metadata !"int", ;; Name
+ metadata !1, ;; File
+ i32 0, ;; Line number
+ i64 32, ;; Size in Bits
+ i64 32, ;; Align in Bits
+ i64 0, ;; Offset in Bits
+ i32 0, ;; Flags
+ i32 5 ;; Encoding
+}
+</pre></div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h4>
+ <a name="ccxx_basic_unsigned_int">unsigned int</a>
+</h4>
+
+<div>
+
+<div class="doc_code">
+<pre>
+!2 = metadata !{
+ i32 524324, ;; Tag
+ metadata !1, ;; Context
+ metadata !"unsigned int",
+ metadata !1, ;; File
+ i32 0, ;; Line number
+ i64 32, ;; Size in Bits
+ i64 32, ;; Align in Bits
+ i64 0, ;; Offset in Bits
+ i32 0, ;; Flags
+ i32 7 ;; Encoding
+}
+</pre>
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h4>
+ <a name="ccxx_basic_long_long">long long</a>
+</h4>
+
+<div>
+
+<div class="doc_code">
+<pre>
+!2 = metadata !{
+ i32 524324, ;; Tag
+ metadata !1, ;; Context
+ metadata !"long long int",
+ metadata !1, ;; File
+ i32 0, ;; Line number
+ i64 64, ;; Size in Bits
+ i64 64, ;; Align in Bits
+ i64 0, ;; Offset in Bits
+ i32 0, ;; Flags
+ i32 5 ;; Encoding
+}
+</pre>
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h4>
+ <a name="ccxx_basic_unsigned_long_long">unsigned long long</a>
+</h4>
+
+<div>
+
+<div class="doc_code">
+<pre>
+!2 = metadata !{
+ i32 524324, ;; Tag
+ metadata !1, ;; Context
+ metadata !"long long unsigned int",
+ metadata !1, ;; File
+ i32 0, ;; Line number
+ i64 64, ;; Size in Bits
+ i64 64, ;; Align in Bits
+ i64 0, ;; Offset in Bits
+ i32 0, ;; Flags
+ i32 7 ;; Encoding
+}
+</pre>
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h4>
+ <a name="ccxx_basic_float">float</a>
+</h4>
+
+<div>
+
+<div class="doc_code">
+<pre>
+!2 = metadata !{
+ i32 524324, ;; Tag
+ metadata !1, ;; Context
+ metadata !"float",
+ metadata !1, ;; File
+ i32 0, ;; Line number
+ i64 32, ;; Size in Bits
+ i64 32, ;; Align in Bits
+ i64 0, ;; Offset in Bits
+ i32 0, ;; Flags
+ i32 4 ;; Encoding
+}
+</pre>
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h4>
+ <a name="ccxx_basic_double">double</a>
+</h4>
+
+<div>
+
+<div class="doc_code">
+<pre>
+!2 = metadata !{
+ i32 524324, ;; Tag
+ metadata !1, ;; Context
+ metadata !"double",;; Name
+ metadata !1, ;; File
+ i32 0, ;; Line number
+ i64 64, ;; Size in Bits
+ i64 64, ;; Align in Bits
+ i64 0, ;; Offset in Bits
+ i32 0, ;; Flags
+ i32 4 ;; Encoding
+}
+</pre>
+</div>
+
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="ccxx_derived_types">C/C++ derived types</a>
+</h3>
+
+<div>
+
+<p>Given the following as an example of C/C++ derived type:</p>
+
+<div class="doc_code">
+<pre>
+typedef const int *IntPtr;
+</pre>
+</div>
+
+<p>a C/C++ front-end would generate the following descriptors:</p>
+
+<div class="doc_code">
+<pre>
+;;
+;; Define the typedef "IntPtr".
+;;
+!2 = metadata !{
+ i32 524310, ;; Tag
+ metadata !1, ;; Context
+ metadata !"IntPtr", ;; Name
+ metadata !3, ;; File
+ i32 0, ;; Line number
+ i64 0, ;; Size in bits
+ i64 0, ;; Align in bits
+ i64 0, ;; Offset in bits
+ i32 0, ;; Flags
+ metadata !4 ;; Derived From type
+}
+
+;;
+;; Define the pointer type.
+;;
+!4 = metadata !{
+ i32 524303, ;; Tag
+ metadata !1, ;; Context
+ metadata !"", ;; Name
+ metadata !1, ;; File
+ i32 0, ;; Line number
+ i64 64, ;; Size in bits
+ i64 64, ;; Align in bits
+ i64 0, ;; Offset in bits
+ i32 0, ;; Flags
+ metadata !5 ;; Derived From type
+}
+;;
+;; Define the const type.
+;;
+!5 = metadata !{
+ i32 524326, ;; Tag
+ metadata !1, ;; Context
+ metadata !"", ;; Name
+ metadata !1, ;; File
+ i32 0, ;; Line number
+ i64 32, ;; Size in bits
+ i64 32, ;; Align in bits
+ i64 0, ;; Offset in bits
+ i32 0, ;; Flags
+ metadata !6 ;; Derived From type
+}
+;;
+;; Define the int type.
+;;
+!6 = metadata !{
+ i32 524324, ;; Tag
+ metadata !1, ;; Context
+ metadata !"int", ;; Name
+ metadata !1, ;; File
+ i32 0, ;; Line number
+ i64 32, ;; Size in bits
+ i64 32, ;; Align in bits
+ i64 0, ;; Offset in bits
+ i32 0, ;; Flags
+ i32 5 ;; Encoding
+}
+</pre>
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="ccxx_composite_types">C/C++ struct/union types</a>
+</h3>
+
+<div>
+
+<p>Given the following as an example of a C/C++ struct type:</p>
+
+<div class="doc_code">
+<pre>
+struct Color {
+ unsigned Red;
+ unsigned Green;
+ unsigned Blue;
+};
+</pre>
+</div>
+
+<p>a C/C++ front-end would generate the following descriptors:</p>
+
+<div class="doc_code">
+<pre>
+;;
+;; Define basic type for unsigned int.
+;;
+!5 = metadata !{
+ i32 524324, ;; Tag
+ metadata !1, ;; Context
+ metadata !"unsigned int",
+ metadata !1, ;; File
+ i32 0, ;; Line number
+ i64 32, ;; Size in Bits
+ i64 32, ;; Align in Bits
+ i64 0, ;; Offset in Bits
+ i32 0, ;; Flags
+ i32 7 ;; Encoding
+}
+;;
+;; Define composite type for struct Color.
+;;
+!2 = metadata !{
+ i32 524307, ;; Tag
+ metadata !1, ;; Context
+ metadata !"Color", ;; Name
+ metadata !1, ;; Compile unit
+ i32 1, ;; Line number
+ i64 96, ;; Size in bits
+ i64 32, ;; Align in bits
+ i64 0, ;; Offset in bits
+ i32 0, ;; Flags
+ null, ;; Derived From
+ metadata !3, ;; Elements
+ i32 0 ;; Runtime Language
+}
+
+;;
+;; Define the Red field.
+;;
+!4 = metadata !{
+ i32 524301, ;; Tag
+ metadata !1, ;; Context
+ metadata !"Red", ;; Name
+ metadata !1, ;; File
+ i32 2, ;; Line number
+ i64 32, ;; Size in bits
+ i64 32, ;; Align in bits
+ i64 0, ;; Offset in bits
+ i32 0, ;; Flags
+ metadata !5 ;; Derived From type
+}
+
+;;
+;; Define the Green field.
+;;
+!6 = metadata !{
+ i32 524301, ;; Tag
+ metadata !1, ;; Context
+ metadata !"Green", ;; Name
+ metadata !1, ;; File
+ i32 3, ;; Line number
+ i64 32, ;; Size in bits
+ i64 32, ;; Align in bits
+ i64 32, ;; Offset in bits
+ i32 0, ;; Flags
+ metadata !5 ;; Derived From type
+}
+
+;;
+;; Define the Blue field.
+;;
+!7 = metadata !{
+ i32 524301, ;; Tag
+ metadata !1, ;; Context
+ metadata !"Blue", ;; Name
+ metadata !1, ;; File
+ i32 4, ;; Line number
+ i64 32, ;; Size in bits
+ i64 32, ;; Align in bits
+ i64 64, ;; Offset in bits
+ i32 0, ;; Flags
+ metadata !5 ;; Derived From type
+}
+
+;;
+;; Define the array of fields used by the composite type Color.
+;;
+!3 = metadata !{metadata !4, metadata !6, metadata !7}
+</pre>
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="ccxx_enumeration_types">C/C++ enumeration types</a>
+</h3>
+
+<div>
+
+<p>Given the following as an example of a C/C++ enumeration type:</p>
+
+<div class="doc_code">
+<pre>
+enum Trees {
+ Spruce = 100,
+ Oak = 200,
+ Maple = 300
+};
+</pre>
+</div>
+
+<p>a C/C++ front-end would generate the following descriptors:</p>
+
+<div class="doc_code">
+<pre>
+;;
+;; Define composite type for enum Trees
+;;
+!2 = metadata !{
+ i32 524292, ;; Tag
+ metadata !1, ;; Context
+ metadata !"Trees", ;; Name
+ metadata !1, ;; File
+ i32 1, ;; Line number
+ i64 32, ;; Size in bits
+ i64 32, ;; Align in bits
+ i64 0, ;; Offset in bits
+ i32 0, ;; Flags
+ null, ;; Derived From type
+ metadata !3, ;; Elements
+ i32 0 ;; Runtime language
+}
+
+;;
+;; Define the array of enumerators used by composite type Trees.
+;;
+!3 = metadata !{metadata !4, metadata !5, metadata !6}
+
+;;
+;; Define Spruce enumerator.
+;;
+!4 = metadata !{i32 524328, metadata !"Spruce", i64 100}
+
+;;
+;; Define Oak enumerator.
+;;
+!5 = metadata !{i32 524328, metadata !"Oak", i64 200}
+
+;;
+;; Define Maple enumerator.
+;;
+!6 = metadata !{i32 524328, metadata !"Maple", i64 300}
+
+</pre>
+</div>
+
+</div>
+
+</div>
+
+
+<!-- *********************************************************************** -->
+<h2>
+ <a name="llvmdwarfextension">Debugging information format</a>
+</h2>
+<!-- *********************************************************************** -->
+<div>
+<!-- ======================================================================= -->
+<h3>
+ <a name="objcproperty">Debugging Information Extension for Objective C Properties</a>
+</h3>
+<div>
+<!-- *********************************************************************** -->
+<h4>
+ <a name="objcpropertyintroduction">Introduction</a>
+</h4>
+<!-- *********************************************************************** -->
+
+<div>
+<p>Objective C provides a simpler way to declare and define accessor methods
+using declared properties. The language provides features to declare a
+property and to let the compiler synthesize accessor methods.
+</p>
+
+<p>The debugger lets developers inspect Objective C interfaces and their
+instance variables and class variables. However, the debugger does not know
+anything about the properties defined in Objective C interfaces. The debugger
+consumes information generated by the compiler in DWARF format. The format does
+not support encoding of Objective C properties. This proposal describes DWARF
+extensions to encode Objective C properties, which the debugger can use to let
+developers inspect Objective C properties.
+</p>
+
+</div>
+
+
+<!-- *********************************************************************** -->
+<h4>
+ <a name="objcpropertyproposal">Proposal</a>
+</h4>
+<!-- *********************************************************************** -->
+
+<div>
+<p>Objective C properties exist separately from class members. A property
+can be defined solely by &quot;setter&quot; and &quot;getter&quot; selectors, with
+its value calculated anew on each access, or it can simply be direct access
+to some declared ivar. Finally, it can have an ivar &quot;automatically
+synthesized&quot; for it by the compiler, in which case the property can be
+referred to in user code directly using the standard C dereference syntax as
+well as through the property &quot;dot&quot; syntax, but there is no entry in
+the @interface declaration corresponding to this ivar.
+</p>
+<p>
+To facilitate debugging of these properties, we will add a new DWARF TAG into the
+DW_TAG_structure_type definition for the class to hold the description of a
+given property, and a set of DWARF attributes that provide said description.
+The property tag will also contain the name and declared type of the property.
+</p>
+<p>
+If there is a related ivar, there will also be a DWARF property attribute placed
+in the DW_TAG_member DIE for that ivar referring back to the property TAG for
+that property. And in the case where the compiler synthesizes the ivar directly,
+the compiler is expected to generate a DW_TAG_member for that ivar (with the
+DW_AT_artificial set to 1), whose name will be the name used to access this
+ivar directly in code, and with the property attribute pointing back to the
+property it is backing.
+</p>
+<p>
+The following examples will serve as illustration for our discussion:
+</p>
+
+<div class="doc_code">
+<pre>
+@interface I1 {
+ int n2;
+}
+
+@property int p1;
+@property int p2;
+@end
+
+@implementation I1
+@synthesize p1;
+@synthesize p2 = n2;
+@end
+</pre>
+</div>
+
+<p>
+This produces the following DWARF (this is a &quot;pseudo dwarfdump&quot; output):
+</p>
+<div class="doc_code">
+<pre>
+0x00000100: TAG_structure_type [7] *
+ AT_APPLE_runtime_class( 0x10 )
+ AT_name( "I1" )
+ AT_decl_file( "Objc_Property.m" )
+ AT_decl_line( 3 )
+
+0x00000110 TAG_APPLE_property
+ AT_name ( "p1" )
+ AT_type ( {0x00000150} ( int ) )
+
+0x00000120: TAG_APPLE_property
+ AT_name ( "p2" )
+ AT_type ( {0x00000150} ( int ) )
+
+0x00000130: TAG_member [8]
+ AT_name( "_p1" )
+ AT_APPLE_property ( {0x00000110} "p1" )
+ AT_type( {0x00000150} ( int ) )
+ AT_artificial ( 0x1 )
+
+0x00000140: TAG_member [8]
+ AT_name( "n2" )
+ AT_APPLE_property ( {0x00000120} "p2" )
+ AT_type( {0x00000150} ( int ) )
+
+0x00000150: AT_type( ( int ) )
+</pre>
+</div>
+
+<p> Note, the current convention is that the name of the ivar for an
+auto-synthesized property is the name of the property from which it derives with
+an underscore prepended, as is shown in the example.
+But we actually don't need to know this convention, since we are given the name
+of the ivar directly.
+</p>
+
+<p>
+Also, it is common practice in ObjC to have different property declarations in
+the @interface and @implementation - e.g. to provide a read-only property in
+the interface, and a read-write property in the implementation. In that case,
+the compiler should emit whichever property declaration will be in force in the
+current translation unit.
+</p>
+
+<p> Developers can decorate a property with attributes which are encoded using
+DW_AT_APPLE_property_attribute.
+</p>
+
+<div class="doc_code">
+<pre>
+@property (readonly, nonatomic) int pr;
+</pre>
+</div>
+<p>
+This produces the following property tag:
+</p>
+<div class="doc_code">
+<pre>
+TAG_APPLE_property [8]
+ AT_name( "pr" )
+ AT_type ( {0x00000147} (int) )
+ AT_APPLE_property_attribute (DW_APPLE_PROPERTY_readonly, DW_APPLE_PROPERTY_nonatomic)
+</pre>
+</div>
+
+<p> The setter and getter method names are attached to the property using
+DW_AT_APPLE_property_setter and DW_AT_APPLE_property_getter attributes.
+</p>
+<div class="doc_code">
+<pre>
+@interface I1
+@property (setter=myOwnP3Setter:) int p3;
+-(void)myOwnP3Setter:(int)a;
+@end
+
+@implementation I1
+@synthesize p3;
+-(void)myOwnP3Setter:(int)a{ }
+@end
+</pre>
+</div>
+
+<p>
+The DWARF for this would be:
+</p>
+<div class="doc_code">
+<pre>
+0x000003bd: TAG_structure_type [7] *
+ AT_APPLE_runtime_class( 0x10 )
+ AT_name( "I1" )
+ AT_decl_file( "Objc_Property.m" )
+ AT_decl_line( 3 )
+
+0x000003cd TAG_APPLE_property
+ AT_name ( "p3" )
+ AT_APPLE_property_setter ( "myOwnP3Setter:" )
+ AT_type( {0x00000147} ( int ) )
+
+0x000003f3: TAG_member [8]
+ AT_name( "_p3" )
+ AT_type ( {0x00000147} ( int ) )
+ AT_APPLE_property ( {0x000003cd} )
+ AT_artificial ( 0x1 )
+</pre>
+</div>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h4>
+ <a name="objcpropertynewtags">New DWARF Tags</a>
+</h4>
+<!-- *********************************************************************** -->
+
+<div>
+<table border="1" cellspacing="0">
+ <col width="200">
+ <col width="200">
+ <tr>
+ <th>TAG</th>
+ <th>Value</th>
+ </tr>
+ <tr>
+ <td>DW_TAG_APPLE_property</td>
+ <td>0x4200</td>
+ </tr>
+</table>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h4>
+ <a name="objcpropertynewattributes">New DWARF Attributes</a>
+</h4>
+<!-- *********************************************************************** -->
+
+<div>
+<table border="1" cellspacing="0">
+ <col width="200">
+ <col width="200">
+ <col width="200">
+ <tr>
+ <th>Attribute</th>
+ <th>Value</th>
+ <th>Classes</th>
+ </tr>
+ <tr>
+ <td>DW_AT_APPLE_property</td>
+ <td>0x3fed</td>
+ <td>Reference</td>
+ </tr>
+ <tr>
+ <td>DW_AT_APPLE_property_getter</td>
+ <td>0x3fe9</td>
+ <td>String</td>
+ </tr>
+ <tr>
+ <td>DW_AT_APPLE_property_setter</td>
+ <td>0x3fea</td>
+ <td>String</td>
+ </tr>
+ <tr>
+ <td>DW_AT_APPLE_property_attribute</td>
+ <td>0x3feb</td>
+ <td>Constant</td>
+ </tr>
+</table>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h4>
+ <a name="objcpropertynewconstants">New DWARF Constants</a>
+</h4>
+<!-- *********************************************************************** -->
+
+<div>
+<table border="1" cellspacing="0">
+ <col width="200">
+ <col width="200">
+ <tr>
+ <th>Name</th>
+ <th>Value</th>
+ </tr>
+ <tr>
+ <td>DW_AT_APPLE_PROPERTY_readonly</td>
+ <td>0x1</td>
+ </tr>
+ <tr>
+ <td>DW_AT_APPLE_PROPERTY_readwrite</td>
+ <td>0x2</td>
+ </tr>
+ <tr>
+ <td>DW_AT_APPLE_PROPERTY_assign</td>
+ <td>0x4</td>
+ </tr>
+ <tr>
+ <td>DW_AT_APPLE_PROPERTY_retain</td>
+ <td>0x8</td>
+ </tr>
+ <tr>
+ <td>DW_AT_APPLE_PROPERTY_copy</td>
+ <td>0x10</td>
+ </tr>
+ <tr>
+ <td>DW_AT_APPLE_PROPERTY_nonatomic</td>
+ <td>0x20</td>
+ </tr>
+</table>
+
+</div>
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="acceltable">Name Accelerator Tables</a>
+</h3>
+<!-- ======================================================================= -->
+<div>
+<!-- ======================================================================= -->
+<h4>
+ <a name="acceltableintroduction">Introduction</a>
+</h4>
+<!-- ======================================================================= -->
+<div>
+<p>The .debug_pubnames and .debug_pubtypes formats are not what a debugger
+ needs. The "pub" in the section name indicates that the entries in the
+ table are publicly visible names only. This means no static or hidden
+ functions show up in the .debug_pubnames. No static variables or private class
+ variables are in the .debug_pubtypes. Many compilers add different things to
+ these tables, so we can't rely on their contents being consistent across gcc,
+ icc, and clang.</p>
+
+<p>The typical query given by users tends not to match up with the contents of
+ these tables. For example, the DWARF spec states that "In the case of the
+ name of a function member or static data member of a C++ structure, class or
+ union, the name presented in the .debug_pubnames section is not the simple
+ name given by the DW_AT_name attribute of the referenced debugging information
+ entry, but rather the fully qualified name of the data or function member."
+ So the only names in these tables for complex C++ entries are fully
+ qualified names. Debugger users tend not to enter their search strings as
+ "a::b::c(int,const Foo&amp;) const", but rather as "c", "b::c", or "a::b::c". So
+ the name entered in the name table must be demangled in order to chop it up
+ appropriately and additional names must be manually entered into the table
+ to make it effective as a name lookup table for debuggers to use.</p>
+
+<p>All debuggers currently ignore the .debug_pubnames table as a result of
+ its inconsistent and useless public-only name content, which makes it a waste
+ of space in the object file. These tables, when they are written to disk, are
+ not sorted in any way, leaving every debugger to do its own parsing
+ and sorting. These tables also include an inlined copy of the string values
+ in the table itself making the tables much larger than they need to be on
+ disk, especially for large C++ programs.</p>
+
+<p>Can't we just fix the sections by adding all of the names we need to this
+ table? No, because that is not what the tables are defined to contain and we
+ won't know the difference between the old bad tables and the new good tables.
+ At best we could make our own renamed sections that contain all of the data
+ we need.</p>
+
+<p>These tables are also insufficient for what a debugger like LLDB needs.
+ LLDB uses clang for its expression parsing where LLDB acts as a PCH. LLDB is
+ then often asked to look for type "foo" or namespace "bar", or list items in
+ namespace "baz". Namespaces are not included in the pubnames or pubtypes
+ tables. Since clang asks a lot of questions when it is parsing an expression,
+ we need to be very fast when looking up names, as it happens a lot. Having new
+ accelerator tables that are optimized for very quick lookups will benefit
+ this type of debugging experience greatly.</p>
+
+<p>We would like to generate name lookup tables that can be mapped into
+ memory from disk, and used as is, with little or no up-front parsing. We would
+ also like to be able to control the exact content of these different tables so
+ they contain exactly what we need. The Name Accelerator Tables were designed
+ to fix these issues. To do so, we need to:</p>
+
+<ul>
+ <li>Have a format that can be mapped into memory from disk and used as is</li>
+ <li>Make lookups very fast</li>
+ <li>Use an extensible table format so these tables can be made by many producers</li>
+ <li>Contain all of the names needed for typical lookups out of the box</li>
+ <li>Enforce strict rules for the contents of tables</li>
+</ul>
+
+<p>Table size is important and the accelerator table format should allow the
+ reuse of strings from common string tables so the strings for the names are
+ not duplicated. We also want to make sure the table is ready to be used as-is
+ by simply mapping the table into memory with minimal header parsing.</p>
+
+<p>The name lookups need to be fast and optimized for the kinds of lookups
+ that debuggers tend to do. Optimally we would like to touch as few parts of
+ the mapped table as possible when doing a name lookup and be able to quickly
+ find the name entry we are looking for, or discover there are no matches. In
+ the case of debuggers, we optimize for lookups that fail most of the time.</p>
+
+<p>Each table that is defined should have strict rules on exactly what is in
+ the accelerator table, and those rules should be documented so clients can rely on the content.</p>
+
+</div>
+
+<!-- ======================================================================= -->
+<h4>
+ <a name="acceltablehashes">Hash Tables</a>
+</h4>
+<!-- ======================================================================= -->
+
+<div>
+<h5>Standard Hash Tables</h5>
+
+<p>Typical hash tables have a header, buckets, and each bucket points to the
+bucket contents:
+</p>
+
+<div class="doc_code">
+<pre>
+.------------.
+| HEADER |
+|------------|
+| BUCKETS |
+|------------|
+| DATA |
+`------------'
+</pre>
+</div>
+
+<p>The BUCKETS are an array of offsets to DATA for each hash:</p>
+
+<div class="doc_code">
+<pre>
+.------------.
+| 0x00001000 | BUCKETS[0]
+| 0x00002000 | BUCKETS[1]
+| 0x00002200 | BUCKETS[2]
+| 0x000034f0 | BUCKETS[3]
+| | ...
+| 0xXXXXXXXX | BUCKETS[n_buckets]
+'------------'
+</pre>
+</div>
+
+<p>So for bucket[3] in the example above, we have an offset into the table
+ 0x000034f0 which points to a chain of entries for the bucket. Each entry in
+ the chain contains a next pointer, the full 32 bit hash value, the string
+ itself, and the data for the current string value.</p>
+
+<div class="doc_code">
+<pre>
+ .------------.
+0x000034f0: | 0x00003500 | next pointer
+ | 0x12345678 | 32 bit hash
+ | "erase" | string value
+ | data[n] | HashData for this bucket
+ |------------|
+0x00003500: | 0x00003550 | next pointer
+ | 0x29273623 | 32 bit hash
+ | "dump" | string value
+ | data[n] | HashData for this bucket
+ |------------|
+0x00003550: | 0x00000000 | next pointer
+ | 0x82638293 | 32 bit hash
+ | "main" | string value
+ | data[n] | HashData for this bucket
+ `------------'
+</pre>
+</div>
+
+<p>The problem with this layout for debuggers is that we need to optimize for
+ the negative lookup case where the symbol we're searching for is not present.
+ So if we were to look up "printf" in the table above, we would compute a 32 bit
+ hash for "printf", which might match bucket[3]. We would need to go to the offset
+ 0x000034f0 and start looking to see if our 32 bit hash matches. To do so, we
+ need to read the next pointer, then read the hash, compare it, and skip to
+ the next entry. Each time we are skipping many bytes in memory and touching
+ new cache pages just to do the compare on the full 32 bit hash. All of these
+ accesses then tell us that we didn't have a match.</p>
+
+<h5>Name Hash Tables</h5>
+
+<p>To solve the issues mentioned above we have structured the hash tables
+ a bit differently: a header, buckets, an array of all unique 32 bit hash
+ values, followed by an array of hash value data offsets, one for each hash
+ value, then the data for all hash values:</p>
+
+<div class="doc_code">
+<pre>
+.-------------.
+| HEADER |
+|-------------|
+| BUCKETS |
+|-------------|
+| HASHES |
+|-------------|
+| OFFSETS |
+|-------------|
+| DATA |
+`-------------'
+</pre>
+</div>
+
+<p>The BUCKETS in the name tables are an index into the HASHES array. By
+ making all of the full 32 bit hash values contiguous in memory, we allow
+ ourselves to efficiently check for a match while touching as little
+ memory as possible. Most often checking the 32 bit hash values is as far as
+ the lookup goes. If it does match, it usually is a match with no collisions.
+ So for a table with "n_buckets" buckets, and "n_hashes" unique 32 bit hash
+ values, we can clarify the contents of the BUCKETS, HASHES and OFFSETS as:</p>
+
+<div class="doc_code">
+<pre>
+.-------------------------.
+| HEADER.magic | uint32_t
+| HEADER.version | uint16_t
+| HEADER.hash_function | uint16_t
+| HEADER.bucket_count | uint32_t
+| HEADER.hashes_count | uint32_t
+| HEADER.header_data_len | uint32_t
+| HEADER_DATA | HeaderData
+|-------------------------|
+| BUCKETS | uint32_t[n_buckets] // 32 bit hash indexes
+|-------------------------|
+| HASHES | uint32_t[n_hashes] // 32 bit hash values
+|-------------------------|
+| OFFSETS | uint32_t[n_hashes] // 32 bit offsets to hash value data
+|-------------------------|
+| ALL HASH DATA |
+`-------------------------'
+</pre>
+</div>
+
+<p>So taking the exact same data from the standard hash example above we end up
+ with:</p>
+
+<div class="doc_code">
+<pre>
+ .------------.
+ | HEADER |
+ |------------|
+ | 0 | BUCKETS[0]
+ | 2 | BUCKETS[1]
+ | 5 | BUCKETS[2]
+ | 6 | BUCKETS[3]
+ | | ...
+ | ... | BUCKETS[n_buckets]
+ |------------|
+ | 0x........ | HASHES[0]
+ | 0x........ | HASHES[1]
+ | 0x........ | HASHES[2]
+ | 0x........ | HASHES[3]
+ | 0x........ | HASHES[4]
+ | 0x........ | HASHES[5]
+ | 0x12345678 | HASHES[6] hash for BUCKETS[3]
+ | 0x29273623 | HASHES[7] hash for BUCKETS[3]
+ | 0x82638293 | HASHES[8] hash for BUCKETS[3]
+ | 0x........ | HASHES[9]
+ | 0x........ | HASHES[10]
+ | 0x........ | HASHES[11]
+ | 0x........ | HASHES[12]
+ | 0x........ | HASHES[13]
+ | 0x........ | HASHES[n_hashes]
+ |------------|
+ | 0x........ | OFFSETS[0]
+ | 0x........ | OFFSETS[1]
+ | 0x........ | OFFSETS[2]
+ | 0x........ | OFFSETS[3]
+ | 0x........ | OFFSETS[4]
+ | 0x........ | OFFSETS[5]
+ | 0x000034f0 | OFFSETS[6] offset for BUCKETS[3]
+ | 0x00003500 | OFFSETS[7] offset for BUCKETS[3]
+ | 0x00003550 | OFFSETS[8] offset for BUCKETS[3]
+ | 0x........ | OFFSETS[9]
+ | 0x........ | OFFSETS[10]
+ | 0x........ | OFFSETS[11]
+ | 0x........ | OFFSETS[12]
+ | 0x........ | OFFSETS[13]
+ | 0x........ | OFFSETS[n_hashes]
+ |------------|
+ | |
+ | |
+ | |
+ | |
+ | |
+ |------------|
+0x000034f0: | 0x00001203 | String offset into .debug_str ("erase")
+ | 0x00000004 | A 32 bit array count - number of HashData with name "erase"
+ | 0x........ | HashData[0]
+ | 0x........ | HashData[1]
+ | 0x........ | HashData[2]
+ | 0x........ | HashData[3]
+ | 0x00000000 | String offset into .debug_str (terminate data for hash)
+ |------------|
+0x00003500: | 0x00001203 | String offset into .debug_str ("collision")
+ | 0x00000002 | A 32 bit array count - number of HashData with name "collision"
+ | 0x........ | HashData[0]
+ | 0x........ | HashData[1]
+ | 0x00001203 | String offset into .debug_str ("dump")
+ | 0x00000003 | A 32 bit array count - number of HashData with name "dump"
+ | 0x........ | HashData[0]
+ | 0x........ | HashData[1]
+ | 0x........ | HashData[2]
+ | 0x00000000 | String offset into .debug_str (terminate data for hash)
+ |------------|
+0x00003550: | 0x00001203 | String offset into .debug_str ("main")
+ | 0x00000009 | A 32 bit array count - number of HashData with name "main"
+ | 0x........ | HashData[0]
+ | 0x........ | HashData[1]
+ | 0x........ | HashData[2]
+ | 0x........ | HashData[3]
+ | 0x........ | HashData[4]
+ | 0x........ | HashData[5]
+ | 0x........ | HashData[6]
+ | 0x........ | HashData[7]
+ | 0x........ | HashData[8]
+ | 0x00000000 | String offset into .debug_str (terminate data for hash)
+ `------------'
+</pre>
+</div>
+
+<p>So we still have all of the same data, we just organize it more efficiently
+ for debugger lookup. If we repeat the same "printf" lookup from above, we
+ would hash "printf" and find that it matches BUCKETS[3] by taking the 32 bit
+ hash value modulo n_buckets. BUCKETS[3] contains "6", which is the index
+ into the HASHES table. We would then compare the consecutive 32 bit hash
+ values in the HASHES array for as long as they still belong to BUCKETS[3]. We
+ do this by verifying that each subsequent hash value modulo n_buckets is still
+ 3. In the case of a failed lookup we would access the memory for BUCKETS[3], and
+ then compare a few consecutive 32 bit hashes before we know that we have no match.
+ We don't end up marching through multiple words of memory, and we keep the
+ number of processor data cache lines being accessed as small as possible.</p>
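+
+<p>As a rough sketch of the probe just described (assuming the BUCKETS,
+ HASHES and OFFSETS arrays have already been mapped into memory, that an empty
+ bucket holds the invalid index UINT32_MAX as noted below, and that the
+ function and parameter names are purely illustrative):</p>
+
+<div class="doc_code">
+<pre>
+#include &lt;cstdint&gt;
+
+// Returns the offset of the hash data for NameHash, or UINT32_MAX if the
+// name is not present. On a failed lookup only BUCKETS and HASHES are touched.
+uint32_t FindHashDataOffset(const uint32_t *Buckets, uint32_t NumBuckets,
+                            const uint32_t *Hashes, const uint32_t *Offsets,
+                            uint32_t NumHashes, uint32_t NameHash) {
+  uint32_t BucketIdx = NameHash % NumBuckets;
+  uint32_t HashIdx = Buckets[BucketIdx];
+  if (HashIdx == UINT32_MAX)   // empty bucket
+    return UINT32_MAX;
+  // Scan the consecutive hash values that belong to this bucket.
+  for (; HashIdx &lt; NumHashes &amp;&amp; Hashes[HashIdx] % NumBuckets == BucketIdx;
+       ++HashIdx)
+    if (Hashes[HashIdx] == NameHash)
+      return Offsets[HashIdx]; // candidate found; the string must still be compared
+  return UINT32_MAX;           // no match
+}
+</pre>
+</div>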
+
+<p>The string hash that is used for these lookup tables is the Daniel J.
+ Bernstein hash which is also used in the ELF GNU_HASH sections. It is a very
+ good hash for all kinds of names in programs with very few hash collisions.</p>
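+
+<p>A minimal sketch of that hash (the same update rule used by the ELF
+ GNU_HASH sections; the function name here is illustrative):</p>
+
+<div class="doc_code">
+<pre>
+#include &lt;cstdint&gt;
+
+// Daniel J. Bernstein string hash: h = h * 33 + c, seeded with 5381.
+static uint32_t HashDJB(const char *Str) {
+  uint32_t H = 5381;
+  for (; *Str; ++Str)
+    H = H * 33 + static_cast&lt;unsigned char&gt;(*Str);
+  return H;
+}
+</pre>
+</div>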
+
+<p>Empty buckets are designated by using an invalid hash index of UINT32_MAX.</p>
+</div>
+
+<!-- ======================================================================= -->
+<h4>
+ <a name="acceltabledetails">Details</a>
+</h4>
+<!-- ======================================================================= -->
+<div>
+<p>These name hash tables are designed to be generic where specializations of
+ the table get to define additional data that goes into the header
+ ("HeaderData"), how the string value is stored ("KeyType") and the content
+ of the data for each hash value.</p>
+
+<h5>Header Layout</h5>
+<p>The header has a fixed part and a specialized part. The exact format of
+ the header is:</p>
+<div class="doc_code">
+<pre>
+struct Header
+{
+ uint32_t magic; // 'HASH' magic value to allow endian detection
+ uint16_t version; // Version number
+ uint16_t hash_function; // The hash function enumeration that was used
+ uint32_t bucket_count; // The number of buckets in this hash table
+ uint32_t hashes_count; // The total number of unique hash values and hash data offsets in this table
+ uint32_t header_data_len; // The bytes to skip to get to the hash indexes (buckets) for correct alignment
+ // Specifically the length of the following HeaderData field - this does not
+ // include the size of the preceding fields
+ HeaderData header_data; // Implementation specific header data
+};
+</pre>
+</div>
+<p>The header starts with a 32 bit "magic" value which must be 'HASH' encoded as
+ an ASCII integer. This allows the detection of the start of the hash table and
+ also allows the table's byte order to be determined so the table can be
+ correctly extracted. The "magic" value is followed by a 16 bit version number
+ which allows the table to be revised and modified in the future. The current
+ version number is 1. "hash_function" is a uint16_t enumeration that specifies
+ which hash function was used to produce this table. The current values for the
+ hash function enumerations include:</p>
+<div class="doc_code">
+<pre>
+enum HashFunctionType
+{
+ eHashFunctionDJB = 0u, // Daniel J Bernstein hash function
+};
+</pre>
+</div>
+<p>"bucket_count" is a 32 bit unsigned integer that represents how many buckets
+ are in the BUCKETS array. "hashes_count" is the number of unique 32 bit hash
+ values that are in the HASHES array, and is also the number of offsets
+ contained in the OFFSETS array. "header_data_len" specifies the size in
+ bytes of the HeaderData that is filled in by specialized versions of this
+ table.</p>
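+
+<p>As an illustrative sketch (the struct and function names are ours, not part
+ of the format), a consumer could read and sanity check the fixed part of the
+ header like this:</p>
+
+<div class="doc_code">
+<pre>
+#include &lt;cstdint&gt;
+#include &lt;cstring&gt;
+
+struct FixedHeader {          // the fixed fields that precede HeaderData
+  uint32_t magic;
+  uint16_t version;
+  uint16_t hash_function;
+  uint32_t bucket_count;
+  uint32_t hashes_count;
+  uint32_t header_data_len;
+};
+
+// Returns true if Data starts with a version 1, DJB-hashed table header.
+// A magic value that does not match 'HASH' means the bytes are either not a
+// table or were written with the opposite byte order.
+bool ParseFixedHeader(const uint8_t *Data, FixedHeader &amp;H) {
+  std::memcpy(&amp;H, Data, sizeof(FixedHeader));
+  const uint32_t MagicHASH = 0x48415348;  // 'HASH' as an ASCII integer
+  if (H.magic != MagicHASH)
+    return false;
+  return H.version == 1 &amp;&amp; H.hash_function == 0 /* eHashFunctionDJB */;
+}
+</pre>
+</div>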
+
+<h5>Fixed Lookup</h5>
+<p>The header is followed by the buckets, hashes, offsets, and hash value
+ data.</p>
+<div class="doc_code">
+<pre>
+struct FixedTable
+{
+ uint32_t buckets[Header.bucket_count]; // An array of hash indexes into the "hashes[]" array below
+ uint32_t hashes [Header.hashes_count]; // Every unique 32 bit hash for the entire table is in this table
+ uint32_t offsets[Header.hashes_count]; // An offset that corresponds to each item in the "hashes[]" array above
+};
+</pre>
+</div>
+<p>"buckets" is an array of 32 bit indexes into the "hashes" array. The
+ "hashes" array contains all of the 32 bit hash values for all names in the
+ hash table. Each hash in the "hashes" table has an offset in the "offsets"
+ array that points to the data for the hash value.</p>
+
+<p>This table setup makes it very easy to repurpose these tables to contain
+ different data, while keeping the lookup mechanism the same for all tables.
+ This layout also makes it possible to save the table to disk and map it in
+ later and do very efficient name lookups with little or no parsing.</p>
+
+<p>DWARF lookup tables can be implemented in a variety of ways and can store
+ a lot of information for each name. We want to make the DWARF tables
+ extensible and able to store the data efficiently so we have used some of the
+ DWARF features that enable efficient data storage to define exactly what kind
+ of data we store for each name.</p>
+
+<p>The "HeaderData" contains a definition of the contents of each HashData
+ chunk. We might want to store an offset to all of the debug information
+ entries (DIEs) for each name. To keep things extensible, we create a list of
+ items, or Atoms, that are contained in the data for each name. First comes the
+ type of the data in each atom:</p>
+<div class="doc_code">
+<pre>
+enum AtomType
+{
+ eAtomTypeNULL = 0u,
+ eAtomTypeDIEOffset = 1u, // DIE offset, check form for encoding
+ eAtomTypeCUOffset = 2u, // DIE offset of the compile unit header that contains the item in question
+ eAtomTypeTag = 3u, // DW_TAG_xxx value, should be encoded as DW_FORM_data1 (if no tags exceed 255) or DW_FORM_data2
+ eAtomTypeNameFlags = 4u, // Flags from enum NameFlags
+ eAtomTypeTypeFlags = 5u, // Flags from enum TypeFlags
+};
+</pre>
+</div>
+<p>The enumeration values and their meanings are:</p>
+<div class="doc_code">
+<pre>
+ eAtomTypeNULL - a termination atom that specifies the end of the atom list
+ eAtomTypeDIEOffset - an offset into the .debug_info section for the DWARF DIE for this name
+ eAtomTypeCUOffset - an offset into the .debug_info section for the CU that contains the DIE
+ eAtomTypeTag - The DW_TAG_XXX enumeration value so you don't have to parse the DWARF to see what it is
+ eAtomTypeNameFlags - Flags for functions and global variables (isFunction, isInlined, isExternal...)
+ eAtomTypeTypeFlags - Flags for types (isCXXClass, isObjCClass, ...)
+</pre>
+</div>
+<p>Each atom is then described by a pair that gives its type and the DWARF
+ form that specifies how its data is encoded:</p>
+<div class="doc_code">
+<pre>
+struct Atom
+{
+ uint16_t type; // AtomType enum value
+ uint16_t form; // DWARF DW_FORM_XXX defines
+};
+</pre>
+</div>
+<p>The "form" type above is from the DWARF specification and defines the
+ exact encoding of the data for the Atom type. See the DWARF specification for
+ the DW_FORM_ definitions.</p>
+<div class="doc_code">
+<pre>
+struct HeaderData
+{
+ uint32_t die_offset_base;
+ uint32_t atom_count;
+ Atom atoms[atom_count];
+};
+</pre>
+</div>
+<p>"HeaderData" defines the base DIE offset that should be added to any atoms
+ that are encoded using the DW_FORM_ref1, DW_FORM_ref2, DW_FORM_ref4,
+ DW_FORM_ref8 or DW_FORM_ref_udata. It also defines what is contained in
+ each "HashData" object -- Atom.form tells us how large each field will be in
+ the HashData and the Atom.type tells us how this data should be interpreted.</p>
+
+<p>For the current implementations of ".apple_names" (all functions and globals),
+ ".apple_types" (names of all types that are defined), and
+ ".apple_namespaces" (all namespaces), the Atom array is set to:</p>
+<div class="doc_code">
+<pre>
+HeaderData.atom_count = 1;
+HeaderData.atoms[0].type = eAtomTypeDIEOffset;
+HeaderData.atoms[0].form = DW_FORM_data4;
+</pre>
+</div>
+<p>This defines the contents to be the DIE offset (eAtomTypeDIEOffset) that is
+ encoded as a 32 bit value (DW_FORM_data4). This allows a single name to have
+ multiple matching DIEs in a single file, which could come up with an inlined
+ function for instance. Future tables could include more information about the
+ DIE such as flags indicating if the DIE is a function, method, block,
+ or inlined.</p>
+
+<p>The KeyType for the DWARF table is a 32 bit string table offset into the
+ ".debug_str" table. The ".debug_str" is the string table for the DWARF which
+ may already contain copies of all of the strings. This helps make sure, with
+ help from the compiler, that we reuse the strings between all of the DWARF
+ sections and keeps the hash table size down. Another benefit of having the
+ compiler generate all strings as DW_FORM_strp in the debug info is that
+ DWARF parsing can be made much faster.</p>
+
+<p>After a lookup is made, we get an offset into the hash data. The hash data
+ needs to be able to deal with 32 bit hash collisions, so the chunk of data
+ at the offset in the hash data consists of a triple:</p>
+<div class="doc_code">
+<pre>
+uint32_t str_offset
+uint32_t hash_data_count
+HashData[hash_data_count]
+</pre>
+</div>
+<p>If "str_offset" is zero, then the bucket contents are done. 99.9% of the
+ hash data chunks contain a single item (no 32 bit hash collision):</p>
+<div class="doc_code">
+<pre>
+.------------.
+| 0x00001023 | uint32_t KeyType (.debug_str[0x0001023] => "main")
+| 0x00000004 | uint32_t HashData count
+| 0x........ | uint32_t HashData[0] DIE offset
+| 0x........ | uint32_t HashData[1] DIE offset
+| 0x........ | uint32_t HashData[2] DIE offset
+| 0x........ | uint32_t HashData[3] DIE offset
+| 0x00000000 | uint32_t KeyType (end of hash chain)
+`------------'
+</pre>
+</div>
+<p>If there are collisions, you will have multiple valid string offsets:</p>
+<div class="doc_code">
+<pre>
+.------------.
+| 0x00001023 | uint32_t KeyType (.debug_str[0x0001023] => "main")
+| 0x00000004 | uint32_t HashData count
+| 0x........ | uint32_t HashData[0] DIE offset
+| 0x........ | uint32_t HashData[1] DIE offset
+| 0x........ | uint32_t HashData[2] DIE offset
+| 0x........ | uint32_t HashData[3] DIE offset
+| 0x00002023 | uint32_t KeyType (.debug_str[0x0002023] => "print")
+| 0x00000002 | uint32_t HashData count
+| 0x........ | uint32_t HashData[0] DIE offset
+| 0x........ | uint32_t HashData[1] DIE offset
+| 0x00000000 | uint32_t KeyType (end of hash chain)
+`------------'
+</pre>
+</div>
+<p>Current testing with real world C++ binaries has shown that there is around
+ one 32 bit hash collision per 100,000 name entries.</p>
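+
+<p>As a sketch of how the hash data found at a looked-up offset might be
+ walked (assuming, as in the current tables, a single DW_FORM_data4 atom per
+ HashData item; the function and helper names here are purely illustrative):</p>
+
+<div class="doc_code">
+<pre>
+#include &lt;cstdint&gt;
+#include &lt;cstring&gt;
+#include &lt;vector&gt;
+
+static uint32_t ReadU32(const uint8_t *&amp;P) {
+  uint32_t V;
+  std::memcpy(&amp;V, P, sizeof(V));
+  P += sizeof(V);
+  return V;
+}
+
+// Collects the DIE offsets for Name from the hash data chunks starting at
+// Data. MatchesName is assumed to compare Name against .debug_str[StrOffset].
+std::vector&lt;uint32_t&gt; FindDIEOffsets(const uint8_t *Data, const char *Name,
+                                     bool (*MatchesName)(uint32_t StrOffset,
+                                                         const char *Name)) {
+  std::vector&lt;uint32_t&gt; DIEs;
+  for (;;) {
+    uint32_t StrOffset = ReadU32(Data);
+    if (StrOffset == 0)                        // terminating entry for this hash
+      break;
+    uint32_t Count = ReadU32(Data);
+    bool Match = MatchesName(StrOffset, Name); // resolve any 32 bit hash collision
+    for (uint32_t I = 0; I &lt; Count; ++I) {
+      uint32_t DIEOffset = ReadU32(Data);
+      if (Match)
+        DIEs.push_back(DIEOffset);
+    }
+  }
+  return DIEs;
+}
+</pre>
+</div>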
+</div>
+<!-- ======================================================================= -->
+<h4>
+ <a name="acceltablecontents">Contents</a>
+</h4>
+<!-- ======================================================================= -->
+<div>
+<p>As we said, we want to strictly define exactly what is included in the
+ different tables. For DWARF, we have 3 tables: ".apple_names", ".apple_types",
+ and ".apple_namespaces".</p>
+
+<p>".apple_names" sections should contain an entry for each DWARF DIE whose
+ DW_TAG is a DW_TAG_label, DW_TAG_inlined_subroutine, or DW_TAG_subprogram that
+ has address attributes: DW_AT_low_pc, DW_AT_high_pc, DW_AT_ranges or
+ DW_AT_entry_pc. It also contains DW_TAG_variable DIEs that have a DW_OP_addr
+ in the location (global and static variables). All global and static variables
+ should be included, including those scoped within functions and classes. For
+ example, using the following code:</p>
+<div class="doc_code">
+<pre>
+static int var = 0;
+
+void f ()
+{
+ static int var = 0;
+}
+</pre>
+</div>
+<p>Both of the static "var" variables would be included in the table. All
+ functions should emit both their full names and their basenames. For C or C++,
+ the full name is the mangled name (if available) which is usually in the
+ DW_AT_MIPS_linkage_name attribute, and the DW_AT_name contains the function
+ basename. If global or static variables have a mangled name in a
+ DW_AT_MIPS_linkage_name attribute, this should be emitted along with the
+ simple name found in the DW_AT_name attribute.</p>
+
+<p>".apple_types" sections should contain an entry for each DWARF DIE whose
+ tag is one of:</p>
+<ul>
+ <li>DW_TAG_array_type</li>
+ <li>DW_TAG_class_type</li>
+ <li>DW_TAG_enumeration_type</li>
+ <li>DW_TAG_pointer_type</li>
+ <li>DW_TAG_reference_type</li>
+ <li>DW_TAG_string_type</li>
+ <li>DW_TAG_structure_type</li>
+ <li>DW_TAG_subroutine_type</li>
+ <li>DW_TAG_typedef</li>
+ <li>DW_TAG_union_type</li>
+ <li>DW_TAG_ptr_to_member_type</li>
+ <li>DW_TAG_set_type</li>
+ <li>DW_TAG_subrange_type</li>
+ <li>DW_TAG_base_type</li>
+ <li>DW_TAG_const_type</li>
+ <li>DW_TAG_constant</li>
+ <li>DW_TAG_file_type</li>
+ <li>DW_TAG_namelist</li>
+ <li>DW_TAG_packed_type</li>
+ <li>DW_TAG_volatile_type</li>
+ <li>DW_TAG_restrict_type</li>
+ <li>DW_TAG_interface_type</li>
+ <li>DW_TAG_unspecified_type</li>
+ <li>DW_TAG_shared_type</li>
+</ul>
+<p>Only entries with a DW_AT_name attribute are included, and the entry must
+ not be a forward declaration (DW_AT_declaration attribute with a non-zero value).
+ For example, using the following code:</p>
+<div class="doc_code">
+<pre>
+int main ()
+{
+ int *b = 0;
+ return *b;
+}
+</pre>
+</div>
+<p>We get a few type DIEs:</p>
+<div class="doc_code">
+<pre>
+0x00000067: TAG_base_type [5]
+ AT_encoding( DW_ATE_signed )
+ AT_name( "int" )
+ AT_byte_size( 0x04 )
+
+0x0000006e: TAG_pointer_type [6]
+ AT_type( {0x00000067} ( int ) )
+ AT_byte_size( 0x08 )
+</pre>
+</div>
+<p>The DW_TAG_pointer_type is not included because it does not have a DW_AT_name.</p>
+
+<p>".apple_namespaces" section should contain all DW_TAG_namespace DIEs. If
+ we run into a namespace that has no name this is an anonymous namespace,
+ and the name should be output as "(anonymous namespace)" (without the quotes).
+ Why? This matches the output of the abi::cxa_demangle() that is in the standard
+ C++ library that demangles mangled names.</p>
+</div>
+
+<!-- ======================================================================= -->
+<h4>
+ <a name="acceltableextensions">Language Extensions and File Format Changes</a>
+</h4>
+<!-- ======================================================================= -->
+<div>
+<h5>Objective-C Extensions</h5>
+<p>".apple_objc" section should contain all DW_TAG_subprogram DIEs for an
+ Objective-C class. The name used in the hash table is the name of the
+ Objective-C class itself. If the Objective-C class has a category, then an
+ entry is made for both the class name without the category, and for the class
+ name with the category. So if we have a DIE at offset 0x1234 with a name
+ of method "-[NSString(my_additions) stringWithSpecialString:]", we would add
+ an entry for "NSString" that points to DIE 0x1234, and an entry for
+ "NSString(my_additions)" that points to 0x1234. This allows us to quickly
+ track down all Objective-C methods for an Objective-C class when doing
+ expressions. It is needed because of the dynamic nature of Objective-C where
+ anyone can add methods to a class. The DWARF for Objective-C methods is also
+ emitted differently from C++ classes: the methods are not usually
+ contained in the class definition; they are scattered across one or more
+ compile units. Categories can also be defined in different shared libraries.
+ So we need to be able to quickly find all of the methods and class functions
+ given the Objective-C class name, or quickly find all methods and class
+ functions for a class + category name. This table does not contain any selector
+ names; it just maps Objective-C class names (or class names + category) to all
+ of the methods and class functions. The selectors are added as function
+ basenames in the .debug_names section.</p>
+
+<p>In the ".apple_names" section for Objective-C functions, the full name is the
+ entire function name with the brackets ("-[NSString stringWithCString:]") and the
+ basename is the selector only ("stringWithCString:").</p>
+
+<h5>Mach-O Changes</h5>
+<p>The section names above for the Apple hash tables apply to non Mach-O files. For
+ Mach-O files, the sections should be contained in the "__DWARF" segment with
+ names as follows:</p>
+<ul>
+ <li>".apple_names" -> "__apple_names"</li>
+ <li>".apple_types" -> "__apple_types"</li>
+ <li>".apple_namespaces" -> "__apple_namespac" (16 character limit)</li>
+ <li> ".apple_objc" -> "__apple_objc"</li>
+</ul>
+</div>
+</div>
+</div>
+
+<!-- *********************************************************************** -->
+
+<hr>
+<address>
+ <a href="http://jigsaw.w3.org/css-validator/check/referer"><img
+ src="http://jigsaw.w3.org/css-validator/images/vcss-blue" alt="Valid CSS"></a>
+ <a href="http://validator.w3.org/check/referer"><img
+ src="http://www.w3.org/Icons/valid-html401-blue" alt="Valid HTML 4.01"></a>
+
+ <a href="mailto:sabre@nondot.org">Chris Lattner</a><br>
+ <a href="http://llvm.org/">LLVM Compiler Infrastructure</a><br>
+ Last modified: $Date$
+</address>
+
+</body>
+</html>
diff --git a/docs/SystemLibrary.html b/docs/SystemLibrary.html
new file mode 100644
index 00000000000..1ef221fa274
--- /dev/null
+++ b/docs/SystemLibrary.html
@@ -0,0 +1,316 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
+ "http://www.w3.org/TR/html4/strict.dtd">
+<html>
+<head>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
+ <title>System Library</title>
+ <link rel="stylesheet" href="_static/llvm.css" type="text/css">
+</head>
+<body>
+
+<h1>System Library</h1>
+<ul>
+ <li><a href="#abstract">Abstract</a></li>
+ <li><a href="#requirements">Keeping LLVM Portable</a>
+ <ol>
+ <li><a href="#headers">Don't Include System Headers</a></li>
+ <li><a href="#expose">Don't Expose System Headers</a></li>
+ <li><a href="#c_headers">Allow Standard C Header Files</a></li>
+ <li><a href="#cpp_headers">Allow Standard C++ Header Files</a></li>
+ <li><a href="#highlev">High-Level Interface</a></li>
+ <li><a href="#nofunc">No Exposed Functions</a></li>
+ <li><a href="#nodata">No Exposed Data</a></li>
+ <li><a href="#nodupl">No Duplicate Implementations</a></li>
+ <li><a href="#nounused">No Unused Functionality</a></li>
+ <li><a href="#virtuals">No Virtual Methods</a></li>
+ <li><a href="#softerrors">Minimize Soft Errors</a></li>
+ <li><a href="#throw_spec">No throw() Specifications</a></li>
+ <li><a href="#organization">Code Organization</a></li>
+ <li><a href="#semantics">Consistent Semantics</a></li>
+ <li><a href="#bug">Tracking Bugzilla Bug: 351</a></li>
+ </ol></li>
+</ul>
+
+<div class="doc_author">
+ <p>Written by <a href="mailto:rspencer@x10sys.com">Reid Spencer</a></p>
+</div>
+
+
+<!-- *********************************************************************** -->
+<h2><a name="abstract">Abstract</a></h2>
+<div>
+ <p>This document provides some details on LLVM's System Library, located in
+ the source at <tt>lib/System</tt> and <tt>include/llvm/System</tt>. The
+ library's purpose is to shield LLVM from the differences between operating
+ systems for the few services LLVM needs from the operating system. Much of
+ LLVM is written using portability features of standard C++. However, in a few
+ areas, system dependent facilities are needed and the System Library is the
+ wrapper around those system calls.</p>
+ <p>By centralizing LLVM's use of operating system interfaces, we make it
+ possible for the LLVM tool chain and runtime libraries to be more easily
+ ported to new platforms since (theoretically) only <tt>lib/System</tt> needs
+ to be ported. This library also unclutters the rest of LLVM from #ifdef use
+ and special cases for specific operating systems. Such uses are replaced
+ with simple calls to the interfaces provided in <tt>include/llvm/System</tt>.
+ </p>
+ <p>Note that the System Library is not intended to be a complete operating
+ system wrapper (such as the Adaptive Communications Environment (ACE) or
+ Apache Portable Runtime (APR)), but only provides the functionality necessary
+ to support LLVM.</p>
+ <p>The System Library was written by Reid Spencer who formulated the
+ design based on similar work originating from the eXtensible Programming
+ System (XPS). Several people helped with the effort; especially,
+ Jeff Cohen and Henrik Bach on the Win32 port.</p>
+</div>
+
+<!-- *********************************************************************** -->
+<h2>
+ <a name="requirements">Keeping LLVM Portable</a>
+</h2>
+<div>
+ <p>In order to keep LLVM portable, LLVM developers should adhere to a set of
+ portability rules associated with the System Library. Adherence to these rules
+ should help the System Library achieve its goal of shielding LLVM from the
+ variations in operating system interfaces and doing so efficiently. The
+ following sections define the rules needed to fulfill this objective.</p>
+
+<!-- ======================================================================= -->
+<h3><a name="headers">Don't Include System Headers</a></h3>
+<div>
+ <p>Except in <tt>lib/System</tt>, no LLVM source code should directly
+ <tt>#include</tt> a system header. Care has been taken to remove all such
+ <tt>#includes</tt> from LLVM while <tt>lib/System</tt> was being
+ developed. Specifically, this means that header files like "unistd.h",
+ "windows.h", "stdio.h", and "string.h" must not be included by LLVM
+ source code outside the implementation of <tt>lib/System</tt>.</p>
+ <p>To obtain system-dependent functionality, existing interfaces to the system
+ found in <tt>include/llvm/System</tt> should be used. If an appropriate
+ interface is not available, it should be added to <tt>include/llvm/System</tt>
+ and implemented in <tt>lib/System</tt> for all supported platforms.</p>
+</div>
+
+<!-- ======================================================================= -->
+<h3><a name="expose">Don't Expose System Headers</a></h3>
+<div>
+ <p>The System Library must shield LLVM from <em>all</em> system headers. To
+ obtain system level functionality, LLVM source must
+ <tt>#include "llvm/System/Thing.h"</tt> and nothing else. This means that
+ <tt>Thing.h</tt> cannot expose any system header files. This protects LLVM
+ from accidentally using system specific functionality and only allows it
+ via the <tt>lib/System</tt> interface.</p>
+</div>
+
+<!-- ======================================================================= -->
+<h3><a name="c_headers">Use Standard C Headers</a></h3>
+<div>
+ <p>The <em>standard</em> C headers (the ones beginning with "c") are allowed
+ to be exposed through the <tt>lib/System</tt> interface. These headers and
+ the things they declare are considered to be platform agnostic. LLVM source
+ files may include them directly or obtain their inclusion through
+ <tt>lib/System</tt> interfaces.</p>
+</div>
+
+<!-- ======================================================================= -->
+<h3><a name="cpp_headers">Use Standard C++ Headers</a></h3>
+<div>
+ <p>The <em>standard</em> C++ headers from the standard C++ library and
+ standard template library may be exposed through the <tt>lib/System</tt>
+ interface. These headers and the things they declare are considered to be
+ platform agnostic. LLVM source files may include them or obtain their
+ inclusion through lib/System interfaces.</p>
+</div>
+
+<!-- ======================================================================= -->
+<h3><a name="highlev">High Level Interface</a></h3>
+<div>
+ <p>The entry points specified in the interface of lib/System must be aimed at
+ completing some reasonably high level task needed by LLVM. We do not want to
+ simply wrap each operating system call. It would be preferable to wrap several
+ operating system calls that are always used in conjunction with one another by
+ LLVM.</p>
+ <p>For example, consider what is needed to execute a program, wait for it to
+ complete, and return its result code. On Unix, this involves the following
+ operating system calls: <tt>getenv, fork, execve,</tt> and <tt>wait</tt>. The
+ correct thing for lib/System to provide is a function, say
+ <tt>ExecuteProgramAndWait</tt>, that implements the functionality completely.
+ What we don't want is wrappers for the operating system calls involved.</p>
+ <p>There must <em>not</em> be a one-to-one relationship between operating
+ system calls and the System library's interface. Any such interface function
+ will be suspicious.</p>
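+
+  <p>As a purely hypothetical, Unix-only sketch (not the actual lib/System
+  interface), such an entry point finishes the whole task in one call:</p>
+  <pre><tt>
+  #include &lt;string&gt;
+  #include &lt;vector&gt;
+  #include &lt;sys/wait.h&gt;
+  #include &lt;unistd.h&gt;
+
+  // Runs Program with Args, waits for it, and returns its exit status,
+  // or -1 if the program could not be started.
+  int ExecuteProgramAndWait(const std::string &amp;Program,
+                            const std::vector&lt;std::string&gt; &amp;Args) {
+    pid_t Pid = fork();
+    if (Pid &lt; 0)
+      return -1;                    // hard error: could not fork
+    if (Pid == 0) {                 // child: build argv and exec
+      std::vector&lt;char *&gt; Argv;
+      Argv.push_back(const_cast&lt;char *&gt;(Program.c_str()));
+      for (const std::string &amp;A : Args)
+        Argv.push_back(const_cast&lt;char *&gt;(A.c_str()));
+      Argv.push_back(nullptr);
+      execvp(Program.c_str(), Argv.data());
+      _exit(127);                   // exec failed
+    }
+    int Status = 0;
+    if (waitpid(Pid, &amp;Status, 0) &lt; 0)
+      return -1;
+    return WIFEXITED(Status) ? WEXITSTATUS(Status) : -1;
+  }
+  </tt></pre>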
+</div>
+
+<!-- ======================================================================= -->
+<h3><a name="nounused">No Unused Functionality</a></h3>
+<div>
+ <p>There must be no functionality specified in the interface of lib/System
+ that isn't actually used by LLVM. We're not writing a general purpose
+ operating system wrapper here, just enough to satisfy LLVM's needs. And, LLVM
+ doesn't need much. This design goal aims to keep the lib/System interface
+ small and understandable which should foster its actual use and adoption.</p>
+</div>
+
+<!-- ======================================================================= -->
+<h3><a name="nodupl">No Duplicate Implementations</a></h3>
+<div>
+ <p>The implementation of a function for a given platform must be written
+ exactly once. This implies that it must be possible to apply a function's
+ implementation to multiple operating systems if those operating systems can
+ share the same implementation. This rule applies to the set of operating
+ systems supported for a given class of operating system (e.g. Unix, Win32).
+ </p>
+</div>
+
+<!-- ======================================================================= -->
+<h3><a name="virtuals">No Virtual Methods</a></h3>
+<div>
+ <p>The System Library interfaces can be called quite frequently by LLVM. In
+ order to make those calls as efficient as possible, we discourage the use of
+ virtual methods. There is no need to use inheritance for implementation
+ differences; it just adds complexity. The <tt>#include</tt> mechanism works
+ just fine.</p>
+</div>
+
+<!-- ======================================================================= -->
+<h3><a name="nofunc">No Exposed Functions</a></h3>
+<div>
+ <p>Any functions defined by system libraries (i.e. not defined by lib/System)
+ must not be exposed through the lib/System interface, even if the header file
+ for that function is not exposed. This prevents inadvertent use of system
+ specific functionality.</p>
+ <p>For example, the <tt>stat</tt> system call is notorious for having
+ variations in the data it provides. <tt>lib/System</tt> must not declare
+ <tt>stat</tt> nor allow it to be declared. Instead it should provide its own
+ interface to discovering information about files and directories. Those
+ interfaces may be implemented in terms of <tt>stat</tt> but that is strictly
+ an implementation detail. The interface provided by the System Library must
+ be implemented on all platforms (even those without <tt>stat</tt>).</p>
+</div>
+
+<!-- ======================================================================= -->
+<h3><a name="nodata">No Exposed Data</a></h3>
+<div>
+ <p>Any data defined by system libraries (i.e. not defined by lib/System) must
+ not be exposed through the lib/System interface, even if the header file for
+ that function is not exposed. As with functions, this prevents inadvertent use
+ of data that might not exist on all platforms.</p>
+</div>
+
+<!-- ======================================================================= -->
+<h3><a name="softerrors">Minimize Soft Errors</a></h3>
+<div>
+ <p>Operating system interfaces will generally provide error results for every
+ little thing that could go wrong. In almost all cases, you can divide these
+ error results into two groups: normal/good/soft and abnormal/bad/hard. That
+ is, some of the errors are simply information like "file not found",
+ "insufficient privileges", etc. while other errors are much harder like
+ "out of space", "bad disk sector", or "system call interrupted". We'll call
+ the first group "<i>soft</i>" errors and the second group "<i>hard</i>"
+ errors.<p>
+ <p>lib/System must always attempt to minimize soft errors.
+ This is a design requirement because the
+ minimization of soft errors can affect the granularity and the nature of the
+ interface. In general, if you find yourself wanting to throw soft errors,
+ you must review the granularity of the interface because it is likely you're
+ trying to implement something that is too low level. The rule of thumb is to
+ provide interface functions that <em>can't</em> fail, except when faced with
+ hard errors.</p>
+ <p>For a trivial example, suppose we wanted to add an "OpenFileForWriting"
+ function. For many operating systems, if the file doesn't exist, attempting
+ to open the file will produce an error. However, lib/System should not
+ simply throw that error if it occurs because it is a soft error. The problem
+ is that the interface function, OpenFileForWriting, is too low level. It should
+ be OpenOrCreateFileForWriting. In the case of the soft "doesn't exist" error,
+ this function would just create it and then open it for writing.</p>
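+
+  <p>A hypothetical sketch of that shape of interface (not an actual
+  lib/System function): the soft "doesn't exist" case is absorbed by creating
+  the file, so only hard errors remain visible to the caller.</p>
+  <pre><tt>
+  #include &lt;fstream&gt;
+  #include &lt;string&gt;
+
+  // Opens Path for writing, creating it if it does not exist. Returns false
+  // only if the file cannot be opened or created at all.
+  bool OpenOrCreateFileForWriting(const std::string &amp;Path,
+                                  std::ofstream &amp;Out) {
+    Out.open(Path, std::ios::out | std::ios::app);  // creates the file if absent
+    return Out.is_open();
+  }
+  </tt></pre>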
+ <p>This design principle needs to be maintained in lib/System because it
+ avoids the propagation of soft error handling throughout the rest of LLVM.
+ Hard errors will generally just cause a termination for an LLVM tool so don't
+ be bashful about throwing them.</p>
+ <p>Rules of thumb:</p>
+ <ol>
+ <li>Don't throw soft errors, only hard errors.</li>
+ <li>If you're tempted to throw a soft error, re-think the interface.</li>
+ <li>Handle internally the most common normal/good/soft error conditions
+ so the rest of LLVM doesn't have to.</li>
+ </ol>
+</div>
+
+<!-- ======================================================================= -->
+<h3><a name="throw_spec">No throw Specifications</a></h3>
+<div>
+ <p>None of the lib/System interface functions may be declared with C++
+ <tt>throw()</tt> specifications on them. This requirement makes sure that the
+ compiler does not insert additional exception handling code into the interface
+ functions. This is a performance consideration: lib/System functions are at
+ the bottom of many call chains and as such can be frequently called. We
+ need them to be as efficient as possible. However, no routines in the
+ system library should actually throw exceptions.</p>
+</div>
+
+<!-- ======================================================================= -->
+<h3><a name="organization">Code Organization</a></h3>
+<div>
+ <p>Implementations of the System Library interface are separated by their
+ general class of operating system. Currently only Unix and Win32 classes are
+ defined but more could be added for other operating system classifications.
+ To distinguish which implementation to compile, the code in lib/System uses
+ the LLVM_ON_UNIX and LLVM_ON_WIN32 #defines provided via configure through the
+ llvm/Config/config.h file. Each source file in lib/System, after implementing
+ the generic (operating system independent) functionality, needs to include the
+ correct implementation using a set of <tt>#if defined(LLVM_ON_XYZ)</tt>
+ directives. For example, if we had lib/System/File.cpp, we'd expect to see in
+ that file:</p>
+ <pre><tt>
+ #if defined(LLVM_ON_UNIX)
+ #include "Unix/File.cpp"
+ #endif
+ #if defined(LLVM_ON_WIN32)
+ #include "Win32/File.cpp"
+ #endif
+ </tt></pre>
+ <p>The implementation in lib/System/Unix/File.cpp should handle all Unix
+ variants. The implementation in lib/System/Win32/File.cpp should handle all
+ Win32 variants. What this does is quickly differentiate the basic class of
+ operating system that will provide the implementation. The specific details
+ for a given platform must still be determined through the use of
+ <tt>#ifdef</tt>.</p>
+</div>
+
+<!-- ======================================================================= -->
+<h3><a name="semantics">Consistent Semantics</a></h3>
+<div>
+ <p>The implementation of a lib/System interface can vary drastically between
+ platforms. That's okay as long as the end result of the interface function
+ is the same. For example, a function to create a directory is pretty
+ straightforward on all operating systems. System V IPC, on the other hand, isn't
+ even supported on all platforms. Instead of "supporting" System V IPC, lib/System
+ should provide an interface to the basic concept of inter-process
+ communications. The implementations might use System V IPC if that was
+ available or named pipes, or whatever gets the job done effectively for a
+ given operating system. In all cases, the interface and the implementation
+ must be semantically consistent. </p>
+</div>
+
+<!-- ======================================================================= -->
+<h3><a name="bug">Bug 351</a></h3>
+<div>
+ <p>See <a href="http://llvm.org/PR351">bug 351</a>
+ for further details on the progress of this work.</p>
+</div>
+
+</div>
+
+<!-- *********************************************************************** -->
+
+<hr>
+<address>
+ <a href="http://jigsaw.w3.org/css-validator/check/referer"><img
+ src="http://jigsaw.w3.org/css-validator/images/vcss-blue" alt="Valid CSS"></a>
+ <a href="http://validator.w3.org/check/referer"><img
+ src="http://www.w3.org/Icons/valid-html401-blue" alt="Valid HTML 4.01"></a>
+
+ <a href="mailto:rspencer@x10sys.com">Reid Spencer</a><br>
+ <a href="http://llvm.org/">LLVM Compiler Infrastructure</a><br>
+ Last modified: $Date$
+</address>
+</body>
+</html>
diff --git a/docs/TableGenFundamentals.rst b/docs/TableGenFundamentals.rst
new file mode 100644
index 00000000000..bfb2618998a
--- /dev/null
+++ b/docs/TableGenFundamentals.rst
@@ -0,0 +1,799 @@
+.. _tablegen:
+
+=====================
+TableGen Fundamentals
+=====================
+
+.. contents::
+ :local:
+
+Introduction
+============
+
+TableGen's purpose is to help a human develop and maintain records of
+domain-specific information. Because there may be a large number of these
+records, it is specifically designed to allow writing flexible descriptions and
+for common features of these records to be factored out. This reduces the
+amount of duplication in the description, reduces the chance of error, and makes
+it easier to structure domain-specific information.
+
+The core part of TableGen `parses a file`_, instantiates the declarations, and
+hands the result off to a domain-specific `TableGen backend`_ for processing.
+The current major user of TableGen is the `LLVM code
+generator <CodeGenerator.html>`_.
+
+Note that if you work on TableGen much and use emacs or vim, you can find an
+emacs "TableGen mode" and a vim language file in the ``llvm/utils/emacs`` and
+``llvm/utils/vim`` directories of your LLVM distribution, respectively.
+
+.. _intro:
+
+Basic concepts
+--------------
+
+TableGen files consist of two key parts: 'classes' and 'definitions', both of
+which are considered 'records'.
+
+**TableGen records** have a unique name, a list of values, and a list of
+superclasses. The list of values is the main data that TableGen builds for each
+record; it is this that holds the domain specific information for the
+application. The interpretation of this data is left to a specific `TableGen
+backend`_, but the structure and format rules are taken care of and are fixed by
+TableGen.
+
+**TableGen definitions** are the concrete form of 'records'. These generally do
+not have any undefined values, and are marked with the '``def``' keyword.
+
+**TableGen classes** are abstract records that are used to build and describe
+other records. These 'classes' allow the end-user to build abstractions for
+either the domain they are targeting (such as "Register", "RegisterClass", and
+"Instruction" in the LLVM code generator) or for the implementor to help factor
+out common properties of records (such as "FPInst", which is used to represent
+floating point instructions in the X86 backend). TableGen keeps track of all of
+the classes that are used to build up a definition, so the backend can find all
+definitions of a particular class, such as "Instruction".
+
+**TableGen multiclasses** are groups of abstract records that are instantiated
+all at once. Each instantiation can result in multiple TableGen definitions.
+If a multiclass inherits from another multiclass, the definitions in the
+sub-multiclass become part of the current multiclass, as if they were declared
+in the current multiclass.
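+
+As a small, self-contained sketch (all of the names here are made up for
+illustration), these concepts look like this in a TableGen file:
+
+.. code-block:: llvm
+
+  class Widget<int n> { int Size = n; }   // an abstract record (class)
+  def SmallWidget : Widget<1>;            // a concrete record (definition)
+
+  multiclass Widgets<int n> {             // expands into several defs at once
+    def _a : Widget<n>;
+    def _b : Widget<n>;
+  }
+  defm Pair : Widgets<2>;                 // creates Pair_a and Pair_b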
+
+.. _described above:
+
+An example record
+-----------------
+
+With no other arguments, TableGen parses the specified file and prints out all
+of the classes, then all of the definitions. This is a good way to see what the
+various definitions expand to fully. Running this on the ``X86.td`` file prints
+this (at the time of this writing):
+
+.. code-block:: llvm
+
+ ...
+ def ADD32rr { // Instruction X86Inst I
+ string Namespace = "X86";
+ dag OutOperandList = (outs GR32:$dst);
+ dag InOperandList = (ins GR32:$src1, GR32:$src2);
+ string AsmString = "add{l}\t{$src2, $dst|$dst, $src2}";
+ list<dag> Pattern = [(set GR32:$dst, (add GR32:$src1, GR32:$src2))];
+ list<Register> Uses = [];
+ list<Register> Defs = [EFLAGS];
+ list<Predicate> Predicates = [];
+ int CodeSize = 3;
+ int AddedComplexity = 0;
+ bit isReturn = 0;
+ bit isBranch = 0;
+ bit isIndirectBranch = 0;
+ bit isBarrier = 0;
+ bit isCall = 0;
+ bit canFoldAsLoad = 0;
+ bit mayLoad = 0;
+ bit mayStore = 0;
+ bit isImplicitDef = 0;
+ bit isConvertibleToThreeAddress = 1;
+ bit isCommutable = 1;
+ bit isTerminator = 0;
+ bit isReMaterializable = 0;
+ bit isPredicable = 0;
+ bit hasDelaySlot = 0;
+ bit usesCustomInserter = 0;
+ bit hasCtrlDep = 0;
+ bit isNotDuplicable = 0;
+ bit hasSideEffects = 0;
+ bit neverHasSideEffects = 0;
+ InstrItinClass Itinerary = NoItinerary;
+ string Constraints = "";
+ string DisableEncoding = "";
+ bits<8> Opcode = { 0, 0, 0, 0, 0, 0, 0, 1 };
+ Format Form = MRMDestReg;
+ bits<6> FormBits = { 0, 0, 0, 0, 1, 1 };
+ ImmType ImmT = NoImm;
+ bits<3> ImmTypeBits = { 0, 0, 0 };
+ bit hasOpSizePrefix = 0;
+ bit hasAdSizePrefix = 0;
+ bits<4> Prefix = { 0, 0, 0, 0 };
+ bit hasREX_WPrefix = 0;
+ FPFormat FPForm = ?;
+ bits<3> FPFormBits = { 0, 0, 0 };
+ }
+ ...
+
+This definition corresponds to the 32-bit register-register add instruction of
+the X86 architecture. The string after the '``def``' keyword indicates the name
+of the record---"``ADD32rr``" in this case---and the comment at the end of the
+line indicates the superclasses of the definition. The body of the record
+contains all of the data that TableGen assembled for the record, indicating that
+the instruction is part of the "X86" namespace, the pattern indicating how the
+instruction should be emitted into the assembly file, that it is a two-address
+instruction, has a particular encoding, etc. The contents and semantics of the
+information in the record are specific to the needs of the X86 backend, and are
+only shown as an example.
+
+As you can see, a lot of information is needed for every instruction supported
+by the code generator, and specifying it all manually would be unmaintainable,
+prone to bugs, and tiring to do in the first place. Because we are using
+TableGen, all of the information was derived from the following definition:
+
+.. code-block:: llvm
+
+ let Defs = [EFLAGS],
+ isCommutable = 1, // X = ADD Y,Z --> X = ADD Z,Y
+ isConvertibleToThreeAddress = 1 in // Can transform into LEA.
+ def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst),
+ (ins GR32:$src1, GR32:$src2),
+ "add{l}\t{$src2, $dst|$dst, $src2}",
+ [(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
+
+This definition makes use of the custom class ``I`` (extended from the custom
+class ``X86Inst``), which is defined in the X86-specific TableGen file, to
+factor out the common features that instructions of its class share. A key
+feature of TableGen is that it allows the end-user to define the abstractions
+they prefer to use when describing their information.
+
+Each def record has a special entry called "``NAME``." This is the name of the
+def ("``ADD32rr``" above). In the general case def names can be formed from
+various kinds of string processing expressions and ``NAME`` resolves to the
+final value obtained after resolving all of those expressions. The user may
+refer to ``NAME`` anywhere she desires to use the ultimate name of the def.
+``NAME`` should not be defined anywhere else in user code to avoid conflict
+problems.
+
+Running TableGen
+----------------
+
+TableGen runs just like any other LLVM tool. The first (optional) argument
+specifies the file to read. If a filename is not specified, ``llvm-tblgen``
+reads from standard input.
+
+To be useful, one of the `TableGen backends`_ must be used. These backends are
+selectable on the command line (type '``llvm-tblgen -help``' for a list). For
+example, to get a list of all of the definitions that subclass a particular type
+(which can be useful for building up an enum list of these records), use the
+``-print-enums`` option:
+
+.. code-block:: bash
+
+ $ llvm-tblgen X86.td -print-enums -class=Register
+ AH, AL, AX, BH, BL, BP, BPL, BX, CH, CL, CX, DH, DI, DIL, DL, DX, EAX, EBP, EBX,
+ ECX, EDI, EDX, EFLAGS, EIP, ESI, ESP, FP0, FP1, FP2, FP3, FP4, FP5, FP6, IP,
+ MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7, R10, R10B, R10D, R10W, R11, R11B, R11D,
+ R11W, R12, R12B, R12D, R12W, R13, R13B, R13D, R13W, R14, R14B, R14D, R14W, R15,
+ R15B, R15D, R15W, R8, R8B, R8D, R8W, R9, R9B, R9D, R9W, RAX, RBP, RBX, RCX, RDI,
+ RDX, RIP, RSI, RSP, SI, SIL, SP, SPL, ST0, ST1, ST2, ST3, ST4, ST5, ST6, ST7,
+ XMM0, XMM1, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, XMM2, XMM3, XMM4, XMM5,
+ XMM6, XMM7, XMM8, XMM9,
+
+ $ llvm-tblgen X86.td -print-enums -class=Instruction
+ ABS_F, ABS_Fp32, ABS_Fp64, ABS_Fp80, ADC32mi, ADC32mi8, ADC32mr, ADC32ri,
+ ADC32ri8, ADC32rm, ADC32rr, ADC64mi32, ADC64mi8, ADC64mr, ADC64ri32, ADC64ri8,
+ ADC64rm, ADC64rr, ADD16mi, ADD16mi8, ADD16mr, ADD16ri, ADD16ri8, ADD16rm,
+ ADD16rr, ADD32mi, ADD32mi8, ADD32mr, ADD32ri, ADD32ri8, ADD32rm, ADD32rr,
+ ADD64mi32, ADD64mi8, ADD64mr, ADD64ri32, ...
+
+The default backend prints out all of the records, as `described above`_.
+
+If you plan to use TableGen, you will most likely have to `write a backend`_
+that extracts the information specific to what you need and formats it in the
+appropriate way.
+
+.. _parses a file:
+
+TableGen syntax
+===============
+
+TableGen doesn't care about the meaning of data (that is up to the backend to
+define), but it does care about syntax, and it enforces a simple type system.
+This section describes the syntax and the constructs allowed in a TableGen file.
+
+TableGen primitives
+-------------------
+
+TableGen comments
+^^^^^^^^^^^^^^^^^
+
+TableGen supports BCPL style "``//``" comments, which run to the end of the
+line, and it also supports **nestable** "``/* */``" comments.
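+
+For example (a trivial sketch):
+
+.. code-block:: llvm
+
+  // A line comment that runs to the end of the line.
+  /* A block comment /* that may nest */ and then continue. */
+  def Example;   // defs may carry trailing comments too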
+
+.. _TableGen type:
+
+The TableGen type system
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+TableGen files are strongly typed, in a simple (but complete) type system.
+These types are used to perform automatic conversions, check for errors, and to
+help interface designers constrain the input that they allow. Every `value
+definition`_ is required to have an associated type.
+
+TableGen supports a mixture of very low-level types (such as ``bit``) and very
+high-level types (such as ``dag``). This flexibility is what allows it to
+describe a wide range of information conveniently and compactly. The TableGen
+types are:
+
+``bit``
+ A 'bit' is a boolean value that can hold either 0 or 1.
+
+``int``
+ The 'int' type represents a simple 32-bit integer value, such as 5.
+
+``string``
+ The 'string' type represents an ordered sequence of characters of arbitrary
+ length.
+
+``bits<n>``
+ A 'bits' type is an integer of arbitrary but fixed width that is broken up
+ into individual bits. This type is useful because it can handle some bits
+ being defined while others are undefined.
+
+``list<ty>``
+ This type represents a list whose elements are some other type. The
+ contained type is arbitrary: it can even be another list type.
+
+Class type
+ Specifying a class name in a type context means that the defined value must
+ be a subclass of the specified class. This is useful in conjunction with
+ the ``list`` type, for example, to constrain the elements of the list to a
+ common base class (e.g., a ``list<Register>`` can only contain definitions
+ derived from the "``Register``" class).
+
+``dag``
+ This type represents a nestable directed graph of elements.
+
+``code``
+ This represents a big hunk of text. This is lexically distinct from string
+ values because it doesn't require escaping double quotes and other common
+ characters that occur in code.
+
+To date, these types have been sufficient for describing things that TableGen
+has been used for, but it is straightforward to extend this list if needed.
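+
+As a quick sketch (the class and field names are invented here), several of
+these types can be combined in a single record:
+
+.. code-block:: llvm
+
+  class TypeDemo {
+    bit       Flag     = 1;
+    int       Size     = 32;
+    string    Name     = "demo";
+    bits<4>   Encoding = { 0, 1, 0, 1 };
+    list<int> Sizes    = [8, 16, 32];
+  }
+  def ADemo : TypeDemo;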
+
+.. _TableGen expressions:
+
+TableGen values and expressions
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+TableGen supports a fairly rich set of expression forms for building up values.
+These forms allow the TableGen file to be written in a
+natural syntax and flavor for the application. The current expression forms
+supported include:
+
+``?``
+ uninitialized field
+
+``0b1001011``
+ binary integer value
+
+``07654321``
+ octal integer value (indicated by a leading 0)
+
+``7``
+ decimal integer value
+
+``0x7F``
+ hexadecimal integer value
+
+``"foo"``
+ string value
+
+``[{ ... }]``
+ code fragment
+
+``[ X, Y, Z ]<type>``
+ list value. <type> is the type of the list element and is usually optional.
+ In rare cases, TableGen is unable to deduce the element type in which case
+ the user must specify it explicitly.
+
+``{ a, b, c }``
+ initializer for a "bits<3>" value
+
+``value``
+ value reference
+
+``value{17}``
+ access to one bit of a value
+
+``value{15-17}``
+ access to multiple bits of a value
+
+``DEF``
+ reference to a record definition
+
+``CLASS<val list>``
+ reference to a new anonymous definition of CLASS with the specified template
+ arguments.
+
+``X.Y``
+ reference to the subfield of a value
+
+``list[4-7,17,2-3]``
+ A slice of the 'list' list, including elements 4,5,6,7,17,2, and 3 from it.
+ Elements may be included multiple times.
+
+``foreach <var> = [ <list> ] in { <body> }``
+
+``foreach <var> = [ <list> ] in <def>``
+ Replicate <body> or <def>, replacing instances of <var> with each value
+ in <list>. <var> is scoped at the level of the ``foreach`` loop and must
+ not conflict with any other object introduced in <body> or <def>. Currently
+ only ``def``\s are expanded within <body>.
+
+``foreach <var> = 0-15 in ...``
+
+``foreach <var> = {0-15,32-47} in ...``
+ Loop over ranges of integers. The braces are required for multiple ranges.
+
+``(DEF a, b)``
+ a dag value. The first element is required to be a record definition; the
+ remaining elements in the list may be arbitrary other values, including
+ nested '``dag``' values.
+
+``!strconcat(a, b)``
+ A string value that is the result of concatenating the 'a' and 'b' strings.
+
+``str1#str2``
+ "#" (paste) is a shorthand for !strconcat. It may concatenate things that
+ are not quoted strings, in which case an implicit !cast<string> is done on
+ the operand of the paste.
+
+``!cast<type>(a)``
+ A symbol of type *type* obtained by looking up the string 'a' in the symbol
+ table. If the type of 'a' does not match *type*, TableGen aborts with an
+ error. ``!cast<string>`` is a special case in that the argument must be an
+ object defined by a 'def' construct.
+
+``!subst(a, b, c)``
+ If 'a' and 'b' are of string type or are symbol references, substitute 'b'
+ for 'a' in 'c.' This operation is analogous to $(subst) in GNU make.
+
+``!foreach(a, b, c)``
+ For each member 'b' of dag or list 'a' apply operator 'c.' 'b' is a dummy
+ variable that should be declared as a member variable of an instantiated
+ class. This operation is analogous to $(foreach) in GNU make.
+
+``!head(a)``
+ The first element of list 'a.'
+
+``!tail(a)``
+ The 2nd-N elements of list 'a.'
+
+``!empty(a)``
+ An integer {0,1} indicating whether list 'a' is empty.
+
+``!if(a,b,c)``
+ 'b' if the result of 'int' or 'bit' operator 'a' is nonzero, 'c' otherwise.
+
+``!eq(a,b)``
+ 'bit 1' if string a is equal to string b, 0 otherwise. This only operates
+ on string, int and bit objects. Use !cast<string> to compare other types of
+ objects.
+
+Note that all of the values have rules specifying how they convert to values
+for different types. These rules allow you to assign a value like "``7``"
+to a "``bits<4>``" value, for example.
+
+Classes and definitions
+-----------------------
+
+As mentioned in the `intro`_, classes and definitions (collectively known as
+'records') in TableGen are the main high-level unit of information that TableGen
+collects. Records are defined with a ``def`` or ``class`` keyword, the record
+name, and an optional list of "`template arguments`_". If the record has
+superclasses, they are specified as a comma separated list that starts with a
+colon character ("``:``"). If `value definitions`_ or `let expressions`_ are
+needed for the class, they are enclosed in curly braces ("``{}``"); otherwise,
+the record ends with a semicolon.
+
+Here is a simple TableGen file:
+
+.. code-block:: llvm
+
+ class C { bit V = 1; }
+ def X : C;
+ def Y : C {
+ string Greeting = "hello";
+ }
+
+This example creates two definitions, ``X`` and ``Y``, both of which derive from
+the ``C`` class. Because of this, they both get the ``V`` bit value. The ``Y``
+definition also gets the ``Greeting`` member.
+
+In general, classes are useful for collecting together the commonality between a
+group of records and isolating it in a single place. Also, classes permit the
+specification of default values for their subclasses, allowing the subclasses to
+override them as they wish.
+
+.. _value definition:
+.. _value definitions:
+
+Value definitions
+^^^^^^^^^^^^^^^^^
+
+Value definitions define named entries in records. A value must be defined
+before it can be referred to as the operand for another value definition or
+before the value is reset with a `let expression`_. A value is defined by
+specifying a `TableGen type`_ and a name. If an initial value is available, it
+may be specified after the type with an equal sign. Value definitions require
+terminating semicolons.
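+
+A minimal sketch (names invented):
+
+.. code-block:: llvm
+
+  class ValueDemo {
+    int Size;               // declared, but left uninitialized
+    string Name = "value";  // declared with an initial value
+  }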
+
+.. _let expression:
+.. _let expressions:
+.. _"let" expressions within a record:
+
+'let' expressions
+^^^^^^^^^^^^^^^^^
+
+A record-level let expression is used to change the value of a value definition
+in a record. This is primarily useful when a superclass defines a value that a
+derived class or definition wants to override. Let expressions consist of the
+'``let``' keyword followed by a value name, an equal sign ("``=``"), and a new
+value. For example, a new class could be added to the example above, redefining
+the ``V`` field for all of its subclasses:
+
+.. code-block:: llvm
+
+ class D : C { let V = 0; }
+ def Z : D;
+
+In this case, the ``Z`` definition will have a zero value for its ``V`` value,
+despite the fact that it derives (indirectly) from the ``C`` class, because the
+``D`` class overrode its value.
+
+.. _template arguments:
+
+Class template arguments
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+TableGen permits the definition of parameterized classes as well as normal
+concrete classes. Parameterized TableGen classes specify a list of variable
+bindings (which may optionally have defaults) that are bound when used. Here is
+a simple example:
+
+.. code-block:: llvm
+
+ class FPFormat<bits<3> val> {
+ bits<3> Value = val;
+ }
+ def NotFP : FPFormat<0>;
+ def ZeroArgFP : FPFormat<1>;
+ def OneArgFP : FPFormat<2>;
+ def OneArgFPRW : FPFormat<3>;
+ def TwoArgFP : FPFormat<4>;
+ def CompareFP : FPFormat<5>;
+ def CondMovFP : FPFormat<6>;
+ def SpecialFP : FPFormat<7>;
+
+In this case, template arguments are used as a space-efficient way to specify a
+list of "enumeration values", each with a "``Value``" field set to the specified
+integer.
+
+The more esoteric forms of `TableGen expressions`_ are useful in conjunction
+with template arguments. As an example:
+
+.. code-block:: llvm
+
+ class ModRefVal<bits<2> val> {
+ bits<2> Value = val;
+ }
+
+ def None : ModRefVal<0>;
+ def Mod : ModRefVal<1>;
+ def Ref : ModRefVal<2>;
+ def ModRef : ModRefVal<3>;
+
+ class Value<ModRefVal MR> {
+ // Decode some information into a more convenient format, while providing
+ // a nice interface to the user of the "Value" class.
+ bit isMod = MR.Value{0};
+ bit isRef = MR.Value{1};
+
+ // other stuff...
+ }
+
+ // Example uses
+ def bork : Value<Mod>;
+ def zork : Value<Ref>;
+ def hork : Value<ModRef>;
+
+This is obviously a contrived example, but it shows how template arguments can
+be used to decouple the interface provided to the user of the class from the
+actual internal data representation expected by the class. In this case,
+running ``llvm-tblgen`` on the example prints the following definitions:
+
+.. code-block:: llvm
+
+ def bork { // Value
+ bit isMod = 1;
+ bit isRef = 0;
+ }
+ def hork { // Value
+ bit isMod = 1;
+ bit isRef = 1;
+ }
+ def zork { // Value
+ bit isMod = 0;
+ bit isRef = 1;
+ }
+
+This shows that TableGen was able to dig into the argument and extract a piece
+of information that was requested by the designer of the "Value" class. For
+more realistic examples, please see existing users of TableGen, such as the X86
+backend.
+
+Multiclass definitions and instances
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+While classes with template arguments are a good way to factor commonality
+between two instances of a definition, multiclasses allow a convenient notation
+for defining multiple definitions at once (instances of implicitly constructed
+classes). For example, consider a 3-address instruction set whose instructions
+come in two forms: "``reg = reg op reg``" and "``reg = reg op imm``"
+(e.g. SPARC). In this case, you'd like to specify in one place that this
+commonality exists, then in a separate place indicate what all the ops are.
+
+Here is an example TableGen fragment that shows this idea:
+
+.. code-block:: llvm
+
+ def ops;
+ def GPR;
+ def Imm;
+ class inst<int opc, string asmstr, dag operandlist>;
+
+ multiclass ri_inst<int opc, string asmstr> {
+ def _rr : inst<opc, !strconcat(asmstr, " $dst, $src1, $src2"),
+ (ops GPR:$dst, GPR:$src1, GPR:$src2)>;
+ def _ri : inst<opc, !strconcat(asmstr, " $dst, $src1, $src2"),
+ (ops GPR:$dst, GPR:$src1, Imm:$src2)>;
+ }
+
+ // Instantiations of the ri_inst multiclass.
+ defm ADD : ri_inst<0b111, "add">;
+ defm SUB : ri_inst<0b101, "sub">;
+ defm MUL : ri_inst<0b100, "mul">;
+ ...
+
+The names of the resultant definitions have the multiclass def names appended
+to them, so this defines ``ADD_rr``, ``ADD_ri``, ``SUB_rr``, etc. A defm may
+inherit from multiple multiclasses, instantiating definitions from each
+multiclass. Using a multiclass this way is exactly equivalent to instantiating
+the classes multiple times yourself, e.g. by writing:
+
+.. code-block:: llvm
+
+ def ops;
+ def GPR;
+ def Imm;
+ class inst<int opc, string asmstr, dag operandlist>;
+
+ class rrinst<int opc, string asmstr>
+ : inst<opc, !strconcat(asmstr, " $dst, $src1, $src2"),
+ (ops GPR:$dst, GPR:$src1, GPR:$src2)>;
+
+ class riinst<int opc, string asmstr>
+ : inst<opc, !strconcat(asmstr, " $dst, $src1, $src2"),
+ (ops GPR:$dst, GPR:$src1, Imm:$src2)>;
+
+ // Instantiations of the ri_inst multiclass.
+ def ADD_rr : rrinst<0b111, "add">;
+ def ADD_ri : riinst<0b111, "add">;
+ def SUB_rr : rrinst<0b101, "sub">;
+ def SUB_ri : riinst<0b101, "sub">;
+ def MUL_rr : rrinst<0b100, "mul">;
+ def MUL_ri : riinst<0b100, "mul">;
+ ...
+
+A ``defm`` can also be used inside a multiclass, providing several levels of
+multiclass instantiation.
+
+.. code-block:: llvm
+
+ class Instruction<bits<4> opc, string Name> {
+ bits<4> opcode = opc;
+ string name = Name;
+ }
+
+ multiclass basic_r<bits<4> opc> {
+ def rr : Instruction<opc, "rr">;
+ def rm : Instruction<opc, "rm">;
+ }
+
+ multiclass basic_s<bits<4> opc> {
+ defm SS : basic_r<opc>;
+ defm SD : basic_r<opc>;
+ def X : Instruction<opc, "x">;
+ }
+
+ multiclass basic_p<bits<4> opc> {
+ defm PS : basic_r<opc>;
+ defm PD : basic_r<opc>;
+ def Y : Instruction<opc, "y">;
+ }
+
+ defm ADD : basic_s<0xf>, basic_p<0xf>;
+ ...
+
+ // Results
+ def ADDPDrm { ...
+ def ADDPDrr { ...
+ def ADDPSrm { ...
+ def ADDPSrr { ...
+ def ADDSDrm { ...
+ def ADDSDrr { ...
+ def ADDY { ...
+ def ADDX { ...
+
+``defm`` declarations can also inherit from classes. The rule to follow is that
+the class list must start after the last multiclass, and there must be at least
+one multiclass before the classes.
+
+.. code-block:: llvm
+
+ class XD { bits<4> Prefix = 11; }
+ class XS { bits<4> Prefix = 12; }
+
+ class I<bits<4> op> {
+ bits<4> opcode = op;
+ }
+
+ multiclass R {
+ def rr : I<4>;
+ def rm : I<2>;
+ }
+
+ multiclass Y {
+ defm SS : R, XD;
+ defm SD : R, XS;
+ }
+
+ defm Instr : Y;
+
+ // Results
+ def InstrSDrm {
+ bits<4> opcode = { 0, 0, 1, 0 };
+ bits<4> Prefix = { 1, 1, 0, 0 };
+ }
+ ...
+ def InstrSSrr {
+ bits<4> opcode = { 0, 1, 0, 0 };
+ bits<4> Prefix = { 1, 0, 1, 1 };
+ }
+
+File scope entities
+-------------------
+
+File inclusion
+^^^^^^^^^^^^^^
+
+TableGen supports the '``include``' token, which textually substitutes the
+specified file in place of the include directive. The filename should be
+specified as a double quoted string immediately after the '``include``' keyword.
+Example:
+
+.. code-block:: llvm
+
+ include "foo.td"
+
+'let' expressions
+^^^^^^^^^^^^^^^^^
+
+"Let" expressions at file scope are similar to `"let" expressions within a
+record`_, except they can specify a value binding for multiple records at a
+time, and may be useful in certain other cases. File-scope let expressions are
+really just another way that TableGen allows the end-user to factor out
+commonality from the records.
+
+File-scope "let" expressions take a comma-separated list of bindings to apply,
+and one or more records to bind the values in. Here are some examples:
+
+.. code-block:: llvm
+
+ let isTerminator = 1, isReturn = 1, isBarrier = 1, hasCtrlDep = 1 in
+ def RET : I<0xC3, RawFrm, (outs), (ins), "ret", [(X86retflag 0)]>;
+
+ let isCall = 1 in
+ // All calls clobber the non-callee saved registers...
+ let Defs = [EAX, ECX, EDX, FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0,
+ MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
+ XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7, EFLAGS] in {
+ def CALLpcrel32 : Ii32<0xE8, RawFrm, (outs), (ins i32imm:$dst,variable_ops),
+ "call\t${dst:call}", []>;
+ def CALL32r : I<0xFF, MRM2r, (outs), (ins GR32:$dst, variable_ops),
+ "call\t{*}$dst", [(X86call GR32:$dst)]>;
+ def CALL32m : I<0xFF, MRM2m, (outs), (ins i32mem:$dst, variable_ops),
+ "call\t{*}$dst", []>;
+ }
+
+File-scope "let" expressions are often useful when a couple of definitions need
+to be added to several records, and the records do not otherwise need to be
+opened, as in the case with the ``CALL*`` instructions above.
+
+It's also possible to use "let" expressions inside multiclasses, providing more
+ways to factor out commonality from the records, especially when using several
+levels of multiclass instantiation. This also avoids the need to use "let"
+expressions within subsequent records inside a multiclass.
+
+.. code-block:: llvm
+
+ multiclass basic_r<bits<4> opc> {
+ let Predicates = [HasSSE2] in {
+ def rr : Instruction<opc, "rr">;
+ def rm : Instruction<opc, "rm">;
+ }
+ let Predicates = [HasSSE3] in
+ def rx : Instruction<opc, "rx">;
+ }
+
+ multiclass basic_ss<bits<4> opc> {
+ let IsDouble = 0 in
+ defm SS : basic_r<opc>;
+
+ let IsDouble = 1 in
+ defm SD : basic_r<opc>;
+ }
+
+ defm ADD : basic_ss<0xf>;
+
+Looping
+^^^^^^^
+
+TableGen supports the '``foreach``' block, which textually replicates the loop
+body, substituting iterator values for iterator references in the body.
+Example:
+
+.. code-block:: llvm
+
+ foreach i = [0, 1, 2, 3] in {
+ def R#i : Register<...>;
+ def F#i : Register<...>;
+ }
+
+This will create objects ``R0``, ``R1``, ``R2`` and ``R3``. ``foreach`` blocks
+may be nested. If there is only one item in the body, the braces may be
+elided:
+
+.. code-block:: llvm
+
+ foreach i = [0, 1, 2, 3] in
+ def R#i : Register<...>;
+
+Code Generator backend info
+===========================
+
+Expressions used by the code generator to describe instructions and isel patterns:
+
+``(implicit a)``
+ an implicitly defined physical register. This tells the DAG instruction
+ selection emitter that the input pattern's extra definitions match implicit
+ physical register definitions.
+
+.. _TableGen backend:
+.. _TableGen backends:
+.. _write a backend:
+
+TableGen backends
+=================
+
+TODO: How they work, how to write one. This section should not contain details
+about any particular backend, except maybe ``-print-enums`` as an example. This
+should highlight the APIs in ``TableGen/Record.h``.
diff --git a/docs/TestSuiteMakefileGuide.html b/docs/TestSuiteMakefileGuide.html
new file mode 100644
index 00000000000..1b24250380f
--- /dev/null
+++ b/docs/TestSuiteMakefileGuide.html
@@ -0,0 +1,351 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
+ "http://www.w3.org/TR/html4/strict.dtd">
+<html>
+<head>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
+ <title>LLVM test-suite Makefile Guide</title>
+ <link rel="stylesheet" href="_static/llvm.css" type="text/css">
+</head>
+<body>
+
+<h1>
+ LLVM test-suite Makefile Guide
+</h1>
+
+<ol>
+ <li><a href="#overview">Overview</a></li>
+ <li><a href="#testsuitestructure">Test suite structure</a></li>
+ <li><a href="#testsuiterun">Running the test suite</a>
+ <ul>
+ <li><a href="#testsuiteexternal">Configuring External Tests</a></li>
+ <li><a href="#testsuitetests">Running different tests</a></li>
+ <li><a href="#testsuiteoutput">Generating test output</a></li>
+ <li><a href="#testsuitecustom">Writing custom tests for test-suite</a></li>
+ </ul>
+ </li>
+</ol>
+
+<div class="doc_author">
+ <p>Written by John T. Criswell, Daniel Dunbar, Reid Spencer, and Tanya Lattner</p>
+</div>
+
+<!--=========================================================================-->
+<h2><a name="overview">Overview</a></h2>
+<!--=========================================================================-->
+
+<div>
+
+<p>This document describes the features of the Makefile-based LLVM
+test-suite. This way of interacting with the test-suite is deprecated in favor
+of running the test-suite using LNT, but may continue to prove useful for some
+users. See the Testing
+Guide's <a href="TestingGuide.html#testsuitequickstart">test-suite
+Quickstart</a> section for more information.</p>
+
+</div>
+
+<!--=========================================================================-->
+<h2><a name="testsuitestructure">Test suite Structure</a></h2>
+<!--=========================================================================-->
+
+<div>
+
+<p>The <tt>test-suite</tt> module contains a number of programs that can be compiled
+with LLVM and executed. These programs are compiled using the native compiler
+and various LLVM backends. The output from the program compiled with the
+native compiler is assumed correct; the results from the other programs are
+compared to the native program output and pass if they match.</p>
+
+<p>When executing tests, it is usually a good idea to start out with a subset of
+the available tests or programs. This keeps test run times small at first, and
+later on it is useful for investigating individual test failures. To run a
+test only on a subset of programs, simply change directory to the programs you
+want tested and run <tt>gmake</tt> there. Alternatively, you can run a different
+test using the <tt>TEST</tt> variable to change what tests are run on the
+selected programs (see below for more info).</p>
+
+<p>In addition to testing correctness, the <tt>test-suite</tt> directory also
+performs timing tests of various LLVM optimizations. It also records
+compilation times for the compilers and the JIT. This information can be
+used to compare the effectiveness of LLVM's optimizations and code
+generation.</p>
+
+<p><tt>test-suite</tt> tests are divided into three types of tests: MultiSource,
+SingleSource, and External.</p>
+
+<ul>
+<li><tt>test-suite/SingleSource</tt>
+<p>The SingleSource directory contains test programs that are only a single
+source file in size. These are usually small benchmark programs or small
+programs that calculate a particular value. Several such programs are grouped
+together in each directory.</p></li>
+
+<li><tt>test-suite/MultiSource</tt>
+<p>The MultiSource directory contains subdirectories which contain entire
+programs with multiple source files. Large benchmarks and whole applications
+go here.</p></li>
+
+<li><tt>test-suite/External</tt>
+<p>The External directory contains Makefiles for building code that is external
+to (i.e., not distributed with) LLVM. The most prominent members of this
+directory are the SPEC 95 and SPEC 2000 benchmark suites. The <tt>External</tt>
+directory does not contain these actual tests, but only the Makefiles that know
+how to properly compile these programs from somewhere else. The presence and
+location of these external programs is configured by the test-suite
+<tt>configure</tt> script.</p></li>
+</ul>
+
+<p>Each tree is then subdivided into several categories, including applications,
+benchmarks, regression tests, code that is strange grammatically, etc. These
+organizations should be relatively self-explanatory.</p>
+
+<p>Some tests are known to fail. Some are bugs that we have not fixed yet;
+others are features that we haven't added yet (or may never add). In the
+regression tests, the result for such tests will be XFAIL (eXpected FAILure).
+In this way, you can tell the difference between an expected and unexpected
+failure.</p>
+
+<p>The tests in the test suite have no such feature at this time. If the
+test passes, only warnings and other miscellaneous output will be generated. If
+a test fails, a large &lt;program&gt; FAILED message will be displayed. This
+will help you separate benign warnings from actual test failures.</p>
+
+</div>
+
+<!--=========================================================================-->
+<h2><a name="testsuiterun">Running the test suite</a></h2>
+<!--=========================================================================-->
+
+<div>
+
+<p>First, all tests are executed within the LLVM object directory tree. They
+<i>are not</i> executed inside of the LLVM source tree. This is because the
+test suite creates temporary files during execution.</p>
+
+<p>To run the test suite, you need to use the following steps:</p>
+
+<ol>
+ <li><tt>cd</tt> into the <tt>llvm/projects</tt> directory in your source tree.
+ </li>
+
+ <li><p>Check out the <tt>test-suite</tt> module with:</p>
+
+<div class="doc_code">
+<pre>
+% svn co http://llvm.org/svn/llvm-project/test-suite/trunk test-suite
+</pre>
+</div>
+ <p>This will get the test suite into <tt>llvm/projects/test-suite</tt>.</p>
+ </li>
+ <li><p>Configure and build <tt>llvm</tt>.</p></li>
+ <li><p>Configure and build <tt>llvm-gcc</tt>.</p></li>
+ <li><p>Install <tt>llvm-gcc</tt> somewhere.</p></li>
+ <li><p><em>Re-configure</em> <tt>llvm</tt> from the top level of
+ each build tree (LLVM object directory tree) in which you want
+ to run the test suite, just as you do before building LLVM.</p>
+ <p>During the <em>re-configuration</em>, you must either: (1)
+ have <tt>llvm-gcc</tt> you just built in your path, or (2)
+ specify the directory where your just-built <tt>llvm-gcc</tt> is
+ installed using <tt>--with-llvmgccdir=$LLVM_GCC_DIR</tt>.</p>
+ <p>You must also tell the configure machinery that the test suite
+ is available so it can be configured for your build tree:</p>
+<div class="doc_code">
+<pre>
+% cd $LLVM_OBJ_ROOT ; $LLVM_SRC_ROOT/configure [--with-llvmgccdir=$LLVM_GCC_DIR]
+</pre>
+</div>
+ <p>[Remember that <tt>$LLVM_GCC_DIR</tt> is the directory where you
+ <em>installed</em> llvm-gcc, not its src or obj directory.]</p>
+ </li>
+
+ <li><p>You can now run the test suite from your build tree as follows:</p>
+<div class="doc_code">
+<pre>
+% cd $LLVM_OBJ_ROOT/projects/test-suite
+% make
+</pre>
+</div>
+ </li>
+</ol>
+<p>Note that the second and third steps only need to be done once. After you
+have the suite checked out and configured, you don't need to do it again (unless
+the test code or configure script changes).</p>
+
+<!-- _______________________________________________________________________ -->
+<h3>
+ <a name="testsuiteexternal">Configuring External Tests</a>
+</h3>
+<!-- _______________________________________________________________________ -->
+
+<div>
+<p>In order to run the External tests in the <tt>test-suite</tt>
+ module, you must specify <i>--with-externals</i>. This
+ must be done during the <em>re-configuration</em> step (see above),
+ and the <tt>llvm</tt> re-configuration must recognize the
+ previously-built <tt>llvm-gcc</tt>. If any of these is missing or
+ neglected, the External tests won't work.</p>
+<dl>
+<dt><i>--with-externals</i></dt>
+<dt><i>--with-externals=&lt;<tt>directory</tt>&gt;</i></dt>
+</dl>
+ This tells LLVM where to find any external tests. They are expected to be
+ in specifically named subdirectories of &lt;<tt>directory</tt>&gt;.
+ If <tt>directory</tt> is left unspecified,
+ <tt>configure</tt> uses the default value
+ <tt>/home/vadve/shared/benchmarks/speccpu2000/benchspec</tt>.
+ Subdirectory names known to LLVM include:
+ <dl>
+ <dt>spec95</dt>
+ <dt>speccpu2000</dt>
+ <dt>speccpu2006</dt>
+ <dt>povray31</dt>
+ </dl>
+ Others are added from time to time, and can be determined from
+ <tt>configure</tt>.
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h3>
+ <a name="testsuitetests">Running different tests</a>
+</h3>
+<!-- _______________________________________________________________________ -->
+<div>
+<p>In addition to the regular "whole program" tests, the <tt>test-suite</tt>
+module also provides a mechanism for compiling the programs in different ways.
+If the variable TEST is defined on the <tt>gmake</tt> command line, the test system will
+include a Makefile named <tt>TEST.&lt;value of TEST variable&gt;.Makefile</tt>.
+This Makefile can modify build rules to yield different results.</p>
+
+<p>For example, the LLVM nightly tester uses <tt>TEST.nightly.Makefile</tt> to
+create the nightly test reports. To run the nightly tests, run <tt>gmake
+TEST=nightly</tt>.</p>
+
+<p>There are several TEST Makefiles available in the tree. Some of them are
+designed for internal LLVM research and will not work outside of the LLVM
+research group. They may still be valuable, however, as a guide to writing your
+own TEST Makefile for any optimization or analysis passes that you develop with
+LLVM.</p>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h3>
+ <a name="testsuiteoutput">Generating test output</a>
+</h3>
+<!-- _______________________________________________________________________ -->
+<div>
+ <p>There are a number of ways to run the tests and generate output. The
+ simplest is to run <tt>gmake</tt> with no arguments. This will
+ compile and run all programs in the tree using a number of different methods
+ and compare results. Any failures are reported in the output, but are likely
+ drowned in the other output. Passes are not reported explicitly.</p>
+
+ <p>Somewhat better is running <tt>gmake TEST=sometest test</tt>, which runs
+ the specified test and usually adds per-program summaries to the output
+ (depending on which sometest you use). For example, the <tt>nightly</tt> test
+ explicitly outputs TEST-PASS or TEST-FAIL for every test after each program.
+ Though these lines are still drowned in the output, it's easy to grep the
+ output logs in the Output directories.</p>
+
+ <p>Even better are the <tt>report</tt> and <tt>report.format</tt> targets
+ (where <tt>format</tt> is one of <tt>html</tt>, <tt>csv</tt>, <tt>text</tt> or
+ <tt>graphs</tt>). The exact contents of the report are dependent on which
+ <tt>TEST</tt> you are running, but the text results are always shown at the
+ end of the run and the results are always stored in the
+ <tt>report.&lt;type&gt;.format</tt> file (when running with
+ <tt>TEST=&lt;type&gt;</tt>).</p>
+
+ <p>The <tt>report</tt> target also generates a file called
+ <tt>report.&lt;type&gt;.raw.out</tt> containing the output of the entire test
+ run.</p>
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h3>
+ <a name="testsuitecustom">Writing custom tests for the test suite</a>
+</h3>
+<!-- _______________________________________________________________________ -->
+
+<div>
+
+<p>Assuming you can run the test suite (e.g. "<tt>gmake TEST=nightly report</tt>"
+should work), it is really easy to run optimizations or code generator
+components against every program in the tree, collecting statistics or running
+custom checks for correctness. At base, this is how the nightly tester works;
+it's just one example of a general framework.</p>
+
+<p>Let's say that you have an LLVM optimization pass, and you want to see how
+many times it triggers. The first thing you should do is add an LLVM
+<a href="ProgrammersManual.html#Statistic">statistic</a> to your pass, which
+will tally counts of things you care about.</p>
+
+<p>Following this, you can set up a test and a report that collects these and
+formats them for easy viewing. This consists of two files, a
+"<tt>test-suite/TEST.XXX.Makefile</tt>" fragment (where XXX is the name of your
+test) and a "<tt>test-suite/TEST.XXX.report</tt>" file that indicates how to
+format the output into a table. There are many example reports of various
+levels of sophistication included with the test suite, and the framework is very
+general.</p>
+
+<p>If you are interested in testing an optimization pass, check out the
+"libcalls" test as an example. It can be run like this:<p>
+
+<div class="doc_code">
+<pre>
+% cd llvm/projects/test-suite/MultiSource/Benchmarks # or some other level
+% make TEST=libcalls report
+</pre>
+</div>
+
+<p>This will do a bunch of stuff, then eventually print a table like this:</p>
+
+<div class="doc_code">
+<pre>
+Name | total | #exit |
+...
+FreeBench/analyzer/analyzer | 51 | 6 |
+FreeBench/fourinarow/fourinarow | 1 | 1 |
+FreeBench/neural/neural | 19 | 9 |
+FreeBench/pifft/pifft | 5 | 3 |
+MallocBench/cfrac/cfrac | 1 | * |
+MallocBench/espresso/espresso | 52 | 12 |
+MallocBench/gs/gs | 4 | * |
+Prolangs-C/TimberWolfMC/timberwolfmc | 302 | * |
+Prolangs-C/agrep/agrep | 33 | 12 |
+Prolangs-C/allroots/allroots | * | * |
+Prolangs-C/assembler/assembler | 47 | * |
+Prolangs-C/bison/mybison | 74 | * |
+...
+</pre>
+</div>
+
+<p>This basically is grepping the -stats output and displaying it in a table.
+You can also use the "TEST=libcalls report.html" target to get the table in HTML
+form, similarly for report.csv and report.tex.</p>
+
+<p>The source for this is in test-suite/TEST.libcalls.*. The format is pretty
+simple: the Makefile indicates how to run the test (in this case,
+"<tt>opt -simplify-libcalls -stats</tt>"), and the report contains one line for
+each column of the output. The first value is the header for the column and the
+second is the regex to grep the output of the command for. There are lots of
+example reports that can do fancy stuff.</p>
+
+</div>
+
+</div>
+
+<!-- *********************************************************************** -->
+
+<hr>
+<address>
+ <a href="http://jigsaw.w3.org/css-validator/check/referer"><img
+ src="http://jigsaw.w3.org/css-validator/images/vcss-blue" alt="Valid CSS"></a>
+ <a href="http://validator.w3.org/check/referer"><img
+ src="http://www.w3.org/Icons/valid-html401-blue" alt="Valid HTML 4.01"></a>
+
+ John T. Criswell, Daniel Dunbar, Reid Spencer, and Tanya Lattner<br>
+ <a href="http://llvm.org/">The LLVM Compiler Infrastructure</a><br>
+ Last modified: $Date$
+</address>
+</body>
+</html>
diff --git a/docs/TestingGuide.html b/docs/TestingGuide.html
new file mode 100644
index 00000000000..1f9c9157306
--- /dev/null
+++ b/docs/TestingGuide.html
@@ -0,0 +1,915 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
+ "http://www.w3.org/TR/html4/strict.dtd">
+<html>
+<head>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
+ <title>LLVM Testing Infrastructure Guide</title>
+ <link rel="stylesheet" href="_static/llvm.css" type="text/css">
+</head>
+<body>
+
+<h1>
+ LLVM Testing Infrastructure Guide
+</h1>
+
+<ol>
+ <li><a href="#overview">Overview</a></li>
+ <li><a href="#requirements">Requirements</a></li>
+ <li><a href="#org">LLVM testing infrastructure organization</a>
+ <ul>
+ <li><a href="#regressiontests">Regression tests</a></li>
+ <li><a href="#testsuite"><tt>test-suite</tt></a></li>
+ <li><a href="#debuginfotests">Debugging Information tests</a></li>
+ </ul>
+ </li>
+ <li><a href="#quick">Quick start</a>
+ <ul>
+ <li><a href="#quickregressiontests">Regression tests</a></li>
+ <li><a href="#quickdebuginfotests">Debugging Information tests</a></li>
+ </ul>
+ </li>
+ <li><a href="#rtstructure">Regression test structure</a>
+ <ul>
+ <li><a href="#rtcustom">Writing new regression tests</a></li>
+ <li><a href="#FileCheck">The FileCheck utility</a></li>
+ <li><a href="#rtvars">Variables and substitutions</a></li>
+ <li><a href="#rtfeatures">Other features</a></li>
+ </ul>
+ </li>
+ <li><a href="#testsuiteoverview"><tt>test-suite</tt> Overview</a>
+ <ul>
+ <li><a href="#testsuitequickstart"><tt>test-suite</tt> Quickstart</a></li>
+ <li><a href="#testsuitemakefiles"><tt>test-suite</tt> Makefiles</a></li>
+ </ul>
+ </li>
+</ol>
+
+<div class="doc_author">
+ <p>Written by John T. Criswell, Daniel Dunbar, Reid Spencer, and Tanya Lattner</p>
+</div>
+
+<!--=========================================================================-->
+<h2><a name="overview">Overview</a></h2>
+<!--=========================================================================-->
+
+<div>
+
+<p>This document is the reference manual for the LLVM testing infrastructure. It
+documents the structure of the LLVM testing infrastructure, the tools needed to
+use it, and how to add and run tests.</p>
+
+</div>
+
+<!--=========================================================================-->
+<h2><a name="requirements">Requirements</a></h2>
+<!--=========================================================================-->
+
+<div>
+
+<p>In order to use the LLVM testing infrastructure, you will need all of the
+software required to build LLVM, as well
+as <a href="http://python.org">Python</a> 2.4 or later.</p>
+
+</div>
+
+<!--=========================================================================-->
+<h2><a name="org">LLVM testing infrastructure organization</a></h2>
+<!--=========================================================================-->
+
+<div>
+
+<p>The LLVM testing infrastructure contains two major categories of tests:
+regression tests and whole programs. The regression tests are contained inside
+the LLVM repository itself under <tt>llvm/test</tt> and are expected to always
+pass -- they should be run before every commit.</p>
+
+<p>The whole programs tests are referred to as the "LLVM test suite" (or
+"test-suite") and are in the <tt>test-suite</tt> module in subversion. For
+historical reasons, these tests are also referred to as the "nightly tests" in
+places, which is less ambiguous than "test-suite" and remains in use although we
+run them much more often than nightly.</p>
+
+<!-- _______________________________________________________________________ -->
+<h3><a name="regressiontests">Regression tests</a></h3>
+<!-- _______________________________________________________________________ -->
+
+<div>
+
+<p>The regression tests are small pieces of code that test a specific feature of
+LLVM or trigger a specific bug in LLVM. They are usually written in LLVM
+assembly language, but can be written in other languages if the test targets a
+particular language front end (and the appropriate <tt>--with-llvmgcc</tt>
+options were used at <tt>configure</tt> time of the <tt>llvm</tt> module). These
+tests are driven by the 'lit' testing tool, which is part of LLVM.</p>
+
+<p>These code fragments are not complete programs. The code generated
+from them is never executed to determine correct behavior.</p>
+
+<p>These code fragment tests are located in the <tt>llvm/test</tt>
+directory.</p>
+
+<p>Typically when a bug is found in LLVM, a regression test containing
+just enough code to reproduce the problem should be written and placed
+somewhere underneath this directory. In most cases, this will be a small
+piece of LLVM assembly language code, often distilled from an actual
+application or benchmark.</p>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h3><a name="testsuite"><tt>test-suite</tt></a></h3>
+<!-- _______________________________________________________________________ -->
+
+<div>
+
+<p>The test suite contains whole programs, which are pieces of code which can be
+compiled and linked into a stand-alone program that can be executed. These
+programs are generally written in high level languages such as C or C++.</p>
+
+<p>These programs are compiled using a user specified compiler and set of flags,
+and then executed to capture the program output and timing information. The
+output of these programs is compared to a reference output to ensure that the
+program is being compiled correctly.</p>
+
+<p>In addition to compiling and executing programs, whole program tests serve as
+a way of benchmarking LLVM performance, both in terms of the efficiency of the
+programs generated as well as the speed with which LLVM compiles, optimizes, and
+generates code.</p>
+
+<p>The test-suite is located in the <tt>test-suite</tt> Subversion module.</p>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h3><a name="debuginfotests">Debugging Information tests</a></h3>
+<!-- _______________________________________________________________________ -->
+
+<div>
+
+<p>The test suite contains tests to check the quality of debugging information.
+The tests are written in C-based languages or in LLVM assembly language.</p>
+
+<p>These tests are compiled and run under a debugger. The debugger output
+is checked to validate the debugging information. See README.txt in the
+test suite for more information. This test suite is located in the
+<tt>debuginfo-tests</tt> Subversion module.</p>
+
+</div>
+
+</div>
+
+<!--=========================================================================-->
+<h2><a name="quick">Quick start</a></h2>
+<!--=========================================================================-->
+
+<div>
+
+ <p>The tests are located in two separate Subversion modules. The regression
+ tests are in the main "llvm" module under the directory
+ <tt>llvm/test</tt> (so you get these tests for free with the main llvm
+ tree). Use "make check-all" to run the regression tests after building
+ LLVM.</p>
+
+ <p>The more comprehensive test suite that includes whole programs in C and C++
+ is in the <tt>test-suite</tt>
+ module. See <a href="#testsuitequickstart"><tt>test-suite</tt> Quickstart</a>
+ for more information on running these tests.</p>
+
+<!-- _______________________________________________________________________ -->
+<h3><a name="quickregressiontests">Regression tests</a></h3>
+<div>
+<!-- _______________________________________________________________________ -->
+<p>To run all of the LLVM regression tests, use the master Makefile in
+ the <tt>llvm/test</tt> directory:</p>
+
+<div class="doc_code">
+<pre>
+% gmake -C llvm/test
+</pre>
+</div>
+
+<p>or</p>
+
+<div class="doc_code">
+<pre>
+% gmake check
+</pre>
+</div>
+
+<p>If you have <a href="http://clang.llvm.org/">Clang</a> checked out and built,
+you can run the LLVM and Clang tests simultaneously using:</p>
+
+
+<div class="doc_code">
+<pre>
+% gmake check-all
+</pre>
+</div>
+
+<p>To run the tests with Valgrind (Memcheck by default), just append
+<tt>VG=1</tt> to the commands above, e.g.:</p>
+
+<div class="doc_code">
+<pre>
+% gmake check VG=1
+</pre>
+</div>
+
+<p>To run individual tests or subsets of tests, you can use the 'llvm-lit'
+script which is built as part of LLVM. For example, to run the
+'Integer/BitCast.ll' test by itself you can run:</p>
+
+<div class="doc_code">
+<pre>
+% llvm-lit ~/llvm/test/Integer/BitCast.ll
+</pre>
+</div>
+
+<p>or to run all of the ARM CodeGen tests:</p>
+
+<div class="doc_code">
+<pre>
+% llvm-lit ~/llvm/test/CodeGen/ARM
+</pre>
+</div>
+
+<p>For more information on using the 'lit' tool, see 'llvm-lit --help' or the
+'lit' man page.</p>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h3><a name="quickdebuginfotests">Debugging Information tests</a></h3>
+<div>
+<!-- _______________________________________________________________________ -->
+<div>
+
+<p>To run the debugging information tests, simply check out the tests inside
+the clang/test directory:</p>
+
+<div class="doc_code">
+<pre>
+% cd clang/test
+% svn co http://llvm.org/svn/llvm-project/debuginfo-tests/trunk debuginfo-tests
+</pre>
+</div>
+
+<p> These tests are already set up to run as part of clang regression tests.</p>
+
+</div>
+
+</div>
+
+</div>
+
+<!--=========================================================================-->
+<h2><a name="rtstructure">Regression test structure</a></h2>
+<!--=========================================================================-->
+<div>
+ <p>The LLVM regression tests are driven by 'lit' and are located in
+ the <tt>llvm/test</tt> directory.</p>
+
+ <p>This directory contains a large array of small tests
+ that exercise various features of LLVM and ensure that regressions do not
+ occur. The directory is broken into several sub-directories, each focused on
+ a particular area of LLVM. A few of the important ones are:</p>
+
+ <ul>
+ <li><tt>Analysis</tt>: checks Analysis passes.</li>
+ <li><tt>Archive</tt>: checks the Archive library.</li>
+ <li><tt>Assembler</tt>: checks Assembly reader/writer functionality.</li>
+ <li><tt>Bitcode</tt>: checks Bitcode reader/writer functionality.</li>
+ <li><tt>CodeGen</tt>: checks code generation and each target.</li>
+ <li><tt>Features</tt>: checks various features of the LLVM language.</li>
+ <li><tt>Linker</tt>: tests bitcode linking.</li>
+ <li><tt>Transforms</tt>: tests each of the scalar, IPO, and utility
+ transforms to ensure they make the right transformations.</li>
+ <li><tt>Verifier</tt>: tests the IR verifier.</li>
+ </ul>
+
+<!-- _______________________________________________________________________ -->
+<h3><a name="rtcustom">Writing new regression tests</a></h3>
+<!-- _______________________________________________________________________ -->
+<div>
+ <p>The regression test structure is very simple, but does require some
+ information to be set. This information is gathered via <tt>configure</tt> and
+ is written to a file, <tt>lit.site.cfg</tt>
+ in <tt>llvm/test</tt>. The <tt>llvm/test</tt> Makefile does this work for
+ you.</p>
+
+ <p>In order for the regression tests to work, each directory of tests must
+ have a <tt>lit.local.cfg</tt> file. Lit looks for this file to determine how
+ to run the tests. This file is just Python code and thus is very flexible,
+ but we've standardized it for the LLVM regression tests. If you're adding a
+ directory of tests, just copy <tt>lit.local.cfg</tt> from another directory to
+ get running. The standard <tt>lit.local.cfg</tt> simply specifies which files
+ to look in for tests. Any directory that contains only directories does not
+ need the <tt>lit.local.cfg</tt> file. Read the
+ <a href="http://llvm.org/cmds/lit.html">Lit documentation</a> for more
+ information. </p>
+
+ <p>The <tt>llvm-runtests</tt> function looks at each file that is passed to
+ it and gathers any lines together that match "RUN:". These are the "RUN" lines
+ that specify how the test is to be run. So, each test script must contain
+ RUN lines if it is to do anything. If there are no RUN lines, the
+ <tt>llvm-runtests</tt> function will issue an error and the test will
+ fail.</p>
+
+ <p>RUN lines are specified in the comments of the test program using the
+ keyword <tt>RUN</tt> followed by a colon, and lastly the command (pipeline)
+ to execute. Together, these lines form the "script" that
+ <tt>llvm-runtests</tt> executes to run the test case. The syntax of the
+ RUN lines is similar to a shell's syntax for pipelines including I/O
+ redirection and variable substitution. However, even though these lines
+ may <i>look</i> like a shell script, they are not. RUN lines are interpreted
+ directly by the Tcl <tt>exec</tt> command. They are never executed by a
+ shell. Consequently the syntax differs from normal shell script syntax in a
+ few ways. You can specify as many RUN lines as needed.</p>
+
+ <p>lit performs substitution on each RUN line to replace LLVM tool
+ names with the full paths to the executable built for each tool (in
+ $(LLVM_OBJ_ROOT)/$(BuildMode)/bin). This ensures that lit does not
+ invoke any stray LLVM tools in the user's path during testing.</p>
+
+ <p>Each RUN line is executed on its own, distinct from other lines unless
+ its last character is <tt>\</tt>. This continuation character causes the RUN
+ line to be concatenated with the next one. In this way you can build up long
+ pipelines of commands without making huge line lengths. The lines ending in
+ <tt>\</tt> are concatenated until a RUN line that doesn't end in <tt>\</tt> is
+ found. This concatenated set of RUN lines then constitutes one execution.
+ Tcl will substitute variables and arrange for the pipeline to be executed. If
+ any process in the pipeline fails, the entire line (and test case) fails too.
+ </p>
+
+ <p> Below is an example of legal RUN lines in a <tt>.ll</tt> file:</p>
+
+<div class="doc_code">
+<pre>
+; RUN: llvm-as &lt; %s | llvm-dis &gt; %t1
+; RUN: llvm-dis &lt; %s.bc-13 &gt; %t2
+; RUN: diff %t1 %t2
+</pre>
+</div>
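+
+  <p>A single logical RUN line may also be split across several physical lines
+  using the continuation character described above. A sketch (the pass and
+  grep pattern are illustrative only):</p>
+
+<div class="doc_code">
+<pre>
+; RUN: llvm-as &lt; %s | opt -instcombine | \
+; RUN:   llvm-dis | grep {add i32}
+</pre>
+</div>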
+
+ <p>As with a Unix shell, the RUN: lines permit pipelines and I/O redirection
+ to be used. However, the usage is slightly different than for Bash. To check
+ what's legal, see the documentation for the
+ <a href="http://www.tcl.tk/man/tcl8.5/TclCmd/exec.htm#M2">Tcl exec</a>
+ command and the
+ <a href="http://www.tcl.tk/man/tcl8.5/tutorial/Tcl26.html">tutorial</a>.
+ The major differences are:</p>
+ <ul>
+ <li>You can't do <tt>2&gt;&amp;1</tt>. That will cause Tcl to write to a
+ file named <tt>&amp;1</tt>. Usually this is done to get stderr to go through
+ a pipe. You can do that in tcl with <tt>|&amp;</tt> so replace this idiom:
+ <tt>... 2&gt;&amp;1 | grep</tt> with <tt>... |&amp; grep</tt></li>
+ <li>You can only redirect to a file, not to another descriptor and not from
+ a here document.</li>
+ <li>tcl supports redirecting to open files with the @ syntax but you
+ shouldn't use that here.</li>
+ </ul>
+
+ <p>There are some quoting rules that you must pay attention to when writing
+ your RUN lines. In general nothing needs to be quoted. Tcl won't strip off any
+ quote characters so they will get passed to the invoked program. For
+ example:</p>
+
+<div class="doc_code">
+<pre>
+... | grep 'find this string'
+</pre>
+</div>
+
+  <p>This will fail because the ' characters are passed to grep. This would
+  instruct grep to look for <tt>'find</tt> in the files <tt>this</tt> and
+  <tt>string'</tt>. To avoid this, use curly braces to tell Tcl that it should
+  treat everything enclosed as one value. So our example would become:</p>
+
+<div class="doc_code">
+<pre>
+... | grep {find this string}
+</pre>
+</div>
+
+ <p>Additionally, the characters <tt>[</tt> and <tt>]</tt> are treated
+ specially by Tcl. They tell Tcl to interpret the content as a command to
+ execute. Since these characters are often used in regular expressions this can
+ have disastrous results and cause the entire test run in a directory to fail.
+ For example, a common idiom is to look for some basicblock number:</p>
+
+<div class="doc_code">
+<pre>
+... | grep bb[2-8]
+</pre>
+</div>
+
+  <p>This, however, will cause Tcl to fail because it's going to try to execute
+  a program named "2-8". Instead, what you want is this:</p>
+
+<div class="doc_code">
+<pre>
+... | grep {bb\[2-8\]}
+</pre>
+</div>
+
+ <p>Finally, if you need to pass the <tt>\</tt> character down to a program,
+ then it must be doubled. This is another Tcl special character. So, suppose
+  you had:</p>
+
+<div class="doc_code">
+<pre>
+... | grep 'i32\*'
+</pre>
+</div>
+
+  <p>This will fail to match what you want (a pointer to i32). First, the
+  <tt>'</tt> characters do not get stripped off. Second, the <tt>\</tt> gets
+  stripped off by Tcl, so what grep sees is <tt>'i32*'</tt>. That's not likely
+  to match anything. To resolve this, you must use <tt>\\</tt> and the
+  <tt>{}</tt>, like this:</p>
+
+<div class="doc_code">
+<pre>
+... | grep {i32\\*}
+</pre>
+</div>
+
+<p>If your system includes GNU <tt>grep</tt>, make sure
+that <tt>GREP_OPTIONS</tt> is not set in your environment. Otherwise,
+you may get invalid results (both false positives and false
+negatives).</p>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h3><a name="FileCheck">The FileCheck utility</a></h3>
+<!-- _______________________________________________________________________ -->
+
+<div>
+
+<p>A powerful feature of the RUN: lines is that they allow arbitrary commands
+   to be executed as part of the test harness. While standard (portable) unix
+   tools like 'grep' work fine on run lines, as you see above, there are a lot
+   of caveats due to interaction with Tcl syntax, and we want to make sure the
+   run lines are portable to a wide range of systems. Another major problem is
+   that grep is not very good at verifying that the output of a tool
+   contains a series of different strings in a specific order. The FileCheck
+   tool was designed to help with these problems.</p>
+
+<p>FileCheck (whose basic command line arguments are described in <a
+   href="http://llvm.org/cmds/FileCheck.html">the FileCheck man page</a>) is
+   designed to read a file to check from standard input, and the set of things
+   to verify from a file specified as a command line argument. A simple example
+   of using FileCheck from a RUN line looks like this:</p>
+
+<div class="doc_code">
+<pre>
+; RUN: llvm-as &lt; %s | llc -march=x86-64 | <b>FileCheck %s</b>
+</pre>
+</div>
+
+<p>This syntax says to pipe the current file ("%s") into llvm-as, pipe that into
+llc, then pipe the output of llc into FileCheck. This means that FileCheck will
+be verifying its standard input (the llc output) against the filename argument
+specified (the original .ll file specified by "%s"). To see how this works,
+let's look at the rest of the .ll file (after the RUN line):</p>
+
+<div class="doc_code">
+<pre>
+define void @sub1(i32* %p, i32 %v) {
+entry:
+; <b>CHECK: sub1:</b>
+; <b>CHECK: subl</b>
+ %0 = tail call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %p, i32 %v)
+ ret void
+}
+
+define void @inc4(i64* %p) {
+entry:
+; <b>CHECK: inc4:</b>
+; <b>CHECK: incq</b>
+ %0 = tail call i64 @llvm.atomic.load.add.i64.p0i64(i64* %p, i64 1)
+ ret void
+}
+</pre>
+</div>
+
+<p>Here you can see some "CHECK:" lines specified in comments. Now you can see
+how the file is piped into llvm-as, then llc, and the machine code output is
+what we are verifying. FileCheck checks the machine code output to verify that
+it matches what the "CHECK:" lines specify.</p>
+
+<p>The syntax of the CHECK: lines is very simple: they are fixed strings that
+must occur in order. FileCheck defaults to ignoring horizontal whitespace
+differences (e.g. a space is allowed to match a tab) but otherwise, the contents
+of the CHECK: line are required to match something in the test file exactly.</p>
+
+<p>One nice thing about FileCheck (compared to grep) is that it allows merging
+test cases together into logical groups. For example, because the test above
+is checking for the "sub1:" and "inc4:" labels, it will not match unless there
+is a "subl" in between those labels. If it existed somewhere else in the file,
+that would not count: "grep subl" matches if subl exists anywhere in the
+file.</p>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="FileCheck-check-prefix">The FileCheck -check-prefix option</a>
+</h4>
+
+<div>
+
+<p>The FileCheck -check-prefix option allows multiple test configurations to be
+driven from one .ll file. This is useful in many circumstances, for example,
+testing different architectural variants with llc. Here's a simple example:</p>
+
+<div class="doc_code">
+<pre>
+; RUN: llvm-as &lt; %s | llc -mtriple=i686-apple-darwin9 -mattr=sse41 \
+; RUN: | <b>FileCheck %s -check-prefix=X32</b>
+; RUN: llvm-as &lt; %s | llc -mtriple=x86_64-apple-darwin9 -mattr=sse41 \
+; RUN: | <b>FileCheck %s -check-prefix=X64</b>
+
+define &lt;4 x i32&gt; @pinsrd_1(i32 %s, &lt;4 x i32&gt; %tmp) nounwind {
+ %tmp1 = insertelement &lt;4 x i32&gt; %tmp, i32 %s, i32 1
+ ret &lt;4 x i32&gt; %tmp1
+; <b>X32:</b> pinsrd_1:
+; <b>X32:</b> pinsrd $1, 4(%esp), %xmm0
+
+; <b>X64:</b> pinsrd_1:
+; <b>X64:</b> pinsrd $1, %edi, %xmm0
+}
+</pre>
+</div>
+
+<p>In this case, we're testing that we get the expected code generation with
+both 32-bit and 64-bit code generation.</p>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="FileCheck-CHECK-NEXT">The "CHECK-NEXT:" directive</a>
+</h4>
+
+<div>
+
+<p>Sometimes you want to match lines and would like to verify that matches
+happen on exactly consecutive lines with no other lines in between them. In
+this case, you can use CHECK: and CHECK-NEXT: directives to specify this. If
+you specified a custom check prefix, just use "&lt;PREFIX&gt;-NEXT:". For
+example, something like this works as you'd expect:</p>
+
+<div class="doc_code">
+<pre>
+define void @t2(&lt;2 x double&gt;* %r, &lt;2 x double&gt;* %A, double %B) {
+ %tmp3 = load &lt;2 x double&gt;* %A, align 16
+ %tmp7 = insertelement &lt;2 x double&gt; undef, double %B, i32 0
+ %tmp9 = shufflevector &lt;2 x double&gt; %tmp3,
+ &lt;2 x double&gt; %tmp7,
+ &lt;2 x i32&gt; &lt; i32 0, i32 2 &gt;
+ store &lt;2 x double&gt; %tmp9, &lt;2 x double&gt;* %r, align 16
+ ret void
+
+; <b>CHECK:</b> t2:
+; <b>CHECK:</b> movl 8(%esp), %eax
+; <b>CHECK-NEXT:</b> movapd (%eax), %xmm0
+; <b>CHECK-NEXT:</b> movhpd 12(%esp), %xmm0
+; <b>CHECK-NEXT:</b> movl 4(%esp), %eax
+; <b>CHECK-NEXT:</b> movapd %xmm0, (%eax)
+; <b>CHECK-NEXT:</b> ret
+}
+</pre>
+</div>
+
+<p>CHECK-NEXT: directives reject the input unless there is exactly one newline
+between it and the previous directive. A CHECK-NEXT cannot be the first
+directive in a file.</p>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="FileCheck-CHECK-NOT">The "CHECK-NOT:" directive</a>
+</h4>
+
+<div>
+
+<p>The CHECK-NOT: directive is used to verify that a string doesn't occur
+between two matches (or the first match and the beginning of the file). For
+example, to verify that a load is removed by a transformation, a test like this
+can be used:</p>
+
+<div class="doc_code">
+<pre>
+define i8 @coerce_offset0(i32 %V, i32* %P) {
+ store i32 %V, i32* %P
+
+ %P2 = bitcast i32* %P to i8*
+ %P3 = getelementptr i8* %P2, i32 2
+
+ %A = load i8* %P3
+ ret i8 %A
+; <b>CHECK:</b> @coerce_offset0
+; <b>CHECK-NOT:</b> load
+; <b>CHECK:</b> ret i8
+}
+</pre>
+</div>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="FileCheck-Matching">FileCheck Pattern Matching Syntax</a>
+</h4>
+
+<div>
+
+<!-- {% raw %} -->
+
+<p>The CHECK: and CHECK-NOT: directives both take a pattern to match. For most
+uses of FileCheck, fixed string matching is perfectly sufficient. For some
+things, a more flexible form of matching is desired. To support this, FileCheck
+allows you to specify regular expressions in matching strings, surrounded by
+double braces: <b>{{yourregex}}</b>. Because we want to use fixed string
+matching for a majority of what we do, FileCheck has been designed to support
+mixing and matching fixed string matching with regular expressions. This allows
+you to write things like this:</p>
+
+<div class="doc_code">
+<pre>
+; CHECK: movhpd <b>{{[0-9]+}}</b>(%esp), <b>{{%xmm[0-7]}}</b>
+</pre>
+</div>
+
+<p>In this case, any offset from the ESP register will be allowed, and any xmm
+register will be allowed.</p>
+
+<p>Because regular expressions are enclosed with double braces, they are
+visually distinct, and you don't need to use escape characters within the double
+braces like you would in C. In the rare case that you want to match double
+braces explicitly from the input, you can use something ugly like
+<b>{{[{][{]}}</b> as your pattern.</p>
+
+<!-- {% endraw %} -->
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="FileCheck-Variables">FileCheck Variables</a>
+</h4>
+
+<div>
+
+
+<!-- {% raw %} -->
+
+<p>It is often useful to match a pattern and then verify that it occurs again
+later in the file. For codegen tests, this can be useful to allow any register,
+but verify that that register is used consistently later. To do this, FileCheck
+allows named variables to be defined and substituted into patterns. Here is a
+simple example:</p>
+
+<div class="doc_code">
+<pre>
+; CHECK: test5:
+; CHECK: notw <b>[[REGISTER:%[a-z]+]]</b>
+; CHECK: andw {{.*}}<b>[[REGISTER]]</b>
+</pre>
+</div>
+
+<p>The first check line matches a regex (<tt>%[a-z]+</tt>) and captures it into
+the variable "REGISTER". The second line verifies that whatever is in REGISTER
+occurs later in the file after an "andw". FileCheck variable references are
+always contained in <tt>[[ ]]</tt> pairs, are named, and their names can be
+formed with the regex "<tt>[a-zA-Z][a-zA-Z0-9]*</tt>". If a colon follows the
+name, then it is a definition of the variable, if not, it is a use.</p>
+
+<p>FileCheck variables can be defined multiple times, and uses always get the
+latest value. Note that variables are all read at the start of a "CHECK" line
+and are all defined at the end. This means that if you have something like
+"<tt>CHECK: [[XYZ:.*]]x[[XYZ]]</tt>", the check line will read the previous
+value of the XYZ variable and define a new one after the match is performed. If
+you need to do something like this, you can probably take advantage of the fact
+that FileCheck is not actually line-oriented when it matches; this allows you to
+define two separate CHECK lines that match on the same line.
+</p>
+
+<!-- {% endraw %} -->
+
+</div>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h3><a name="rtvars">Variables and substitutions</a></h3>
+<!-- _______________________________________________________________________ -->
+<div>
+ <p>With a RUN line there are a number of substitutions that are permitted. In
+ general, any Tcl variable that is available in the <tt>substitute</tt>
+ function (in <tt>test/lib/llvm.exp</tt>) can be substituted into a RUN line.
+ To make a substitution just write the variable's name preceded by a $.
+ Additionally, for compatibility reasons with previous versions of the test
+ library, certain names can be accessed with an alternate syntax: a % prefix.
+ These alternates are deprecated and may go away in a future version.
+ </p>
+ <p>Here are the available variable names. The alternate syntax is listed in
+ parentheses.</p>
+
+ <dl style="margin-left: 25px">
+ <dt><b>$test</b> (%s)</dt>
+ <dd>The full path to the test case's source. This is suitable for passing
+ on the command line as the input to an llvm tool.</dd>
+
+ <dt><b>$srcdir</b></dt>
+ <dd>The source directory from where the "<tt>make check</tt>" was run.</dd>
+
+ <dt><b>objdir</b></dt>
+ <dd>The object directory that corresponds to the <tt>$srcdir</tt>.</dd>
+
+ <dt><b>subdir</b></dt>
+    <dd>A partial path from the <tt>test</tt> directory to the sub-directory
+    that contains the test source being executed.</dd>
+
+ <dt><b>srcroot</b></dt>
+ <dd>The root directory of the LLVM src tree.</dd>
+
+ <dt><b>objroot</b></dt>
+ <dd>The root directory of the LLVM object tree. This could be the same
+ as the srcroot.</dd>
+
+    <dt><b>path</b></dt>
+ <dd>The path to the directory that contains the test case source. This is
+ for locating any supporting files that are not generated by the test, but
+ used by the test.</dd>
+
+ <dt><b>tmp</b></dt>
+ <dd>The path to a temporary file name that could be used for this test case.
+ The file name won't conflict with other test cases. You can append to it if
+ you need multiple temporaries. This is useful as the destination of some
+ redirected output.</dd>
+
+ <dt><b>target_triplet</b> (%target_triplet)</dt>
+ <dd>The target triplet that corresponds to the current host machine (the one
+    running the test cases). This should probably be called "host".</dd>
+
+ <dt><b>link</b> (%link)</dt>
+    <dd>The full link command used to link LLVM executables. This has all the
+ configured -I, -L and -l options.</dd>
+
+ <dt><b>shlibext</b> (%shlibext)</dt>
+    <dd>The suffix for the host platform's shared library (DLL) files. This
+    includes the period as the first character.</dd>
+ </dl>
+ <p>To add more variables, two things need to be changed. First, add a line in
+ the <tt>test/Makefile</tt> that creates the <tt>site.exp</tt> file. This will
+ "set" the variable as a global in the site.exp file. Second, in the
+ <tt>test/lib/llvm.exp</tt> file, in the substitute proc, add the variable name
+  to the list of "global" declarations at the beginning of the proc. That's it;
+  the variable can then be used in test scripts.</p>
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h3><a name="rtfeatures">Other Features</a></h3>
+<!-- _______________________________________________________________________ -->
+<div>
+ <p>To make RUN line writing easier, there are several shell scripts located
+ in the <tt>llvm/test/Scripts</tt> directory. This directory is in the PATH
+ when running tests, so you can just call these scripts using their name. For
+ example:</p>
+ <dl>
+ <dt><b>ignore</b></dt>
+ <dd>This script runs its arguments and then always returns 0. This is useful
+ in cases where the test needs to cause a tool to generate an error (e.g. to
+ check the error output). However, any program in a pipeline that returns a
+ non-zero result will cause the test to fail. This script overcomes that
+ issue and nicely documents that the test case is purposefully ignoring the
+    result code of the tool.</dd>
+
+ <dt><b>not</b></dt>
+ <dd>This script runs its arguments and then inverts the result code from
+ it. Zero result codes become 1. Non-zero result codes become 0. This is
+ useful to invert the result of a grep. For example "not grep X" means
+ succeed only if you don't find X in the input.</dd>
+ </dl>
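+
+  <p>For example, a test can use <tt>not</tt> to assert that a string is
+  absent from a tool's output. A sketch (the pattern shown is purely
+  illustrative):</p>
+
+<div class="doc_code">
+<pre>
+; RUN: llvm-as &lt; %s | llvm-dis &gt; %t1
+; RUN: not grep {this string must not appear} %t1
+</pre>
+</div>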
+
+ <p>Sometimes it is necessary to mark a test case as "expected fail" or XFAIL.
+ You can easily mark a test as XFAIL just by including <tt>XFAIL: </tt> on a
+  line near the top of the file. This signals that the test case is expected
+  to fail. Such test cases are counted separately by the testing tool. To
+ specify an expected fail, use the XFAIL keyword in the comments of the test
+ program followed by a colon and one or more regular expressions (separated by
+ a comma). The regular expressions allow you to XFAIL the test conditionally by
+ host platform. The regular expressions following the : are matched against the
+ target triplet for the host machine. If there is a match, the test is expected
+ to fail. If not, the test is expected to succeed. To XFAIL everywhere just
+ specify <tt>XFAIL: *</tt>. Here is an example of an <tt>XFAIL</tt> line:</p>
+
+<div class="doc_code">
+<pre>
+; XFAIL: darwin,sun
+</pre>
+</div>
+
+  <p>To make the output more useful, the <tt>llvm_runtest</tt> function will
+  scan the lines of the test case for ones that contain a pattern that matches
+ PR[0-9]+. This is the syntax for specifying a PR (Problem Report) number that
+ is related to the test case. The number after "PR" specifies the LLVM bugzilla
+ number. When a PR number is specified, it will be used in the pass/fail
+ reporting. This is useful to quickly get some context when a test fails.</p>
+
+ <p>Finally, any line that contains "END." will cause the special
+ interpretation of lines to terminate. This is generally done right after the
+ last RUN: line. This has two side effects: (a) it prevents special
+ interpretation of lines that are part of the test program, not the
+ instructions to the test case, and (b) it speeds things up for really big test
+ cases by avoiding interpretation of the remainder of the file.</p>
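+
+  <p>Putting these conventions together, the header of a test case might look
+  like the following sketch (the PR number shown is purely illustrative):</p>
+
+<div class="doc_code">
+<pre>
+; RUN: llvm-as &lt; %s | llvm-dis | grep {ret i32}
+; PR1234
+; END.
+</pre>
+</div>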
+
+</div>
+
+</div>
+
+<!--=========================================================================-->
+<h2><a name="testsuiteoverview"><tt>test-suite</tt> Overview</a></h2>
+<!--=========================================================================-->
+
+<div>
+
+<p>The <tt>test-suite</tt> module contains a number of programs that can be
+compiled and executed. The <tt>test-suite</tt> includes reference outputs for
+all of the programs, so that the output of the executed program can be checked
+for correctness.</p>
+
+<p><tt>test-suite</tt> tests are divided into three types of tests: MultiSource,
+SingleSource, and External.</p>
+
+<ul>
+<li><tt>test-suite/SingleSource</tt>
+<p>The SingleSource directory contains test programs that are only a single
+source file in size. These are usually small benchmark programs or small
+programs that calculate a particular value. Several such programs are grouped
+together in each directory.</p></li>
+
+<li><tt>test-suite/MultiSource</tt>
+<p>The MultiSource directory contains subdirectories which contain entire
+programs with multiple source files. Large benchmarks and whole applications
+go here.</p></li>
+
+<li><tt>test-suite/External</tt>
+<p>The External directory contains Makefiles for building code that is external
+to (i.e., not distributed with) LLVM. The most prominent members of this
+directory are the SPEC 95 and SPEC 2000 benchmark suites. The <tt>External</tt>
+directory does not contain these actual tests, but only the Makefiles that know
+how to properly compile these programs from somewhere else. When
+using <tt>LNT</tt>, use the <tt>--test-externals</tt> option to include these
+tests in the results.</p></li>
+</ul>
+</div>
+
+<!--=========================================================================-->
+<h2><a name="testsuitequickstart"><tt>test-suite</tt> Quickstart</a></h2>
+<!--=========================================================================-->
+
+<div>
+<p>The modern way of running the <tt>test-suite</tt> is focused on testing and
+benchmarking complete compilers using
+the <a href="http://llvm.org/docs/lnt">LNT</a> testing infrastructure.</p>
+
+<p>For more information on using LNT to execute the <tt>test-suite</tt>, please
+see the <a href="http://llvm.org/docs/lnt/quickstart.html">LNT Quickstart</a>
+documentation.</p>
+</div>
+
+<!--=========================================================================-->
+<h2><a name="testsuitemakefiles"><tt>test-suite</tt> Makefiles</a></h2>
+<!--=========================================================================-->
+
+<div>
+<p>Historically, the <tt>test-suite</tt> was executed using a complicated setup
+of Makefiles. The LNT based approach above is recommended for most users, but
+there are some testing scenarios which are not supported by the LNT approach. In
+addition, LNT currently uses the Makefile setup under the covers and so
+developers who are interested in how LNT works under the hood may want to
+understand the Makefile based setup.</p>
+
+<p>For more information on the <tt>test-suite</tt> Makefile setup, please see
+the <a href="TestSuiteMakefileGuide.html">Test Suite Makefile Guide.</a></p>
+</div>
+
+<!-- *********************************************************************** -->
+
+<hr>
+<address>
+ <a href="http://jigsaw.w3.org/css-validator/check/referer"><img
+ src="http://jigsaw.w3.org/css-validator/images/vcss-blue" alt="Valid CSS"></a>
+ <a href="http://validator.w3.org/check/referer"><img
+ src="http://www.w3.org/Icons/valid-html401-blue" alt="Valid HTML 4.01"></a>
+
+ John T. Criswell, Daniel Dunbar, Reid Spencer, and Tanya Lattner<br>
+ <a href="http://llvm.org/">The LLVM Compiler Infrastructure</a><br>
+ Last modified: $Date$
+</address>
+</body>
+</html>
diff --git a/docs/WritingAnLLVMBackend.html b/docs/WritingAnLLVMBackend.html
new file mode 100644
index 00000000000..441d122f539
--- /dev/null
+++ b/docs/WritingAnLLVMBackend.html
@@ -0,0 +1,2533 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
+ "http://www.w3.org/TR/html4/strict.dtd">
+<html>
+<head>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
+ <title>Writing an LLVM Compiler Backend</title>
+ <link rel="stylesheet" href="_static/llvm.css" type="text/css">
+</head>
+
+<body>
+
+<h1>
+ Writing an LLVM Compiler Backend
+</h1>
+
+<ol>
+ <li><a href="#intro">Introduction</a>
+ <ul>
+ <li><a href="#Audience">Audience</a></li>
+ <li><a href="#Prerequisite">Prerequisite Reading</a></li>
+ <li><a href="#Basic">Basic Steps</a></li>
+ <li><a href="#Preliminaries">Preliminaries</a></li>
+ </ul>
+ <li><a href="#TargetMachine">Target Machine</a></li>
+ <li><a href="#TargetRegistration">Target Registration</a></li>
+ <li><a href="#RegisterSet">Register Set and Register Classes</a>
+ <ul>
+ <li><a href="#RegisterDef">Defining a Register</a></li>
+ <li><a href="#RegisterClassDef">Defining a Register Class</a></li>
+ <li><a href="#implementRegister">Implement a subclass of TargetRegisterInfo</a></li>
+ </ul></li>
+ <li><a href="#InstructionSet">Instruction Set</a>
+ <ul>
+ <li><a href="#operandMapping">Instruction Operand Mapping</a></li>
+ <li><a href="#implementInstr">Implement a subclass of TargetInstrInfo</a></li>
+ <li><a href="#branchFolding">Branch Folding and If Conversion</a></li>
+ </ul></li>
+ <li><a href="#InstructionSelector">Instruction Selector</a>
+ <ul>
+ <li><a href="#LegalizePhase">The SelectionDAG Legalize Phase</a>
+ <ul>
+ <li><a href="#promote">Promote</a></li>
+ <li><a href="#expand">Expand</a></li>
+ <li><a href="#custom">Custom</a></li>
+ <li><a href="#legal">Legal</a></li>
+ </ul></li>
+ <li><a href="#callingConventions">Calling Conventions</a></li>
+ </ul></li>
+ <li><a href="#assemblyPrinter">Assembly Printer</a></li>
+ <li><a href="#subtargetSupport">Subtarget Support</a></li>
+ <li><a href="#jitSupport">JIT Support</a>
+ <ul>
+ <li><a href="#mce">Machine Code Emitter</a></li>
+ <li><a href="#targetJITInfo">Target JIT Info</a></li>
+ </ul></li>
+</ol>
+
+<div class="doc_author">
+ <p>Written by <a href="http://www.woo.com">Mason Woo</a> and
+ <a href="http://misha.brukman.net">Misha Brukman</a></p>
+</div>
+
+<!-- *********************************************************************** -->
+<h2>
+ <a name="intro">Introduction</a>
+</h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>
+This document describes techniques for writing compiler backends that convert
+the LLVM Intermediate Representation (IR) to code for a specified machine or to
+other languages. Code intended for a specific machine can take the form of
+either assembly code or binary code (usable for a JIT compiler).
+</p>
+
+<p>
+The backend of LLVM features a target-independent code generator that may create
+output for several types of target CPUs &mdash; including X86, PowerPC, ARM,
+and SPARC. The backend may also be used to generate code targeted at SPUs of the
+Cell processor or GPUs to support the execution of compute kernels.
+</p>
+
+<p>
+The document focuses on existing examples found in subdirectories
+of <tt>llvm/lib/Target</tt> in a downloaded LLVM release. In particular, this
+document focuses on the example of creating a static compiler (one that emits
+text assembly) for a SPARC target, because SPARC has fairly standard
+characteristics, such as a RISC instruction set and straightforward calling
+conventions.
+</p>
+
+<h3>
+ <a name="Audience">Audience</a>
+</h3>
+
+<div>
+
+<p>
+The audience for this document is anyone who needs to write an LLVM backend to
+generate code for a specific hardware or software target.
+</p>
+
+</div>
+
+<h3>
+ <a name="Prerequisite">Prerequisite Reading</a>
+</h3>
+
+<div>
+
+<p>
+These essential documents must be read before reading this document:
+</p>
+
+<ul>
+<li><i><a href="LangRef.html">LLVM Language Reference
+ Manual</a></i> &mdash; a reference manual for the LLVM assembly language.</li>
+
+<li><i><a href="CodeGenerator.html">The LLVM
+ Target-Independent Code Generator</a></i> &mdash; a guide to the components
+ (classes and code generation algorithms) for translating the LLVM internal
+ representation into machine code for a specified target. Pay particular
+ attention to the descriptions of code generation stages: Instruction
+ Selection, Scheduling and Formation, SSA-based Optimization, Register
+ Allocation, Prolog/Epilog Code Insertion, Late Machine Code Optimizations,
+ and Code Emission.</li>
+
+<li><i><a href="TableGenFundamentals.html">TableGen
+ Fundamentals</a></i> &mdash;a document that describes the TableGen
+ (<tt>tblgen</tt>) application that manages domain-specific information to
+ support LLVM code generation. TableGen processes input from a target
+ description file (<tt>.td</tt> suffix) and generates C++ code that can be
+ used for code generation.</li>
+
+<li><i><a href="WritingAnLLVMPass.html">Writing an LLVM
+ Pass</a></i> &mdash; The assembly printer is a <tt>FunctionPass</tt>, as are
+ several SelectionDAG processing steps.</li>
+</ul>
+
+<p>
+To follow the SPARC examples in this document, have a copy of
+<i><a href="http://www.sparc.org/standards/V8.pdf">The SPARC Architecture
+Manual, Version 8</a></i> for reference. For details about the ARM instruction
+set, refer to the <i><a href="http://infocenter.arm.com/">ARM Architecture
+Reference Manual</a></i>. For more about the GNU Assembler format
+(<tt>GAS</tt>), see
+<i><a href="http://sourceware.org/binutils/docs/as/index.html">Using As</a></i>,
+especially for the assembly printer. <i>Using As</i> contains a list of target
+machine dependent features.
+</p>
+
+</div>
+
+<h3>
+ <a name="Basic">Basic Steps</a>
+</h3>
+
+<div>
+
+<p>
+To write a compiler backend for LLVM that converts the LLVM IR to code for a
+specified target (machine or other language), follow these steps:
+</p>
+
+<ul>
+<li>Create a subclass of the TargetMachine class that describes characteristics
+ of your target machine. Copy existing examples of specific TargetMachine
+ class and header files; for example, start with
+ <tt>SparcTargetMachine.cpp</tt> and <tt>SparcTargetMachine.h</tt>, but
+ change the file names for your target. Similarly, change code that
+ references "Sparc" to reference your target. </li>
+
+<li>Describe the register set of the target. Use TableGen to generate code for
+ register definition, register aliases, and register classes from a
+ target-specific <tt>RegisterInfo.td</tt> input file. You should also write
+ additional code for a subclass of the TargetRegisterInfo class that
+ represents the class register file data used for register allocation and
+ also describes the interactions between registers.</li>
+
+<li>Describe the instruction set of the target. Use TableGen to generate code
+ for target-specific instructions from target-specific versions of
+ <tt>TargetInstrFormats.td</tt> and <tt>TargetInstrInfo.td</tt>. You should
+ write additional code for a subclass of the TargetInstrInfo class to
+ represent machine instructions supported by the target machine. </li>
+
+<li>Describe the selection and conversion of the LLVM IR from a Directed Acyclic
+ Graph (DAG) representation of instructions to native target-specific
+ instructions. Use TableGen to generate code that matches patterns and
+ selects instructions based on additional information in a target-specific
+ version of <tt>TargetInstrInfo.td</tt>. Write code
+ for <tt>XXXISelDAGToDAG.cpp</tt>, where XXX identifies the specific target,
+ to perform pattern matching and DAG-to-DAG instruction selection. Also write
+ code in <tt>XXXISelLowering.cpp</tt> to replace or remove operations and
+ data types that are not supported natively in a SelectionDAG. </li>
+
+<li>Write code for an assembly printer that converts LLVM IR to a GAS format for
+ your target machine. You should add assembly strings to the instructions
+ defined in your target-specific version of <tt>TargetInstrInfo.td</tt>. You
+ should also write code for a subclass of AsmPrinter that performs the
+ LLVM-to-assembly conversion and a trivial subclass of TargetAsmInfo.</li>
+
+<li>Optionally, add support for subtargets (i.e., variants with different
+ capabilities). You should also write code for a subclass of the
+ TargetSubtarget class, which allows you to use the <tt>-mcpu=</tt>
+ and <tt>-mattr=</tt> command-line options.</li>
+
+<li>Optionally, add JIT support and create a machine code emitter (subclass of
+ TargetJITInfo) that is used to emit binary code directly into memory. </li>
+</ul>
+
+<p>
+In the <tt>.cpp</tt> and <tt>.h</tt> files, initially stub up these methods and
+then implement them later. At first, you may not know which private members
+the class will need or which components will need to be subclassed.
+</p>
+
+</div>
+
+<h3>
+ <a name="Preliminaries">Preliminaries</a>
+</h3>
+
+<div>
+
+<p>
+To actually create your compiler backend, you need to create and modify a few
+files. The absolute minimum is discussed here. But to actually use the LLVM
+target-independent code generator, you must perform the steps described in
+the <a href="CodeGenerator.html">LLVM
+Target-Independent Code Generator</a> document.
+</p>
+
+<p>
+First, you should create a subdirectory under <tt>lib/Target</tt> to hold all
+the files related to your target. If your target is called "Dummy," create the
+directory <tt>lib/Target/Dummy</tt>.
+</p>
+
+<p>
+In this new
+directory, create a <tt>Makefile</tt>. It is easiest to copy a
+<tt>Makefile</tt> of another target and modify it. It should at least contain
+the <tt>LEVEL</tt>, <tt>LIBRARYNAME</tt> and <tt>TARGET</tt> variables, and then
+include <tt>$(LEVEL)/Makefile.common</tt>. The library can be
+named <tt>LLVMDummy</tt> (for example, see the MIPS target). Alternatively, you
+can split the library into <tt>LLVMDummyCodeGen</tt>
+and <tt>LLVMDummyAsmPrinter</tt>, the latter of which should be implemented in a
+subdirectory below <tt>lib/Target/Dummy</tt> (for example, see the PowerPC
+target).
+</p>
+
+<p>
+Note that these two naming schemes are hardcoded into <tt>llvm-config</tt>.
+Using any other naming scheme will confuse <tt>llvm-config</tt> and produce a
+lot of (seemingly unrelated) linker errors when linking <tt>llc</tt>.
+</p>
+
+<p>
+To make your target actually do something, you need to implement a subclass of
+<tt>TargetMachine</tt>. This implementation should typically be in the file
+<tt>lib/Target/DummyTargetMachine.cpp</tt>, but any file in
+the <tt>lib/Target</tt> directory will be built and should work. To use LLVM's
+target independent code generator, you should do what all current machine
+backends do: create a subclass of <tt>LLVMTargetMachine</tt>. (To create a
+target from scratch, create a subclass of <tt>TargetMachine</tt>.)
+</p>
+
+<p>
+To get LLVM to actually build and link your target, you need to add it to
+the <tt>TARGETS_TO_BUILD</tt> variable. To do this, you modify the configure
+script to know about your target when parsing the <tt>--enable-targets</tt>
+option. Search the configure script for <tt>TARGETS_TO_BUILD</tt>, add your
+target to the lists there (some creativity required), and then
+reconfigure. Alternatively, you can change <tt>autotools/configure.ac</tt> and
+regenerate configure by running <tt>./autoconf/AutoRegen.sh</tt>.
+</p>
+
+</div>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2>
+ <a name="TargetMachine">Target Machine</a>
+</h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>
+<tt>LLVMTargetMachine</tt> is designed as a base class for targets implemented
+with the LLVM target-independent code generator. The <tt>LLVMTargetMachine</tt>
+class should be specialized by a concrete target class that implements the
+various virtual methods. <tt>LLVMTargetMachine</tt> is defined as a subclass of
+<tt>TargetMachine</tt> in <tt>include/llvm/Target/TargetMachine.h</tt>. The
+<tt>TargetMachine</tt> class implementation (<tt>TargetMachine.cpp</tt>) also
+processes numerous command-line options.
+</p>
+
+<p>
+To create a concrete target-specific subclass of <tt>LLVMTargetMachine</tt>,
+start by copying an existing <tt>TargetMachine</tt> class and header. You
+should name the files that you create to reflect your specific target. For
+instance, for the SPARC target, name the files <tt>SparcTargetMachine.h</tt> and
+<tt>SparcTargetMachine.cpp</tt>.
+</p>
+
+<p>
+For a target machine <tt>XXX</tt>, the implementation of
+<tt>XXXTargetMachine</tt> must have access methods to obtain objects that
+represent target components. These methods are named <tt>get*Info</tt>, and are
+intended to obtain the instruction set (<tt>getInstrInfo</tt>), register set
+(<tt>getRegisterInfo</tt>), stack frame layout (<tt>getFrameInfo</tt>), and
+similar information. <tt>XXXTargetMachine</tt> must also implement the
+<tt>getTargetData</tt> method to access an object with target-specific data
+characteristics, such as data type size and alignment requirements.
+</p>
+
+<p>
+For instance, for the SPARC target, the header file
+<tt>SparcTargetMachine.h</tt> declares prototypes for several <tt>get*Info</tt>
+and <tt>getTargetData</tt> methods that simply return a class member.
+</p>
+
+<div class="doc_code">
+<pre>
+namespace llvm {
+
+class Module;
+
+class SparcTargetMachine : public LLVMTargetMachine {
+ const TargetData DataLayout; // Calculates type size &amp; alignment
+ SparcSubtarget Subtarget;
+ SparcInstrInfo InstrInfo;
+ TargetFrameInfo FrameInfo;
+
+protected:
+ virtual const TargetAsmInfo *createTargetAsmInfo() const;
+
+public:
+ SparcTargetMachine(const Module &amp;M, const std::string &amp;FS);
+
+ virtual const SparcInstrInfo *getInstrInfo() const {return &amp;InstrInfo; }
+ virtual const TargetFrameInfo *getFrameInfo() const {return &amp;FrameInfo; }
+ virtual const TargetSubtarget *getSubtargetImpl() const{return &amp;Subtarget; }
+ virtual const TargetRegisterInfo *getRegisterInfo() const {
+ return &amp;InstrInfo.getRegisterInfo();
+ }
+ virtual const TargetData *getTargetData() const { return &amp;DataLayout; }
+ static unsigned getModuleMatchQuality(const Module &amp;M);
+
+ // Pass Pipeline Configuration
+ virtual bool addInstSelector(PassManagerBase &amp;PM, bool Fast);
+ virtual bool addPreEmitPass(PassManagerBase &amp;PM, bool Fast);
+};
+
+} // end namespace llvm
+</pre>
+</div>
+
+<p>In particular, the following access methods simply return the corresponding
+class members:</p>
+
+<ul>
+<li><tt>getInstrInfo()</tt></li>
+<li><tt>getRegisterInfo()</tt></li>
+<li><tt>getFrameInfo()</tt></li>
+<li><tt>getTargetData()</tt></li>
+<li><tt>getSubtargetImpl()</tt></li>
+</ul>
+
+<p>For some targets, you also need to support the following methods:</p>
+
+<ul>
+<li><tt>getTargetLowering()</tt></li>
+<li><tt>getJITInfo()</tt></li>
+</ul>
+
+<p>
+In addition, the <tt>XXXTargetMachine</tt> constructor should specify a
+<tt>TargetDescription</tt> string that determines the data layout for the target
+machine, including characteristics such as pointer size, alignment, and
+endianness. For example, the constructor for SparcTargetMachine contains the
+following:
+</p>
+
+<div class="doc_code">
+<pre>
+SparcTargetMachine::SparcTargetMachine(const Module &amp;M, const std::string &amp;FS)
+ : DataLayout("E-p:32:32-f128:128:128"),
+ Subtarget(M, FS), InstrInfo(Subtarget),
+ FrameInfo(TargetFrameInfo::StackGrowsDown, 8, 0) {
+}
+</pre>
+</div>
+
+<p>Hyphens separate portions of the <tt>TargetDescription</tt> string.</p>
+
+<ul>
+<li>An upper-case "<tt>E</tt>" in the string indicates a big-endian target data
+    model; a lower-case "<tt>e</tt>" indicates little-endian.</li>
+
+<li>"<tt>p:</tt>" is followed by pointer information: size, ABI alignment, and
+ preferred alignment. If only two figures follow "<tt>p:</tt>", then the
+ first value is pointer size, and the second value is both ABI and preferred
+ alignment.</li>
+
+<li>Then a letter for numeric type alignment: "<tt>i</tt>", "<tt>f</tt>",
+    "<tt>v</tt>", or "<tt>a</tt>" (corresponding to integer, floating point,
+    vector, or aggregate). "<tt>i</tt>", "<tt>v</tt>", or "<tt>a</tt>" are
+    followed by ABI alignment and preferred alignment. "<tt>f</tt>" is followed
+    by three values: the first indicates the size of a long double, then ABI
+    alignment, and then preferred alignment.</li>
+</ul>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2>
+ <a name="TargetRegistration">Target Registration</a>
+</h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>
+You must also register your target with the <tt>TargetRegistry</tt>, which is
+what other LLVM tools use to be able to look up and use your target at
+runtime. The <tt>TargetRegistry</tt> can be used directly, but for most targets
+there are helper templates which should take care of the work for you.</p>
+
+<p>
+All targets should declare a global <tt>Target</tt> object which is used to
+represent the target during registration. Then, in the target's TargetInfo
+library, the target should define that object and use
+the <tt>RegisterTarget</tt> template to register the target. For example, the Sparc registration code looks like this:
+</p>
+
+<div class="doc_code">
+<pre>
+Target llvm::TheSparcTarget;
+
+extern "C" void LLVMInitializeSparcTargetInfo() {
+ RegisterTarget&lt;Triple::sparc, /*HasJIT=*/false&gt;
+ X(TheSparcTarget, "sparc", "Sparc");
+}
+</pre>
+</div>
+
+<p>
+This allows the <tt>TargetRegistry</tt> to look up the target by name or by
+target triple. In addition, most targets will also register additional features
+which are available in separate libraries. These registration steps are
+separate, because some clients may wish to only link in some parts of the target
+-- the JIT code generator does not require the use of the assembler printer, for
+example. Here is an example of registering the Sparc assembly printer:
+</p>
+
+<div class="doc_code">
+<pre>
+extern "C" void LLVMInitializeSparcAsmPrinter() {
+ RegisterAsmPrinter&lt;SparcAsmPrinter&gt; X(TheSparcTarget);
+}
+</pre>
+</div>
+
+<p>
+For more information, see
+"<a href="/doxygen/TargetRegistry_8h-source.html">llvm/Target/TargetRegistry.h</a>".
+</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2>
+ <a name="RegisterSet">Register Set and Register Classes</a>
+</h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>
+You should describe a concrete target-specific class that represents the
+register file of a target machine. This class is called <tt>XXXRegisterInfo</tt>
+(where <tt>XXX</tt> identifies the target) and represents the class register
+file data that is used for register allocation. It also describes the
+interactions between registers.
+</p>
+
+<p>
+You also need to define register classes to categorize related registers. A
+register class should be added for groups of registers that are all treated the
+same way for some instruction. Typical examples are register classes for
+integer, floating-point, or vector registers. The register allocator allows an
+instruction to use any register in a specified register class to perform the
+operation. Virtual registers assigned to instructions are drawn from these
+sets, and register classes let the target-independent register allocator
+automatically choose the actual registers.
+</p>
+
+<p>
+Much of the code for registers, including register definition, register aliases,
+and register classes, is generated by TableGen from <tt>XXXRegisterInfo.td</tt>
+input files and placed in <tt>XXXGenRegisterInfo.h.inc</tt> and
+<tt>XXXGenRegisterInfo.inc</tt> output files. Some of the code in the
+implementation of <tt>XXXRegisterInfo</tt> requires hand-coding.
+</p>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="RegisterDef">Defining a Register</a>
+</h3>
+
+<div>
+
+<p>
+The <tt>XXXRegisterInfo.td</tt> file typically starts with register definitions
+for a target machine. The <tt>Register</tt> class (specified
+in <tt>Target.td</tt>) is used to define an object for each register. The
+specified string <tt>n</tt> becomes the <tt>Name</tt> of the register. The
+basic <tt>Register</tt> object does not have any subregisters and does not
+specify any aliases.
+</p>
+
+<div class="doc_code">
+<pre>
+class Register&lt;string n&gt; {
+ string Namespace = "";
+ string AsmName = n;
+ string Name = n;
+ int SpillSize = 0;
+ int SpillAlignment = 0;
+ list&lt;Register&gt; Aliases = [];
+ list&lt;Register&gt; SubRegs = [];
+ list&lt;int&gt; DwarfNumbers = [];
+}
+</pre>
+</div>
+
+<p>
+For example, in the <tt>X86RegisterInfo.td</tt> file, there are register
+definitions that utilize the Register class, such as:
+</p>
+
+<div class="doc_code">
+<pre>
+def AL : Register&lt;"AL"&gt;, DwarfRegNum&lt;[0, 0, 0]&gt;;
+</pre>
+</div>
+
+<p>
+This defines the register <tt>AL</tt> and assigns it values (with
+<tt>DwarfRegNum</tt>) that are used by <tt>gcc</tt>, <tt>gdb</tt>, or a debug
+information writer to identify a register. For register
+<tt>AL</tt>, <tt>DwarfRegNum</tt> takes an array of 3 values representing 3
+different modes: the first element is for X86-64, the second for exception
+handling (EH) on X86-32, and the third is generic. -1 is a special Dwarf number
+that indicates the gcc number is undefined, and -2 indicates the register number
+is invalid for this mode.
+</p>
+
+<p>
+From the previously described line in the <tt>X86RegisterInfo.td</tt> file,
+TableGen generates this code in the <tt>X86GenRegisterInfo.inc</tt> file:
+</p>
+
+<div class="doc_code">
+<pre>
+static const unsigned GR8[] = { X86::AL, ... };
+
+const unsigned AL_AliasSet[] = { X86::AX, X86::EAX, X86::RAX, 0 };
+
+const TargetRegisterDesc RegisterDescriptors[] = {
+ ...
+{ "AL", "AL", AL_AliasSet, Empty_SubRegsSet, Empty_SubRegsSet, AL_SuperRegsSet }, ...
+</pre>
+</div>
+
+<p>
+From the register info file, TableGen generates a <tt>TargetRegisterDesc</tt>
+object for each register. <tt>TargetRegisterDesc</tt> is defined in
+<tt>include/llvm/Target/TargetRegisterInfo.h</tt> with the following fields:
+</p>
+
+<div class="doc_code">
+<pre>
+struct TargetRegisterDesc {
+ const char *AsmName; // Assembly language name for the register
+ const char *Name; // Printable name for the reg (for debugging)
+ const unsigned *AliasSet; // Register Alias Set
+ const unsigned *SubRegs; // Sub-register set
+ const unsigned *ImmSubRegs; // Immediate sub-register set
+ const unsigned *SuperRegs; // Super-register set
+};</pre>
+</div>
+
+<p>
+TableGen uses the entire target description file (<tt>.td</tt>) to determine
+text names for the register (in the <tt>AsmName</tt> and <tt>Name</tt> fields of
+<tt>TargetRegisterDesc</tt>) and the relationships of other registers to the
+defined register (in the other <tt>TargetRegisterDesc</tt> fields). In this
+example, other definitions establish the registers "<tt>AX</tt>",
+"<tt>EAX</tt>", and "<tt>RAX</tt>" as aliases for one another, so TableGen
+generates a null-terminated array (<tt>AL_AliasSet</tt>) for this register alias
+set.
+</p>
+
+<p>
+The <tt>Register</tt> class is commonly used as a base class for more complex
+classes. In <tt>Target.td</tt>, the <tt>Register</tt> class is the base for the
+<tt>RegisterWithSubRegs</tt> class that is used to define registers that need to
+specify subregisters in the <tt>SubRegs</tt> list, as shown here:
+</p>
+
+<div class="doc_code">
+<pre>
+class RegisterWithSubRegs&lt;string n, list&lt;Register&gt; subregs&gt; : Register&lt;n&gt; {
+ let SubRegs = subregs;
+}
+</pre>
+</div>
+
+<p>
+In <tt>SparcRegisterInfo.td</tt>, additional register classes are defined for
+SPARC: a Register subclass, SparcReg, and further subclasses: <tt>Ri</tt>,
+<tt>Rf</tt>, and <tt>Rd</tt>. SPARC registers are identified by 5-bit ID
+numbers, which is a feature common to these subclasses. Note the use of
+'<tt>let</tt>' expressions to override values that are initially defined in a
+superclass (such as the <tt>SubRegs</tt> field in the <tt>Rd</tt> class).
+</p>
+
+<div class="doc_code">
+<pre>
+class SparcReg&lt;string n&gt; : Register&lt;n&gt; {
+ field bits&lt;5&gt; Num;
+ let Namespace = "SP";
+}
+// Ri - 32-bit integer registers
+class Ri&lt;bits&lt;5&gt; num, string n&gt; : SparcReg&lt;n&gt; {
+  let Num = num;
+}
+// Rf - 32-bit floating-point registers
+class Rf&lt;bits&lt;5&gt; num, string n&gt; : SparcReg&lt;n&gt; {
+  let Num = num;
+}
+// Rd - Slots in the FP register file for 64-bit floating-point values.
+class Rd&lt;bits&lt;5&gt; num, string n, list&lt;Register&gt; subregs&gt; : SparcReg&lt;n&gt; {
+  let Num = num;
+  let SubRegs = subregs;
+}
+</pre>
+</div>
+
+<p>
+In the <tt>SparcRegisterInfo.td</tt> file, there are register definitions that
+utilize these subclasses of <tt>Register</tt>, such as:
+</p>
+
+<div class="doc_code">
+<pre>
+def G0 : Ri&lt; 0, "G0"&gt;, DwarfRegNum&lt;[0]&gt;;
+def G1 : Ri&lt; 1, "G1"&gt;, DwarfRegNum&lt;[1]&gt;;
+...
+def F0 : Rf&lt; 0, "F0"&gt;, DwarfRegNum&lt;[32]&gt;;
+def F1 : Rf&lt; 1, "F1"&gt;, DwarfRegNum&lt;[33]&gt;;
+...
+def D0 : Rd&lt; 0, "F0", [F0, F1]&gt;, DwarfRegNum&lt;[32]&gt;;
+def D1 : Rd&lt; 2, "F2", [F2, F3]&gt;, DwarfRegNum&lt;[34]&gt;;
+</pre>
+</div>
+
+<p>
+The last two registers shown above (<tt>D0</tt> and <tt>D1</tt>) are
+double-precision floating-point registers that are aliases for pairs of
+single-precision floating-point sub-registers. In addition to aliases, the
+sub-register and super-register relationships of the defined register are in
+fields of a register's TargetRegisterDesc.
+</p>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="RegisterClassDef">Defining a Register Class</a>
+</h3>
+
+<div>
+
+<p>
+The <tt>RegisterClass</tt> class (specified in <tt>Target.td</tt>) is used to
+define an object that represents a group of related registers and also defines
+the default allocation order of the registers. A target description file
+<tt>XXXRegisterInfo.td</tt> that uses <tt>Target.td</tt> can construct register
+classes using the following class:
+</p>
+
+<div class="doc_code">
+<pre>
+class RegisterClass&lt;string namespace,
+list&lt;ValueType&gt; regTypes, int alignment, dag regList&gt; {
+ string Namespace = namespace;
+ list&lt;ValueType&gt; RegTypes = regTypes;
+ int Size = 0; // spill size, in bits; zero lets tblgen pick the size
+ int Alignment = alignment;
+
+ // CopyCost is the cost of copying a value between two registers
+ // default value 1 means a single instruction
+ // A negative value means copying is extremely expensive or impossible
+ int CopyCost = 1;
+ dag MemberList = regList;
+
+ // for register classes that are subregisters of this class
+ list&lt;RegisterClass&gt; SubRegClassList = [];
+
+ code MethodProtos = [{}]; // to insert arbitrary code
+ code MethodBodies = [{}];
+}
+</pre>
+</div>
+
+<p>To define a RegisterClass, use the following 4 arguments:</p>
+
+<ul>
+<li>The first argument of the definition is the name of the namespace.</li>
+
+<li>The second argument is a list of <tt>ValueType</tt> register type values
+ that are defined in <tt>include/llvm/CodeGen/ValueTypes.td</tt>. Defined
+ values include integer types (such as <tt>i16</tt>, <tt>i32</tt>,
+ and <tt>i1</tt> for Boolean), floating-point types
+ (<tt>f32</tt>, <tt>f64</tt>), and vector types (for example, <tt>v8i16</tt>
+ for an <tt>8 x i16</tt> vector). All registers in a <tt>RegisterClass</tt>
+ must have the same <tt>ValueType</tt>, but some registers may store vector
+ data in different configurations. For example a register that can process a
+ 128-bit vector may be able to handle 16 8-bit integer elements, 8 16-bit
+ integers, 4 32-bit integers, and so on. </li>
+
+<li>The third argument of the <tt>RegisterClass</tt> definition specifies the
+ alignment required of the registers when they are stored or loaded to
+ memory.</li>
+
+<li>The final argument, <tt>regList</tt>, specifies which registers are in this
+ class. If an alternative allocation order method is not specified, then
+ <tt>regList</tt> also defines the order of allocation used by the register
+ allocator. Besides simply listing registers with <tt>(add R0, R1, ...)</tt>,
+ more advanced set operators are available. See
+ <tt>include/llvm/Target/Target.td</tt> for more information.</li>
+</ul>
+
+<p>
+In <tt>SparcRegisterInfo.td</tt>, three RegisterClass objects are defined:
+<tt>FPRegs</tt>, <tt>DFPRegs</tt>, and <tt>IntRegs</tt>. For all three register
+classes, the first argument defines the namespace with the string
+'<tt>SP</tt>'. <tt>FPRegs</tt> defines a group of 32 single-precision
+floating-point registers (<tt>F0</tt> to <tt>F31</tt>); <tt>DFPRegs</tt> defines
+a group of 16 double-precision registers
+(<tt>D0-D15</tt>).
+</p>
+
+<div class="doc_code">
+<pre>
+// F0, F1, F2, ..., F31
+def FPRegs : RegisterClass&lt;"SP", [f32], 32, (sequence "F%u", 0, 31)&gt;;
+
+def DFPRegs : RegisterClass&lt;"SP", [f64], 64,
+ (add D0, D1, D2, D3, D4, D5, D6, D7, D8,
+ D9, D10, D11, D12, D13, D14, D15)&gt;;
+&nbsp;
+def IntRegs : RegisterClass&lt;"SP", [i32], 32,
+ (add L0, L1, L2, L3, L4, L5, L6, L7,
+ I0, I1, I2, I3, I4, I5,
+ O0, O1, O2, O3, O4, O5, O7,
+ G1,
+ // Non-allocatable regs:
+ G2, G3, G4,
+ O6, // stack ptr
+ I6, // frame ptr
+ I7, // return address
+ G0, // constant zero
+ G5, G6, G7 // reserved for kernel
+ )&gt;;
+</pre>
+</div>
+
+<p>
+Using <tt>SparcRegisterInfo.td</tt> with TableGen generates several output files
+that are intended for inclusion in other source code that you write.
+<tt>SparcRegisterInfo.td</tt> generates <tt>SparcGenRegisterInfo.h.inc</tt>,
+which should be included in the header file for the SPARC register
+implementation that you write (<tt>SparcRegisterInfo.h</tt>). In
+<tt>SparcGenRegisterInfo.h.inc</tt> a new structure is defined called
+<tt>SparcGenRegisterInfo</tt> that uses <tt>TargetRegisterInfo</tt> as its
+base. It also specifies types, based upon the defined register
+classes: <tt>DFPRegsClass</tt>, <tt>FPRegsClass</tt>, and <tt>IntRegsClass</tt>.
+</p>
+
+<p>
+<tt>SparcRegisterInfo.td</tt> also generates <tt>SparcGenRegisterInfo.inc</tt>,
+which is included at the bottom of <tt>SparcRegisterInfo.cpp</tt>, the SPARC
+register implementation. The code below shows only the generated integer
+registers and associated register classes. The order of registers
+in <tt>IntRegs</tt> reflects the order in the definition of <tt>IntRegs</tt> in
+the target description file.
+</p>
+
+<div class="doc_code">
+<pre> // IntRegs Register Class...
+ static const unsigned IntRegs[] = {
+ SP::L0, SP::L1, SP::L2, SP::L3, SP::L4, SP::L5,
+ SP::L6, SP::L7, SP::I0, SP::I1, SP::I2, SP::I3,
+ SP::I4, SP::I5, SP::O0, SP::O1, SP::O2, SP::O3,
+ SP::O4, SP::O5, SP::O7, SP::G1, SP::G2, SP::G3,
+ SP::G4, SP::O6, SP::I6, SP::I7, SP::G0, SP::G5,
+ SP::G6, SP::G7,
+ };
+
+ // IntRegsVTs Register Class Value Types...
+ static const MVT::ValueType IntRegsVTs[] = {
+ MVT::i32, MVT::Other
+ };
+
+namespace SP { // Register class instances
+ DFPRegsClass&nbsp;&nbsp;&nbsp; DFPRegsRegClass;
+ FPRegsClass&nbsp;&nbsp;&nbsp;&nbsp; FPRegsRegClass;
+ IntRegsClass&nbsp;&nbsp;&nbsp; IntRegsRegClass;
+...
+ // IntRegs Sub-register Classess...
+ static const TargetRegisterClass* const IntRegsSubRegClasses [] = {
+ NULL
+ };
+...
+ // IntRegs Super-register Classess...
+ static const TargetRegisterClass* const IntRegsSuperRegClasses [] = {
+ NULL
+ };
+...
+ // IntRegs Register Class sub-classes...
+ static const TargetRegisterClass* const IntRegsSubclasses [] = {
+ NULL
+ };
+...
+ // IntRegs Register Class super-classes...
+ static const TargetRegisterClass* const IntRegsSuperclasses [] = {
+ NULL
+ };
+
+ IntRegsClass::IntRegsClass() : TargetRegisterClass(IntRegsRegClassID,
+ IntRegsVTs, IntRegsSubclasses, IntRegsSuperclasses, IntRegsSubRegClasses,
+ IntRegsSuperRegClasses, 4, 4, 1, IntRegs, IntRegs + 32) {}
+}
+</pre>
+</div>
+
+<p>
+The register allocators will avoid using reserved registers, and callee saved
+registers are not used until all the volatile registers have been used. That
+is usually good enough, but in some cases it may be necessary to provide custom
+allocation orders.
+</p>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="implementRegister">Implement a subclass of</a>
+ <a href="CodeGenerator.html#targetregisterinfo">TargetRegisterInfo</a>
+</h3>
+
+<div>
+
+<p>
+The final step is to hand code portions of <tt>XXXRegisterInfo</tt>, which
+implements the interface described in <tt>TargetRegisterInfo.h</tt>. These
+functions return <tt>0</tt>, <tt>NULL</tt>, or <tt>false</tt>, unless
+overridden. Here is a list of functions that are overridden for the SPARC
+implementation in <tt>SparcRegisterInfo.cpp</tt>:
+</p>
+
+<ul>
+<li><tt>getCalleeSavedRegs</tt> &mdash; Returns a list of callee-saved registers
+ in the order of the desired callee-save stack frame offset.</li>
+
+<li><tt>getReservedRegs</tt> &mdash; Returns a bitset indexed by physical
+ register numbers, indicating if a particular register is unavailable.</li>
+
+<li><tt>hasFP</tt> &mdash; Returns a Boolean indicating if a function should have
+ a dedicated frame pointer register.</li>
+
+<li><tt>eliminateCallFramePseudoInstr</tt> &mdash; If call frame setup or
+ destroy pseudo instructions are used, this can be called to eliminate
+ them.</li>
+
+<li><tt>eliminateFrameIndex</tt> &mdash; Eliminate abstract frame indices from
+ instructions that may use them.</li>
+
+<li><tt>emitPrologue</tt> &mdash; Insert prologue code into the function.</li>
+
+<li><tt>emitEpilogue</tt> &mdash; Insert epilogue code into the function.</li>
+</ul>
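+
+<p>
+For illustration, a minimal sketch of <tt>getReservedRegs</tt> for SPARC is
+shown below. The particular registers reserved here (the hard-wired zero
+register, stack pointer, frame pointer, and return address) are typical choices
+rather than an exact copy of <tt>SparcRegisterInfo.cpp</tt>, and the
+<tt>BitVector</tt>-based signature may differ between LLVM releases.
+</p>
+
+<div class="doc_code">
+<pre>
+BitVector SparcRegisterInfo::getReservedRegs(const MachineFunction &amp;MF) const {
+  BitVector Reserved(getNumRegs());
+  Reserved.set(SP::G0);   // %g0 is hard-wired to zero
+  Reserved.set(SP::O6);   // stack pointer
+  Reserved.set(SP::I6);   // frame pointer
+  Reserved.set(SP::I7);   // return address
+  return Reserved;
+}
+</pre>
+</div>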
+
+</div>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2>
+ <a name="InstructionSet">Instruction Set</a>
+</h2>
+
+<!-- *********************************************************************** -->
+<div>
+
+<p>
+During the early stages of code generation, the LLVM IR code is converted to a
+<tt>SelectionDAG</tt> with nodes that are instances of the <tt>SDNode</tt> class
+containing target instructions. An <tt>SDNode</tt> has an opcode, operands, type
+requirements, and operation properties. For example, is an operation
+commutative, does an operation load from memory. The various operation node
+types are described in the <tt>include/llvm/CodeGen/SelectionDAGNodes.h</tt>
+file (values of the <tt>NodeType</tt> enum in the <tt>ISD</tt> namespace).
+</p>
+
+<p>
+TableGen uses the following target description (<tt>.td</tt>) input files to
+generate much of the code for instruction definition:
+</p>
+
+<ul>
+<li><tt>Target.td</tt> &mdash; Where the <tt>Instruction</tt>, <tt>Operand</tt>,
+ <tt>InstrInfo</tt>, and other fundamental classes are defined.</li>
+
+<li><tt>TargetSelectionDAG.td</tt> &mdash; Used by <tt>SelectionDAG</tt>
+ instruction selection generators, contains <tt>SDTC*</tt> classes (selection
+ DAG type constraint), definitions of <tt>SelectionDAG</tt> nodes (such as
+ <tt>imm</tt>, <tt>cond</tt>, <tt>bb</tt>, <tt>add</tt>, <tt>fadd</tt>,
+ <tt>sub</tt>), and pattern support (<tt>Pattern</tt>, <tt>Pat</tt>,
+  <tt>PatFrag</tt>, <tt>PatLeaf</tt>, <tt>ComplexPattern</tt>).</li>
+
+<li><tt>XXXInstrFormats.td</tt> &mdash; Patterns for definitions of
+ target-specific instructions.</li>
+
+<li><tt>XXXInstrInfo.td</tt> &mdash; Target-specific definitions of instruction
+ templates, condition codes, and instructions of an instruction set. For
+ architecture modifications, a different file name may be used. For example,
+  for Pentium with SSE instructions, this file is <tt>X86InstrSSE.td</tt>, and
+ for Pentium with MMX, this file is <tt>X86InstrMMX.td</tt>.</li>
+</ul>
+
+<p>
+There is also a target-specific <tt>XXX.td</tt> file, where <tt>XXX</tt> is the
+name of the target. The <tt>XXX.td</tt> file includes the other <tt>.td</tt>
+input files, but its contents are only directly important for subtargets.
+</p>
+
+<p>
+You should describe a concrete target-specific class <tt>XXXInstrInfo</tt> that
+represents machine instructions supported by a target machine.
+<tt>XXXInstrInfo</tt> contains an array of <tt>XXXInstrDescriptor</tt> objects,
+each of which describes one instruction. An instruction descriptor defines:</p>
+
+<ul>
+<li>Opcode mnemonic</li>
+
+<li>Number of operands</li>
+
+<li>List of implicit register definitions and uses</li>
+
+<li>Target-independent properties (such as memory access, is commutable)</li>
+
+<li>Target-specific flags </li>
+</ul>
+
+<p>
+The <tt>Instruction</tt> class (defined in <tt>Target.td</tt>) is mostly used as a base
+for more complex instruction classes.
+</p>
+
+<div class="doc_code">
+<pre>class Instruction {
+ string Namespace = "";
+  dag OutOperandList; // A dag containing the MI def operand list.
+  dag InOperandList; // A dag containing the MI use operand list.
+ string AsmString = ""; // The .s format to print the instruction with.
+ list&lt;dag&gt; Pattern; // Set to the DAG pattern for this instruction
+ list&lt;Register&gt; Uses = [];
+ list&lt;Register&gt; Defs = [];
+ list&lt;Predicate&gt; Predicates = []; // predicates turned into isel match code
+ ... remainder not shown for space ...
+}
+</pre>
+</div>
+
+<p>
+A <tt>SelectionDAG</tt> node (<tt>SDNode</tt>) should contain an object
+representing a target-specific instruction that is defined
+in <tt>XXXInstrInfo.td</tt>. The instruction objects should represent
+instructions from the architecture manual of the target machine (such as the
+SPARC Architecture Manual for the SPARC target).
+</p>
+
+<p>
+A single instruction from the architecture manual is often modeled as multiple
+target instructions, depending upon its operands. For example, a manual might
+describe an add instruction that takes a register or an immediate operand. An
+LLVM target could model this with two instructions named <tt>ADDri</tt> and
+<tt>ADDrr</tt>.
+</p>
+
+<p>
+You should define a class for each instruction category and define each opcode
+as a subclass of the category with appropriate parameters such as the fixed
+binary encoding of opcodes and extended opcodes. You should map the register
+bits to the bits of the instruction in which they are encoded (for the
+JIT). You should also specify how the instruction should be printed when the
+automatic assembly printer is used.
+</p>
+
+<p>
+As is described in the SPARC Architecture Manual, Version 8, there are three
+major 32-bit formats for instructions. Format 1 is only for the <tt>CALL</tt>
+instruction. Format 2 is for branch on condition codes and <tt>SETHI</tt> (set
+high bits of a register) instructions. Format 3 is for other instructions.
+</p>
+
+<p>
+Each of these formats has corresponding classes in <tt>SparcInstrFormats.td</tt>.
+<tt>InstSP</tt> is a base class for other instruction classes. Additional base
+classes are specified for more precise formats: for example
+in <tt>SparcInstrFormats.td</tt>, <tt>F2_1</tt> is for <tt>SETHI</tt>,
+and <tt>F2_2</tt> is for branches. There are three other base
+classes: <tt>F3_1</tt> for register/register operations, <tt>F3_2</tt> for
+register/immediate operations, and <tt>F3_3</tt> for floating-point
+operations. <tt>SparcInstrInfo.td</tt> also adds the base class <tt>Pseudo</tt> for
+synthetic SPARC instructions.
+</p>
+
+<p>
+<tt>SparcInstrInfo.td</tt> largely consists of operand and instruction
+definitions for the SPARC target. In <tt>SparcInstrInfo.td</tt>, the following
+target description file entry, <tt>LDrr</tt>, defines the Load Integer
+instruction for a Word (the <tt>LD</tt> SPARC opcode) from a memory address to a
+register. The first parameter, the value 3 (<tt>11<sub>2</sub></tt>), is the
+operation value for this category of operation. The second parameter
+(<tt>000000<sub>2</sub></tt>) is the specific operation value
+for <tt>LD</tt>/Load Word. The third parameter is the output destination, which
+is a register operand in the <tt>IntRegs</tt> register class defined in the
+register target description file.
+</p>
+
+<div class="doc_code">
+<pre>def LDrr : F3_1 &lt;3, 0b000000, (outs IntRegs:$dst), (ins MEMrr:$addr),
+ "ld [$addr], $dst",
+ [(set IntRegs:$dst, (load ADDRrr:$addr))]&gt;;
+</pre>
+</div>
+
+<p>
+The fourth parameter is the input source, which uses the address
+operand <tt>MEMrr</tt> that is defined earlier in <tt>SparcInstrInfo.td</tt>:
+</p>
+
+<div class="doc_code">
+<pre>def MEMrr : Operand&lt;i32&gt; {
+ let PrintMethod = "printMemOperand";
+ let MIOperandInfo = (ops IntRegs, IntRegs);
+}
+</pre>
+</div>
+
+<p>
+The fifth parameter is a string that is used by the assembly printer and can be
+left as an empty string until the assembly printer interface is implemented. The
+sixth and final parameter is the pattern used to match the instruction during
+the SelectionDAG Select Phase described in
+<a href="CodeGenerator.html">The LLVM
+Target-Independent Code Generator</a>. This parameter is detailed in the next
+section, <a href="#InstructionSelector">Instruction Selector</a>.
+</p>
+
+<p>
+Instruction class definitions are not overloaded for different operand types, so
+separate versions of instructions are needed for register, memory, or immediate
+value operands. For example, to perform a Load Integer instruction for a Word
+from an immediate operand to a register, the following instruction class is
+defined:
+</p>
+
+<div class="doc_code">
+<pre>def LDri : F3_2 &lt;3, 0b000000, (outs IntRegs:$dst), (ins MEMri:$addr),
+ "ld [$addr], $dst",
+ [(set IntRegs:$dst, (load ADDRri:$addr))]&gt;;
+</pre>
+</div>
+
+<p>
+Writing these definitions for so many similar instructions can involve a lot of
+cut and paste. In <tt>.td</tt> files, the <tt>multiclass</tt> directive enables the
+creation of templates to define several instruction classes at once (using
+the <tt>defm</tt> directive). For example in <tt>SparcInstrInfo.td</tt>, the
+<tt>multiclass</tt> pattern <tt>F3_12</tt> is defined to create two instruction
+classes each time <tt>F3_12</tt> is invoked:
+</p>
+
+<div class="doc_code">
+<pre>multiclass F3_12 &lt;string OpcStr, bits&lt;6&gt; Op3Val, SDNode OpNode&gt; {
+ def rr : F3_1 &lt;2, Op3Val,
+ (outs IntRegs:$dst), (ins IntRegs:$b, IntRegs:$c),
+ !strconcat(OpcStr, " $b, $c, $dst"),
+ [(set IntRegs:$dst, (OpNode IntRegs:$b, IntRegs:$c))]&gt;;
+ def ri : F3_2 &lt;2, Op3Val,
+ (outs IntRegs:$dst), (ins IntRegs:$b, i32imm:$c),
+ !strconcat(OpcStr, " $b, $c, $dst"),
+ [(set IntRegs:$dst, (OpNode IntRegs:$b, simm13:$c))]&gt;;
+}
+</pre>
+</div>
+
+<p>
+So when the <tt>defm</tt> directive is used for the <tt>XOR</tt>
+and <tt>ADD</tt> instructions, as seen below, it creates four instruction
+objects: <tt>XORrr</tt>, <tt>XORri</tt>, <tt>ADDrr</tt>, and <tt>ADDri</tt>.
+</p>
+
+<div class="doc_code">
+<pre>
+defm XOR : F3_12&lt;"xor", 0b000011, xor&gt;;
+defm ADD : F3_12&lt;"add", 0b000000, add&gt;;
+</pre>
+</div>
+
+<p>
+<tt>SparcInstrInfo.td</tt> also includes definitions for condition codes that
+are referenced by branch instructions. The following definitions
+in <tt>SparcInstrInfo.td</tt> give the value of each SPARC condition
+code. For example, the value 10 represents the 'greater than' condition for
+integers, and the value 22 represents the 'greater than' condition for floats.
+</p>
+
+<div class="doc_code">
+<pre>
+def ICC_NE : ICC_VAL&lt; 9&gt;; // Not Equal
+def ICC_E : ICC_VAL&lt; 1&gt;; // Equal
+def ICC_G : ICC_VAL&lt;10&gt;; // Greater
+...
+def FCC_U : FCC_VAL&lt;23&gt;; // Unordered
+def FCC_G : FCC_VAL&lt;22&gt;; // Greater
+def FCC_UG : FCC_VAL&lt;21&gt;; // Unordered or Greater
+...
+</pre>
+</div>
+
+<p>
+(Note that <tt>Sparc.h</tt> also defines enums that correspond to the same SPARC
+condition codes. Care must be taken to ensure the values in <tt>Sparc.h</tt>
+correspond to the values in <tt>SparcInstrInfo.td</tt>. I.e.,
+<tt>SPCC::ICC_NE = 9</tt>, <tt>SPCC::FCC_U = 23</tt> and so on.)
+</p>
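+
+<p>
+A sketch of what that correspondence looks like on the C++ side is shown below;
+the values are taken from the definitions above, and the remaining enumerators
+are elided.
+</p>
+
+<div class="doc_code">
+<pre>
+namespace SPCC {
+  // Must stay in sync with the ICC_VAL/FCC_VAL definitions in SparcInstrInfo.td.
+  enum CondCodes {
+    ICC_E  = 1,   // Equal
+    ICC_NE = 9,   // Not Equal
+    ICC_G  = 10,  // Greater
+    FCC_G  = 22,  // Greater (floating point)
+    FCC_U  = 23   // Unordered
+    // ...
+  };
+}
+</pre>
+</div>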
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="operandMapping">Instruction Operand Mapping</a>
+</h3>
+
+<div>
+
+<p>
+The code generator backend maps instruction operands to fields in the
+instruction. Operands are assigned to unbound fields in the instruction in the
+order they are defined. Fields are bound when they are assigned a value. For
+example, the Sparc target defines the <tt>XNORrr</tt> instruction as
+a <tt>F3_1</tt> format instruction having three operands.
+</p>
+
+<div class="doc_code">
+<pre>
+def XNORrr : F3_1&lt;2, 0b000111,
+ (outs IntRegs:$dst), (ins IntRegs:$b, IntRegs:$c),
+ "xnor $b, $c, $dst",
+ [(set IntRegs:$dst, (not (xor IntRegs:$b, IntRegs:$c)))]&gt;;
+</pre>
+</div>
+
+<p>
+The instruction templates in <tt>SparcInstrFormats.td</tt> show the base class
+for <tt>F3_1</tt> is <tt>InstSP</tt>.
+</p>
+
+<div class="doc_code">
+<pre>
+class InstSP&lt;dag outs, dag ins, string asmstr, list&lt;dag&gt; pattern&gt; : Instruction {
+ field bits&lt;32&gt; Inst;
+ let Namespace = "SP";
+ bits&lt;2&gt; op;
+ let Inst{31-30} = op;
+ dag OutOperandList = outs;
+ dag InOperandList = ins;
+ let AsmString = asmstr;
+ let Pattern = pattern;
+}
+</pre>
+</div>
+
+<p><tt>InstSP</tt> leaves the <tt>op</tt> field unbound.</p>
+
+<div class="doc_code">
+<pre>
+class F3&lt;dag outs, dag ins, string asmstr, list&lt;dag&gt; pattern&gt;
+ : InstSP&lt;outs, ins, asmstr, pattern&gt; {
+ bits&lt;5&gt; rd;
+ bits&lt;6&gt; op3;
+ bits&lt;5&gt; rs1;
+ let op{1} = 1; // Op = 2 or 3
+ let Inst{29-25} = rd;
+ let Inst{24-19} = op3;
+ let Inst{18-14} = rs1;
+}
+</pre>
+</div>
+
+<p>
+<tt>F3</tt> binds the <tt>op</tt> field and defines the <tt>rd</tt>,
+<tt>op3</tt>, and <tt>rs1</tt> fields. <tt>F3</tt> format instructions will
+bind the operands to the <tt>rd</tt>, <tt>op3</tt>, and <tt>rs1</tt> fields.
+</p>
+
+<div class="doc_code">
+<pre>
+class F3_1&lt;bits&lt;2&gt; opVal, bits&lt;6&gt; op3val, dag outs, dag ins,
+ string asmstr, list&lt;dag&gt; pattern&gt; : F3&lt;outs, ins, asmstr, pattern&gt; {
+ bits&lt;8&gt; asi = 0; // asi not currently used
+ bits&lt;5&gt; rs2;
+ let op = opVal;
+ let op3 = op3val;
+ let Inst{13} = 0; // i field = 0
+ let Inst{12-5} = asi; // address space identifier
+ let Inst{4-0} = rs2;
+}
+</pre>
+</div>
+
+<p>
+<tt>F3_1</tt> binds the <tt>op3</tt> field and defines the <tt>rs2</tt>
+field. <tt>F3_1</tt> format instructions will bind the operands to the <tt>rd</tt>,
+<tt>rs1</tt>, and <tt>rs2</tt> fields. This results in the <tt>XNORrr</tt>
+instruction binding <tt>$dst</tt>, <tt>$b</tt>, and <tt>$c</tt> operands to
+the <tt>rd</tt>, <tt>rs1</tt>, and <tt>rs2</tt> fields respectively.
+</p>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="implementInstr">Implement a subclass of </a>
+ <a href="CodeGenerator.html#targetinstrinfo">TargetInstrInfo</a>
+</h3>
+
+<div>
+
+<p>
+The final step is to hand code portions of <tt>XXXInstrInfo</tt>, which
+implements the interface described in <tt>TargetInstrInfo.h</tt>. These
+functions return <tt>0</tt> or a Boolean or they assert, unless
+overridden. Here's a list of functions that are overridden for the SPARC
+implementation in <tt>SparcInstrInfo.cpp</tt>:
+</p>
+
+<ul>
+<li><tt>isLoadFromStackSlot</tt> &mdash; If the specified machine instruction is
+ a direct load from a stack slot, return the register number of the
+ destination and the <tt>FrameIndex</tt> of the stack slot.</li>
+
+<li><tt>isStoreToStackSlot</tt> &mdash; If the specified machine instruction is
+  a direct store to a stack slot, return the register number of the source
+  register and the <tt>FrameIndex</tt> of the stack slot.</li>
+
+<li><tt>copyPhysReg</tt> &mdash; Copy values between a pair of physical
+ registers.</li>
+
+<li><tt>storeRegToStackSlot</tt> &mdash; Store a register value to a stack
+ slot.</li>
+
+<li><tt>loadRegFromStackSlot</tt> &mdash; Load a register value from a stack
+ slot.</li>
+
+<li><tt>storeRegToAddr</tt> &mdash; Store a register value to memory.</li>
+
+<li><tt>loadRegFromAddr</tt> &mdash; Load a register value from memory.</li>
+
+<li><tt>foldMemoryOperand</tt> &mdash; Attempt to fold a load or store of the
+  specified operand(s) into the given instruction, producing a single combined
+  instruction.</li>
+</ul>
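+
+<p>
+As a rough illustration of the first item, a simplified
+<tt>isLoadFromStackSlot</tt> for SPARC might look like the sketch below. The
+operand positions and the <tt>MachineOperand</tt> query methods
+(<tt>isFI</tt>, <tt>isImm</tt>) are assumptions that vary across LLVM releases;
+consult <tt>SparcInstrInfo.cpp</tt> for the real version.
+</p>
+
+<div class="doc_code">
+<pre>
+unsigned SparcInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
+                                             int &amp;FrameIndex) const {
+  if (MI-&gt;getOpcode() == SP::LDri &amp;&amp;
+      MI-&gt;getOperand(1).isFI() &amp;&amp;          // base address is a frame index
+      MI-&gt;getOperand(2).isImm() &amp;&amp;
+      MI-&gt;getOperand(2).getImm() == 0) {    // no additional offset
+    FrameIndex = MI-&gt;getOperand(1).getIndex();
+    return MI-&gt;getOperand(0).getReg();      // destination register
+  }
+  return 0;                                 // not a simple stack-slot load
+}
+</pre>
+</div>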
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="branchFolding">Branch Folding and If Conversion</a>
+</h3>
+<div>
+
+<p>
+Performance can be improved by combining instructions or by eliminating
+instructions that are never reached. The <tt>AnalyzeBranch</tt> method
+in <tt>XXXInstrInfo</tt> may be implemented to examine conditional instructions
+and remove unnecessary instructions. <tt>AnalyzeBranch</tt> looks at the end of
+a machine basic block (MBB) for opportunities for improvement, such as branch
+folding and if conversion. The <tt>BranchFolder</tt> and <tt>IfConverter</tt>
+machine function passes (see the source files <tt>BranchFolding.cpp</tt> and
+<tt>IfConversion.cpp</tt> in the <tt>lib/CodeGen</tt> directory) call
+<tt>AnalyzeBranch</tt> to improve the control flow graph that represents the
+instructions.
+</p>
+
+<p>
+Several implementations of <tt>AnalyzeBranch</tt> (for ARM, Alpha, and X86) can
+be examined as models for your own <tt>AnalyzeBranch</tt> implementation. Since
+SPARC does not implement a useful <tt>AnalyzeBranch</tt>, the ARM target
+implementation is shown below.
+</p>
+
+<p><tt>AnalyzeBranch</tt> returns a Boolean value and takes four parameters:</p>
+
+<ul>
+<li><tt>MachineBasicBlock &amp;MBB</tt> &mdash; The incoming block to be
+ examined.</li>
+
+<li><tt>MachineBasicBlock *&amp;TBB</tt> &mdash; A destination block that is
+ returned. For a conditional branch that evaluates to true, <tt>TBB</tt> is
+ the destination.</li>
+
+<li><tt>MachineBasicBlock *&amp;FBB</tt> &mdash; For a conditional branch that
+ evaluates to false, <tt>FBB</tt> is returned as the destination.</li>
+
+<li><tt>std::vector&lt;MachineOperand&gt; &amp;Cond</tt> &mdash; List of
+ operands to evaluate a condition for a conditional branch.</li>
+</ul>
+
+<p>
+In the simplest case, if a block ends without a branch, then it falls through to
+the successor block. No destination blocks are specified for either <tt>TBB</tt>
+or <tt>FBB</tt>, so both parameters return <tt>NULL</tt>. The start of
+the <tt>AnalyzeBranch</tt> (see code below for the ARM target) shows the
+function parameters and the code for the simplest case.
+</p>
+
+<div class="doc_code">
+<pre>bool ARMInstrInfo::AnalyzeBranch(MachineBasicBlock &amp;MBB,
+ MachineBasicBlock *&amp;TBB, MachineBasicBlock *&amp;FBB,
+ std::vector&lt;MachineOperand&gt; &amp;Cond) const
+{
+ MachineBasicBlock::iterator I = MBB.end();
+ if (I == MBB.begin() || !isUnpredicatedTerminator(--I))
+ return false;
+</pre>
+</div>
+
+<p>
+If a block ends with a single unconditional branch instruction, then
+<tt>AnalyzeBranch</tt> (shown below) should return the destination of that
+branch in the <tt>TBB</tt> parameter.
+</p>
+
+<div class="doc_code">
+<pre>
+ if (LastOpc == ARM::B || LastOpc == ARM::tB) {
+ TBB = LastInst-&gt;getOperand(0).getMBB();
+ return false;
+ }
+</pre>
+</div>
+
+<p>
+If a block ends with two unconditional branches, then the second branch is never
+reached. In that situation, as shown below, remove the last branch instruction
+and return the destination of the penultimate branch in the <tt>TBB</tt> parameter.
+</p>
+
+<div class="doc_code">
+<pre>
+ if ((SecondLastOpc == ARM::B || SecondLastOpc==ARM::tB) &amp;&amp;
+ (LastOpc == ARM::B || LastOpc == ARM::tB)) {
+ TBB = SecondLastInst-&gt;getOperand(0).getMBB();
+ I = LastInst;
+ I-&gt;eraseFromParent();
+ return false;
+ }
+</pre>
+</div>
+
+<p>
+A block may end with a single conditional branch instruction that falls through
+to the successor block if the condition evaluates to false. In that case,
+<tt>AnalyzeBranch</tt> (shown below) should return the destination of that
+conditional branch in the <tt>TBB</tt> parameter and a list of operands in
+the <tt>Cond</tt> parameter to evaluate the condition.
+</p>
+
+<div class="doc_code">
+<pre>
+ if (LastOpc == ARM::Bcc || LastOpc == ARM::tBcc) {
+ // Block ends with fall-through condbranch.
+ TBB = LastInst-&gt;getOperand(0).getMBB();
+ Cond.push_back(LastInst-&gt;getOperand(1));
+ Cond.push_back(LastInst-&gt;getOperand(2));
+ return false;
+ }
+</pre>
+</div>
+
+<p>
+If a block ends with both a conditional branch and an ensuing unconditional
+branch, then <tt>AnalyzeBranch</tt> (shown below) should return the conditional
+branch destination (assuming it corresponds to a conditional evaluation of
+'<tt>true</tt>') in the <tt>TBB</tt> parameter and the unconditional branch
+destination in the <tt>FBB</tt> (corresponding to a conditional evaluation of
+'<tt>false</tt>'). A list of operands to evaluate the condition should be
+returned in the <tt>Cond</tt> parameter.
+</p>
+
+<div class="doc_code">
+<pre>
+ unsigned SecondLastOpc = SecondLastInst-&gt;getOpcode();
+
+ if ((SecondLastOpc == ARM::Bcc &amp;&amp; LastOpc == ARM::B) ||
+ (SecondLastOpc == ARM::tBcc &amp;&amp; LastOpc == ARM::tB)) {
+ TBB = SecondLastInst-&gt;getOperand(0).getMBB();
+ Cond.push_back(SecondLastInst-&gt;getOperand(1));
+ Cond.push_back(SecondLastInst-&gt;getOperand(2));
+ FBB = LastInst-&gt;getOperand(0).getMBB();
+ return false;
+ }
+</pre>
+</div>
+
+<p>
+For the last two cases (ending with a single conditional branch or ending with
+one conditional and one unconditional branch), the operands returned in
+the <tt>Cond</tt> parameter can be passed to methods of other instructions to
+create new branches or perform other operations. An implementation
+of <tt>AnalyzeBranch</tt> requires the helper methods <tt>RemoveBranch</tt>
+and <tt>InsertBranch</tt> to manage subsequent operations.
+</p>
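+
+<p>
+The following is a hedged sketch of the companion <tt>InsertBranch</tt> hook in
+the style of the ARM target. It is simplified (Thumb opcodes and debug location
+handling are omitted), and the exact <tt>BuildMI</tt> overloads available depend
+on the LLVM release.
+</p>
+
+<div class="doc_code">
+<pre>
+unsigned ARMInstrInfo::InsertBranch(MachineBasicBlock &amp;MBB,
+                                    MachineBasicBlock *TBB,
+                                    MachineBasicBlock *FBB,
+                                    const std::vector&lt;MachineOperand&gt; &amp;Cond) const {
+  if (FBB == 0) {                      // inserting a single branch
+    if (Cond.empty())                  // unconditional branch
+      BuildMI(&amp;MBB, get(ARM::B)).addMBB(TBB);
+    else                               // conditional branch, fall through otherwise
+      BuildMI(&amp;MBB, get(ARM::Bcc)).addMBB(TBB)
+        .addImm(Cond[0].getImm()).addReg(Cond[1].getReg());
+    return 1;
+  }
+  // conditional branch to TBB followed by an unconditional branch to FBB
+  BuildMI(&amp;MBB, get(ARM::Bcc)).addMBB(TBB)
+    .addImm(Cond[0].getImm()).addReg(Cond[1].getReg());
+  BuildMI(&amp;MBB, get(ARM::B)).addMBB(FBB);
+  return 2;
+}
+</pre>
+</div>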
+
+<p>
+<tt>AnalyzeBranch</tt> should return false, indicating success, in most
+circumstances. It should return true only when the method cannot determine what
+to do, for example, if a block has three terminating branches or ends with a
+terminator it cannot handle, such as an indirect branch.
+</p>
+
+</div>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2>
+ <a name="InstructionSelector">Instruction Selector</a>
+</h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>
+LLVM uses a <tt>SelectionDAG</tt> to represent LLVM IR instructions, and nodes
+of the <tt>SelectionDAG</tt> ideally represent native target
+instructions. During code generation, instruction selection passes are performed
+to convert non-native DAG instructions into native target-specific
+instructions. The pass described in <tt>XXXISelDAGToDAG.cpp</tt> is used to
+match patterns and perform DAG-to-DAG instruction selection. Optionally, a pass
+may be defined (in <tt>XXXBranchSelector.cpp</tt>) to perform similar DAG-to-DAG
+operations for branch instructions. Later, the code in
+<tt>XXXISelLowering.cpp</tt> replaces or removes operations and data types not
+supported natively (legalizes) in a <tt>SelectionDAG</tt>.
+</p>
+
+<p>
+TableGen generates code for instruction selection using the following target
+description input files:
+</p>
+
+<ul>
+<li><tt>XXXInstrInfo.td</tt> &mdash; Contains definitions of instructions in a
+ target-specific instruction set, generates <tt>XXXGenDAGISel.inc</tt>, which
+ is included in <tt>XXXISelDAGToDAG.cpp</tt>.</li>
+
+<li><tt>XXXCallingConv.td</tt> &mdash; Contains the calling and return value
+ conventions for the target architecture, and it generates
+ <tt>XXXGenCallingConv.inc</tt>, which is included in
+ <tt>XXXISelLowering.cpp</tt>.</li>
+</ul>
+
+<p>
+The implementation of an instruction selection pass must include a header that
+declares the <tt>FunctionPass</tt> class or a subclass of <tt>FunctionPass</tt>. In
+<tt>XXXTargetMachine.cpp</tt>, a Pass Manager (PM) should add each instruction
+selection pass into the queue of passes to run.
+</p>
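+
+<p>
+As a sketch of what that registration typically looks like for SPARC (the hook
+name and its parameters have changed across LLVM releases, so treat this as
+illustrative):
+</p>
+
+<div class="doc_code">
+<pre>
+bool SparcTargetMachine::addInstSelector(PassManagerBase &amp;PM,
+                                         CodeGenOpt::Level OptLevel) {
+  PM.add(createSparcISelDag(*this));   // queue the DAG-to-DAG selection pass
+  return false;
+}
+</pre>
+</div>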
+
+<p>
+The LLVM static compiler (<tt>llc</tt>) is an excellent tool for visualizing the
+contents of DAGs. To display the <tt>SelectionDAG</tt> before or after specific
+processing phases, use the command line options for <tt>llc</tt>, described
+at <a href="CodeGenerator.html#selectiondag_process">
+SelectionDAG Instruction Selection Process</a>.
+</p>
+
+<p>
+To describe instruction selector behavior, you should add patterns for lowering
+LLVM code into a <tt>SelectionDAG</tt> as the last parameter of the instruction
+definitions in <tt>XXXInstrInfo.td</tt>. For example, in
+<tt>SparcInstrInfo.td</tt>, this entry defines a register store operation, and
+the last parameter describes a pattern with the store DAG operator.
+</p>
+
+<div class="doc_code">
+<pre>
+def STrr : F3_1&lt; 3, 0b000100, (outs), (ins MEMrr:$addr, IntRegs:$src),
+ "st $src, [$addr]", [(store IntRegs:$src, ADDRrr:$addr)]&gt;;
+</pre>
+</div>
+
+<p>
+<tt>ADDRrr</tt> is a memory mode that is also defined in
+<tt>SparcInstrInfo.td</tt>:
+</p>
+
+<div class="doc_code">
+<pre>
+def ADDRrr : ComplexPattern&lt;i32, 2, "SelectADDRrr", [], []&gt;;
+</pre>
+</div>
+
+<p>
+The definition of <tt>ADDRrr</tt> refers to <tt>SelectADDRrr</tt>, which is a
+function defined in an implementation of the Instruction Selector (such
+as <tt>SparcISelDAGToDAG.cpp</tt>).
+</p>
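+
+<p>
+A simplified sketch of such a complex-pattern selector is shown below. The real
+<tt>SelectADDRrr</tt> in <tt>SparcISelDAGToDAG.cpp</tt> also rejects addresses
+that belong to other addressing modes (frame indices and reg+imm forms handled
+by <tt>ADDRri</tt>), so this version is illustrative only.
+</p>
+
+<div class="doc_code">
+<pre>
+bool SparcDAGToDAGISel::SelectADDRrr(SDValue Op, SDValue Addr,
+                                     SDValue &amp;R1, SDValue &amp;R2) {
+  if (Addr.getOpcode() == ISD::ADD) {           // reg + reg address
+    R1 = Addr.getOperand(0);
+    R2 = Addr.getOperand(1);
+    return true;
+  }
+  R1 = Addr;                                    // single register address
+  R2 = CurDAG-&gt;getRegister(SP::G0, MVT::i32);   // %g0 always reads as zero
+  return true;
+}
+</pre>
+</div>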
+
+<p>
+In <tt>lib/Target/TargetSelectionDAG.td</tt>, the DAG operator for store is
+defined below:
+</p>
+
+<div class="doc_code">
+<pre>
+def store : PatFrag&lt;(ops node:$val, node:$ptr),
+ (st node:$val, node:$ptr), [{
+ if (StoreSDNode *ST = dyn_cast&lt;StoreSDNode&gt;(N))
+ return !ST-&gt;isTruncatingStore() &amp;&amp;
+ ST-&gt;getAddressingMode() == ISD::UNINDEXED;
+ return false;
+}]&gt;;
+</pre>
+</div>
+
+<p>
+<tt>XXXInstrInfo.td</tt> also generates (in <tt>XXXGenDAGISel.inc</tt>) the
+<tt>SelectCode</tt> method that is used to call the appropriate processing
+method for an instruction. In this example, <tt>SelectCode</tt>
+calls <tt>Select_ISD_STORE</tt> for the <tt>ISD::STORE</tt> opcode.
+</p>
+
+<div class="doc_code">
+<pre>
+SDNode *SelectCode(SDValue N) {
+ ...
+ MVT::ValueType NVT = N.getNode()-&gt;getValueType(0);
+ switch (N.getOpcode()) {
+ case ISD::STORE: {
+ switch (NVT) {
+ default:
+ return Select_ISD_STORE(N);
+ break;
+ }
+ break;
+ }
+ ...
+</pre>
+</div>
+
+<p>
+The pattern for <tt>STrr</tt> is matched, so elsewhere in
+<tt>XXXGenDAGISel.inc</tt>, code for <tt>STrr</tt> is created for
+<tt>Select_ISD_STORE</tt>. The <tt>Emit_22</tt> method is also generated
+in <tt>XXXGenDAGISel.inc</tt> to complete the processing of this
+instruction.
+</p>
+
+<div class="doc_code">
+<pre>
+SDNode *Select_ISD_STORE(const SDValue &amp;N) {
+ SDValue Chain = N.getOperand(0);
+ if (Predicate_store(N.getNode())) {
+ SDValue N1 = N.getOperand(1);
+ SDValue N2 = N.getOperand(2);
+ SDValue CPTmp0;
+ SDValue CPTmp1;
+
+ // Pattern: (st:void IntRegs:i32:$src,
+ // ADDRrr:i32:$addr)&lt;&lt;P:Predicate_store&gt;&gt;
+ // Emits: (STrr:void ADDRrr:i32:$addr, IntRegs:i32:$src)
+ // Pattern complexity = 13 cost = 1 size = 0
+ if (SelectADDRrr(N, N2, CPTmp0, CPTmp1) &amp;&amp;
+ N1.getNode()-&gt;getValueType(0) == MVT::i32 &amp;&amp;
+ N2.getNode()-&gt;getValueType(0) == MVT::i32) {
+ return Emit_22(N, SP::STrr, CPTmp0, CPTmp1);
+ }
+...
+</pre>
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="LegalizePhase">The SelectionDAG Legalize Phase</a>
+</h3>
+
+<div>
+
+<p>
+The Legalize phase converts a DAG to use types and operations that are natively
+supported by the target. For natively unsupported types and operations, you need
+to add code to the target-specific <tt>XXXTargetLowering</tt> implementation to convert
+unsupported types and operations to supported ones.
+</p>
+
+<p>
+In the constructor for the <tt>XXXTargetLowering</tt> class, first use the
+<tt>addRegisterClass</tt> method to specify which types are supported and which
+register classes are associated with them. The code for the register classes is
+generated by TableGen from <tt>XXXRegisterInfo.td</tt> and placed
+in <tt>XXXGenRegisterInfo.h.inc</tt>. For example, the implementation of the
+constructor for the <tt>SparcTargetLowering</tt> class (in
+<tt>SparcISelLowering.cpp</tt>) starts with the following code:
+</p>
+
+<div class="doc_code">
+<pre>
+addRegisterClass(MVT::i32, SP::IntRegsRegisterClass);
+addRegisterClass(MVT::f32, SP::FPRegsRegisterClass);
+addRegisterClass(MVT::f64, SP::DFPRegsRegisterClass);
+</pre>
+</div>
+
+<p>
+You should examine the node types in the <tt>ISD</tt> namespace
+(<tt>include/llvm/CodeGen/SelectionDAGNodes.h</tt>) and determine which
+operations the target natively supports. For operations that do <b>not</b> have
+native support, add a callback to the constructor for the <tt>XXXTargetLowering</tt>
+class, so the instruction selection process knows what to do. The <tt>TargetLowering</tt>
+class callback methods (declared in <tt>llvm/Target/TargetLowering.h</tt>) are:
+</p>
+
+<ul>
+<li><tt>setOperationAction</tt> &mdash; General operation.</li>
+
+<li><tt>setLoadExtAction</tt> &mdash; Load with extension.</li>
+
+<li><tt>setTruncStoreAction</tt> &mdash; Truncating store.</li>
+
+<li><tt>setIndexedLoadAction</tt> &mdash; Indexed load.</li>
+
+<li><tt>setIndexedStoreAction</tt> &mdash; Indexed store.</li>
+
+<li><tt>setConvertAction</tt> &mdash; Type conversion.</li>
+
+<li><tt>setCondCodeAction</tt> &mdash; Support for a given condition code.</li>
+</ul>
+
+<p>
+Note: on older releases, <tt>setLoadXAction</tt> is used instead
+of <tt>setLoadExtAction</tt>. Also, on older releases,
+<tt>setCondCodeAction</tt> may not be supported. Examine your release
+to see what methods are specifically supported.
+</p>
+
+<p>
+These callbacks are used to indicate whether an operation works with a specified
+type (or types). In all cases, the third parameter is
+a <tt>LegalizeAction</tt> enum value: <tt>Promote</tt>, <tt>Expand</tt>,
+<tt>Custom</tt>, or <tt>Legal</tt>. <tt>SparcISelLowering.cpp</tt>
+contains examples of all four <tt>LegalizeAction</tt> values.
+</p>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="promote">Promote</a>
+</h4>
+
+<div>
+
+<p>
+For an operation without native support for a given type, the specified type may
+be promoted to a larger type that is supported. For example, SPARC does not
+support a sign-extending load for Boolean values (<tt>i1</tt> type), so
+in <tt>SparcISelLowering.cpp</tt> the third parameter below, <tt>Promote</tt>,
+changes <tt>i1</tt> type values to a larger type before loading.
+</p>
+
+<div class="doc_code">
+<pre>
+setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
+</pre>
+</div>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="expand">Expand</a>
+</h4>
+
+<div>
+
+<p>
+For a type without native support, a value may need to be broken down further,
+rather than promoted. For an operation without native support, a combination of
+other operations may be used to similar effect. In SPARC, the floating-point
+sine and cosine trig operations are supported by expansion to other operations,
+as indicated by the third parameter, <tt>Expand</tt>, to
+<tt>setOperationAction</tt>:
+</p>
+
+<div class="doc_code">
+<pre>
+setOperationAction(ISD::FSIN, MVT::f32, Expand);
+setOperationAction(ISD::FCOS, MVT::f32, Expand);
+</pre>
+</div>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="custom">Custom</a>
+</h4>
+
+<div>
+
+<p>
+For some operations, simple type promotion or operation expansion may be
+insufficient. In some cases, a special intrinsic function must be implemented.
+</p>
+
+<p>
+For example, a constant value may require special treatment, or an operation may
+require spilling and restoring registers in the stack and working with register
+allocators.
+</p>
+
+<p>
+As seen in <tt>SparcISelLowering.cpp</tt> code below, to perform a type
+conversion from a floating point value to a signed integer, first the
+<tt>setOperationAction</tt> should be called with <tt>Custom</tt> as the third
+parameter:
+</p>
+
+<div class="doc_code">
+<pre>
+setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
+</pre>
+</div>
+
+<p>
+In the <tt>LowerOperation</tt> method, for each <tt>Custom</tt> operation, a
+case statement should be added to indicate what function to call. In the
+following code, an <tt>FP_TO_SINT</tt> opcode will call
+the <tt>LowerFP_TO_SINT</tt> method:
+</p>
+
+<div class="doc_code">
+<pre>
+SDValue SparcTargetLowering::LowerOperation(SDValue Op, SelectionDAG &amp;DAG) {
+ switch (Op.getOpcode()) {
+ case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG);
+ ...
+ }
+}
+</pre>
+</div>
+
+<p>
+Finally, the <tt>LowerFP_TO_SINT</tt> method is implemented, using an FP
+register to convert the floating-point value to an integer.
+</p>
+
+<div class="doc_code">
+<pre>
+static SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &amp;DAG) {
+ assert(Op.getValueType() == MVT::i32);
+ Op = DAG.getNode(SPISD::FTOI, MVT::f32, Op.getOperand(0));
+ return DAG.getNode(ISD::BITCAST, MVT::i32, Op);
+}
+</pre>
+</div>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="legal">Legal</a>
+</h4>
+
+<div>
+
+<p>
+The <tt>Legal</tt> LegalizeAction enum value simply indicates that an
+operation <b>is</b> natively supported. <tt>Legal</tt> represents the default
+condition, so it is rarely used. In <tt>SparcISelLowering.cpp</tt>,
+<tt>CTPOP</tt> (an operation to count the bits set in an integer) is
+natively supported only for SPARC v9. The following code enables
+the <tt>Expand</tt> conversion technique for non-v9 SPARC implementations.
+</p>
+
+<div class="doc_code">
+<pre>
+setOperationAction(ISD::CTPOP, MVT::i32, Expand);
+...
+if (TM.getSubtarget&lt;SparcSubtarget&gt;().isV9())
+ setOperationAction(ISD::CTPOP, MVT::i32, Legal);
+</pre>
+</div>
+
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="callingConventions">Calling Conventions</a>
+</h3>
+
+<div>
+
+<p>
+To support target-specific calling conventions, <tt>XXXCallingConv.td</tt>
+uses interfaces (such as <tt>CCIfType</tt> and <tt>CCAssignToReg</tt>) that are defined in
+<tt>lib/Target/TargetCallingConv.td</tt>. TableGen can take the target
+descriptor file <tt>XXXCallingConv.td</tt> and generate the header
+file <tt>XXXGenCallingConv.inc</tt>, which is typically included
+in <tt>XXXISelLowering.cpp</tt>. You can use the interfaces in
+<tt>TargetCallingConv.td</tt> to specify:
+</p>
+
+<ul>
+<li>The order of parameter allocation.</li>
+
+<li>Where parameters and return values are placed (that is, on the stack or in
+ registers).</li>
+
+<li>Which registers may be used.</li>
+
+<li>Whether the caller or callee unwinds the stack.</li>
+</ul>
+
+<p>
+The following example demonstrates the use of the <tt>CCIfType</tt> and
+<tt>CCAssignToReg</tt> interfaces. If the <tt>CCIfType</tt> predicate is true
+(that is, if the current argument is of type <tt>f32</tt> or <tt>f64</tt>), then
+the action is performed. In this case, the <tt>CCAssignToReg</tt> action assigns
+the argument value to the first available register: either <tt>R0</tt>
+or <tt>R1</tt>.
+</p>
+
+<div class="doc_code">
+<pre>
+CCIfType&lt;[f32,f64], CCAssignToReg&lt;[R0, R1]&gt;&gt;
+</pre>
+</div>
+
+<p>
+<tt>SparcCallingConv.td</tt> contains definitions for a target-specific
+return-value calling convention (<tt>RetCC_Sparc32</tt>) and a basic 32-bit C calling
+convention (<tt>CC_Sparc32</tt>). The definition of <tt>RetCC_Sparc32</tt>
+(shown below) indicates which registers are used for specified scalar return
+types. A single-precision float is returned to register <tt>F0</tt>, and a
+double-precision float goes to register <tt>D0</tt>. A 32-bit integer is
+returned in register <tt>I0</tt> or <tt>I1</tt>.
+</p>
+
+<div class="doc_code">
+<pre>
+def RetCC_Sparc32 : CallingConv&lt;[
+ CCIfType&lt;[i32], CCAssignToReg&lt;[I0, I1]&gt;&gt;,
+ CCIfType&lt;[f32], CCAssignToReg&lt;[F0]&gt;&gt;,
+ CCIfType&lt;[f64], CCAssignToReg&lt;[D0]&gt;&gt;
+]&gt;;
+</pre>
+</div>
+
+<p>
+The definition of <tt>CC_Sparc32</tt> in <tt>SparcCallingConv.td</tt> introduces
+<tt>CCAssignToStack</tt>, which assigns the value to a stack slot with the
+specified size and alignment. In the example below, the first parameter, 4,
+indicates the size of the slot in bytes, and the second parameter, also 4,
+indicates the stack alignment in bytes. (Special cases: if size is zero, then the
+ABI size is used; if alignment is zero, then the ABI alignment is used.)
+</p>
+
+<div class="doc_code">
+<pre>
+def CC_Sparc32 : CallingConv&lt;[
+ // All arguments get passed in integer registers if there is space.
+ CCIfType&lt;[i32, f32, f64], CCAssignToReg&lt;[I0, I1, I2, I3, I4, I5]&gt;&gt;,
+ CCAssignToStack&lt;4, 4&gt;
+]&gt;;
+</pre>
+</div>
+
+<p>
+<tt>CCDelegateTo</tt> is another commonly used interface, which tries to find a
+specified sub-calling convention, and, if a match is found, it is invoked. In
+the following example (in <tt>X86CallingConv.td</tt>), the definition of
+<tt>RetCC_X86_32_C</tt> ends with <tt>CCDelegateTo</tt>. After the current value
+is assigned to the register <tt>ST0</tt> or <tt>ST1</tt>,
+the <tt>RetCC_X86Common</tt> is invoked.
+</p>
+
+<div class="doc_code">
+<pre>
+def RetCC_X86_32_C : CallingConv&lt;[
+ CCIfType&lt;[f32], CCAssignToReg&lt;[ST0, ST1]&gt;&gt;,
+ CCIfType&lt;[f64], CCAssignToReg&lt;[ST0, ST1]&gt;&gt;,
+ CCDelegateTo&lt;RetCC_X86Common&gt;
+]&gt;;
+</pre>
+</div>
+
+<p>
+<tt>CCIfCC</tt> is an interface that attempts to match the given name to the
+current calling convention. If the name identifies the current calling
+convention, then a specified action is invoked. In the following example (in
+<tt>X86CallingConv.td</tt>), if the <tt>Fast</tt> calling convention is in use,
+then <tt>RetCC_X86_32_Fast</tt> is invoked. If the <tt>SSECall</tt> calling
+convention is in use, then <tt>RetCC_X86_32_SSE</tt> is invoked.
+</p>
+
+<div class="doc_code">
+<pre>
+def RetCC_X86_32 : CallingConv&lt;[
+ CCIfCC&lt;"CallingConv::Fast", CCDelegateTo&lt;RetCC_X86_32_Fast&gt;&gt;,
+ CCIfCC&lt;"CallingConv::X86_SSECall", CCDelegateTo&lt;RetCC_X86_32_SSE&gt;&gt;,
+ CCDelegateTo&lt;RetCC_X86_32_C&gt;
+]&gt;;
+</pre>
+</div>
+
+<p>Other calling convention interfaces include:</p>
+
+<ul>
+<li><tt>CCIf &lt;predicate, action&gt;</tt> &mdash; If the predicate matches,
+ apply the action.</li>
+
+<li><tt>CCIfInReg &lt;action&gt;</tt> &mdash; If the argument is marked with the
+ '<tt>inreg</tt>' attribute, then apply the action.</li>
+
+<li><tt>CCIfNest &lt;action&gt;</tt> &mdash; If the argument is marked with the
+ '<tt>nest</tt>' attribute, then apply the action.</li>
+
+<li><tt>CCIfNotVarArg &lt;action&gt;</tt> &mdash; If the current function does
+ not take a variable number of arguments, apply the action.</li>
+
+<li><tt>CCAssignToRegWithShadow &lt;registerList, shadowList&gt;</tt> &mdash;
+ similar to <tt>CCAssignToReg</tt>, but with a shadow list of registers.</li>
+
+<li><tt>CCPassByVal &lt;size, align&gt;</tt> &mdash; Assign value to a stack
+ slot with the minimum specified size and alignment.</li>
+
+<li><tt>CCPromoteToType &lt;type&gt;</tt> &mdash; Promote the current value to
+ the specified type.</li>
+
+<li><tt>CallingConv &lt;[actions]&gt;</tt> &mdash; Define each calling
+ convention that is supported.</li>
+</ul>
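+
+<p>
+On the C++ side, the conventions generated from these <tt>.td</tt> definitions
+are typically consumed through a <tt>CCState</tt> object in
+<tt>XXXISelLowering.cpp</tt>. The hedged sketch below assumes it runs inside a
+lowering hook such as <tt>LowerFormalArguments</tt>, where <tt>CallConv</tt>,
+<tt>isVarArg</tt>, <tt>Ins</tt>, and <tt>DAG</tt> are the usual parameters; the
+<tt>CCState</tt> constructor arguments differ between LLVM releases.
+</p>
+
+<div class="doc_code">
+<pre>
+SmallVector&lt;CCValAssign, 16&gt; ArgLocs;
+CCState CCInfo(CallConv, isVarArg, getTargetMachine(), ArgLocs,
+               *DAG.getContext());
+CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc32);  // run the generated convention
+
+for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
+  CCValAssign &amp;VA = ArgLocs[i];
+  if (VA.isRegLoc()) {
+    // this argument was assigned a register by CCAssignToReg
+  } else {
+    // this argument was assigned a stack slot by CCAssignToStack
+  }
+}
+</pre>
+</div>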
+
+</div>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2>
+ <a name="assemblyPrinter">Assembly Printer</a>
+</h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>
+During the code emission stage, the code generator may utilize an LLVM pass to
+produce assembly output. To do this, you should implement the code for a
+printer that converts LLVM IR to a GAS-format assembly language for your target
+machine, using the following steps:
+</p>
+
+<ul>
+<li>Define all the assembly strings for your target, adding them to the
+ instructions defined in the <tt>XXXInstrInfo.td</tt> file.
+ (See <a href="#InstructionSet">Instruction Set</a>.) TableGen will produce
+ an output file (<tt>XXXGenAsmWriter.inc</tt>) with an implementation of
+ the <tt>printInstruction</tt> method for the XXXAsmPrinter class.</li>
+
+<li>Write <tt>XXXTargetAsmInfo.h</tt>, which contains the bare-bones declaration
+ of the <tt>XXXTargetAsmInfo</tt> class (a subclass
+ of <tt>TargetAsmInfo</tt>).</li>
+
+<li>Write <tt>XXXTargetAsmInfo.cpp</tt>, which contains target-specific values
+ for <tt>TargetAsmInfo</tt> properties and sometimes new implementations for
+ methods.</li>
+
+<li>Write <tt>XXXAsmPrinter.cpp</tt>, which implements the <tt>AsmPrinter</tt>
+ class that performs the LLVM-to-assembly conversion.</li>
+</ul>
+
+<p>
+The code in <tt>XXXTargetAsmInfo.h</tt> is usually a trivial declaration of the
+<tt>XXXTargetAsmInfo</tt> class for use in <tt>XXXTargetAsmInfo.cpp</tt>.
+Similarly, <tt>XXXTargetAsmInfo.cpp</tt> usually has a few declarations of
+<tt>XXXTargetAsmInfo</tt> replacement values that override the default values
+in <tt>TargetAsmInfo.cpp</tt>. For example in <tt>SparcTargetAsmInfo.cpp</tt>:
+</p>
+
+<div class="doc_code">
+<pre>
+SparcTargetAsmInfo::SparcTargetAsmInfo(const SparcTargetMachine &amp;TM) {
+ Data16bitsDirective = "\t.half\t";
+ Data32bitsDirective = "\t.word\t";
+ Data64bitsDirective = 0; // .xword is only supported by V9.
+ ZeroDirective = "\t.skip\t";
+ CommentString = "!";
+ ConstantPoolSection = "\t.section \".rodata\",#alloc\n";
+}
+</pre>
+</div>
+
+<p>
+The X86 assembly printer implementation (<tt>X86TargetAsmInfo</tt>) is an
+example where the target-specific <tt>TargetAsmInfo</tt> subclass overrides a
+method: <tt>ExpandInlineAsm</tt>.
+</p>
+
+<p>
+A target-specific implementation of AsmPrinter is written in
+<tt>XXXAsmPrinter.cpp</tt>, which implements the <tt>AsmPrinter</tt> class that
+converts the LLVM IR to printable assembly. The implementation must include the
+following headers that have declarations for the <tt>AsmPrinter</tt> and
+<tt>MachineFunctionPass</tt> classes. The <tt>MachineFunctionPass</tt> is a
+subclass of <tt>FunctionPass</tt>.
+</p>
+
+<div class="doc_code">
+<pre>
+#include "llvm/CodeGen/AsmPrinter.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+</pre>
+</div>
+
+<p>
+As a <tt>FunctionPass</tt>, <tt>AsmPrinter</tt> first
+calls <tt>doInitialization</tt> to set up the <tt>AsmPrinter</tt>. In
+<tt>SparcAsmPrinter</tt>, a <tt>Mangler</tt> object is instantiated to process
+variable names.
+</p>
+
+<p>
+In <tt>XXXAsmPrinter.cpp</tt>, the <tt>runOnMachineFunction</tt> method
+(declared in <tt>MachineFunctionPass</tt>) must be implemented
+for <tt>XXXAsmPrinter</tt>. In <tt>MachineFunctionPass</tt>,
+the <tt>runOnFunction</tt> method invokes <tt>runOnMachineFunction</tt>.
+Target-specific implementations of <tt>runOnMachineFunction</tt> differ, but
+generally do the following to process each machine function:
+</p>
+
+<ul>
+<li>Call <tt>SetupMachineFunction</tt> to perform initialization.</li>
+
+<li>Call <tt>EmitConstantPool</tt> to print out (to the output stream) constants
+ which have been spilled to memory.</li>
+
+<li>Call <tt>EmitJumpTableInfo</tt> to print out jump tables used by the current
+ function.</li>
+
+<li>Print out the label for the current function.</li>
+
+<li>Print out the code for the function, including basic block labels and the
+  assembly for the instruction (using <tt>printInstruction</tt>).</li>
+</ul>
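+
+<p>
+In outline, a target's <tt>runOnMachineFunction</tt> often looks like the sketch
+below. The helper names mirror the steps just listed, but their exact signatures
+(for example, whether <tt>EmitConstantPool</tt> takes arguments) differ between
+LLVM releases, so treat this as a sketch rather than working code.
+</p>
+
+<div class="doc_code">
+<pre>
+bool SparcAsmPrinter::runOnMachineFunction(MachineFunction &amp;MF) {
+  SetupMachineFunction(MF);                    // per-function initialization
+  EmitConstantPool(MF.getConstantPool());      // constants spilled to memory
+  EmitJumpTableInfo(MF.getJumpTableInfo(), MF);
+  // ... print the label for the current function ...
+  for (MachineFunction::const_iterator I = MF.begin(), E = MF.end();
+       I != E; ++I) {
+    // ... print the basic block label ...
+    for (MachineBasicBlock::const_iterator II = I-&gt;begin(), IE = I-&gt;end();
+         II != IE; ++II)
+      printInstruction(&amp;*II);                  // TableGen-generated
+  }
+  return false;                                // the function was not modified
+}
+</pre>
+</div>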
+
+<p>
+The <tt>XXXAsmPrinter</tt> implementation must also include the code generated
+by TableGen that is output in the <tt>XXXGenAsmWriter.inc</tt> file. The code
+in <tt>XXXGenAsmWriter.inc</tt> contains an implementation of the
+<tt>printInstruction</tt> method that may call these methods:
+</p>
+
+<ul>
+<li><tt>printOperand</tt></li>
+
+<li><tt>printMemOperand</tt></li>
+
+<li><tt>printCCOperand</tt> (for conditional statements)</li>
+
+<li><tt>printDataDirective</tt></li>
+
+<li><tt>printDeclare</tt></li>
+
+<li><tt>printImplicitDef</tt></li>
+
+<li><tt>printInlineAsm</tt></li>
+</ul>
+
+<p>
+The implementations of <tt>printDeclare</tt>, <tt>printImplicitDef</tt>,
+<tt>printInlineAsm</tt>, and <tt>printLabel</tt> in <tt>AsmPrinter.cpp</tt> are
+generally adequate for printing assembly and do not need to be
+overridden.
+</p>
+
+<p>
+The <tt>printOperand</tt> method is implemented with a long switch/case
+statement for the type of operand: register, immediate, basic block, external
+symbol, global address, constant pool index, or jump table index. For an
+instruction with a memory address operand, the <tt>printMemOperand</tt> method
+should be implemented to generate the proper output. Similarly,
+<tt>printCCOperand</tt> should be used to print a conditional operand.
+</p>
+
+<p><tt>doFinalization</tt> should be overridden in <tt>XXXAsmPrinter</tt>, and
+it should be called to shut down the assembly printer. During
+<tt>doFinalization</tt>, global variables and constants are printed to
+output.
+</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2>
+ <a name="subtargetSupport">Subtarget Support</a>
+</h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>
+Subtarget support is used to inform the code generation process of instruction
+set variations for a given chip set. For example, the LLVM SPARC implementation
+provided covers three major versions of the SPARC microprocessor architecture:
+Version 8 (V8, which is a 32-bit architecture), Version 9 (V9, a 64-bit
+architecture), and the UltraSPARC architecture. V8 has 16 double-precision
+floating-point registers that are also usable as either 32 single-precision or 8
+quad-precision registers. V8 is also purely big-endian. V9 has 32
+double-precision floating-point registers that are also usable as 16
+quad-precision registers, but cannot be used as single-precision registers. The
+UltraSPARC architecture combines V9 with UltraSPARC Visual Instruction Set
+extensions.
+</p>
+
+<p>
+If subtarget support is needed, you should implement a target-specific
+XXXSubtarget class for your architecture. This class should process the
+command-line options <tt>-mcpu=</tt> and <tt>-mattr=</tt>.
+</p>
+
+<p>
+TableGen uses definitions in the <tt>Target.td</tt> and <tt>Sparc.td</tt> files
+to generate code in <tt>SparcGenSubtarget.inc</tt>. In <tt>Target.td</tt>, shown
+below, the <tt>SubtargetFeature</tt> interface is defined. The first four string
+parameters of the <tt>SubtargetFeature</tt> interface are a feature name, an
+attribute set by the feature, the value of the attribute, and a description of
+the feature. (The fifth parameter is a list of features whose presence is
+implied, and its default value is an empty array.)
+</p>
+
+<div class="doc_code">
+<pre>
+class SubtargetFeature&lt;string n, string a, string v, string d,
+ list&lt;SubtargetFeature&gt; i = []&gt; {
+ string Name = n;
+ string Attribute = a;
+ string Value = v;
+ string Desc = d;
+ list&lt;SubtargetFeature&gt; Implies = i;
+}
+</pre>
+</div>
+
+<p>
+In the <tt>Sparc.td</tt> file, <tt>SubtargetFeature</tt> is used to define the
+following features.
+</p>
+
+<div class="doc_code">
+<pre>
+def FeatureV9 : SubtargetFeature&lt;"v9", "IsV9", "true",
+ "Enable SPARC-V9 instructions"&gt;;
+def FeatureV8Deprecated : SubtargetFeature&lt;"deprecated-v8",
+ "V8DeprecatedInsts", "true",
+ "Enable deprecated V8 instructions in V9 mode"&gt;;
+def FeatureVIS : SubtargetFeature&lt;"vis", "IsVIS", "true",
+ "Enable UltraSPARC Visual Instruction Set extensions"&gt;;
+</pre>
+</div>
+
+<p>
+Elsewhere in <tt>Sparc.td</tt>, the <tt>Proc</tt> class is defined and then is used to
+define particular SPARC processor subtypes that may have the previously
+described features.
+</p>
+
+<div class="doc_code">
+<pre>
+class Proc&lt;string Name, list&lt;SubtargetFeature&gt; Features&gt;
+ : Processor&lt;Name, NoItineraries, Features&gt;;
+&nbsp;
+def : Proc&lt;"generic", []&gt;;
+def : Proc&lt;"v8", []&gt;;
+def : Proc&lt;"supersparc", []&gt;;
+def : Proc&lt;"sparclite", []&gt;;
+def : Proc&lt;"f934", []&gt;;
+def : Proc&lt;"hypersparc", []&gt;;
+def : Proc&lt;"sparclite86x", []&gt;;
+def : Proc&lt;"sparclet", []&gt;;
+def : Proc&lt;"tsc701", []&gt;;
+def : Proc&lt;"v9", [FeatureV9]&gt;;
+def : Proc&lt;"ultrasparc", [FeatureV9, FeatureV8Deprecated]&gt;;
+def : Proc&lt;"ultrasparc3", [FeatureV9, FeatureV8Deprecated]&gt;;
+def : Proc&lt;"ultrasparc3-vis", [FeatureV9, FeatureV8Deprecated, FeatureVIS]&gt;;
+</pre>
+</div>
+
+<p>
+From the <tt>Target.td</tt> and <tt>Sparc.td</tt> files, the resulting
+<tt>SparcGenSubtarget.inc</tt> specifies enum values to identify the features,
+arrays of constants to represent the CPU features and CPU subtypes, and the
+<tt>ParseSubtargetFeatures</tt> method that parses the features string and sets
+the specified subtarget options. The generated <tt>SparcGenSubtarget.inc</tt> file
+should be included in <tt>SparcSubtarget.cpp</tt>. The target-specific
+implementation of the <tt>XXXSubtarget</tt> constructor should follow this pseudocode:
+</p>
+
+<div class="doc_code">
+<pre>
+XXXSubtarget::XXXSubtarget(const Module &amp;M, const std::string &amp;FS) {
+ // Set the default features
+ // Determine default and user specified characteristics of the CPU
+ // Call ParseSubtargetFeatures(FS, CPU) to parse the features string
+ // Perform any additional operations
+}
+</pre>
+</div>
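+
+<p>
+Filled in for SPARC, a hedged sketch of that constructor might look as follows.
+The constructor parameters (a target triple and a features string here) and the
+default CPU name are assumptions that vary between releases; the member names
+match the attributes set by the <tt>SubtargetFeature</tt> definitions above.
+</p>
+
+<div class="doc_code">
+<pre>
+SparcSubtarget::SparcSubtarget(const std::string &amp;TT, const std::string &amp;FS)
+  : IsV9(false), V8DeprecatedInsts(false), IsVIS(false) {
+  // Default to a plain V8 CPU unless overridden by -mcpu.
+  std::string CPU = "v8";
+
+  // ParseSubtargetFeatures is generated by TableGen from Sparc.td; it sets
+  // IsV9, V8DeprecatedInsts, and IsVIS according to the features string FS.
+  ParseSubtargetFeatures(FS, CPU);
+}
+</pre>
+</div>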
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2>
+ <a name="jitSupport">JIT Support</a>
+</h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>
+The implementation of a target machine optionally includes a Just-In-Time (JIT)
+code generator that emits machine code and auxiliary structures as binary output
+that can be written directly to memory. To do this, implement JIT code
+generation by performing the following steps:
+</p>
+
+<ul>
+<li>Write an <tt>XXXCodeEmitter.cpp</tt> file that contains a machine function
+ pass that transforms target-machine instructions into relocatable machine
+ code.</li>
+
+<li>Write an <tt>XXXJITInfo.cpp</tt> file that implements the JIT interfaces for
+ target-specific code-generation activities, such as emitting machine code
+ and stubs.</li>
+
+<li>Modify <tt>XXXTargetMachine</tt> so that it provides a
+ <tt>TargetJITInfo</tt> object through its <tt>getJITInfo</tt> method.</li>
+</ul>
+
+<p>
+There are several different approaches to writing the JIT support code. For
+instance, TableGen and target descriptor files may be used for creating a JIT
+code generator, but are not mandatory. For the Alpha and PowerPC target
+machines, TableGen is used to generate <tt>XXXGenCodeEmitter.inc</tt>, which
+contains the binary coding of machine instructions and the
+<tt>getBinaryCodeForInstr</tt> method to access those codes. Other JIT
+implementations do not use TableGen.
+</p>
+
+<p>
+Both <tt>XXXJITInfo.cpp</tt> and <tt>XXXCodeEmitter.cpp</tt> must include the
+<tt>llvm/CodeGen/MachineCodeEmitter.h</tt> header file that defines the
+<tt>MachineCodeEmitter</tt> class containing code for several callback functions
+that write data (in bytes, words, strings, etc.) to the output stream.
+</p>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="mce">Machine Code Emitter</a>
+</h3>
+
+<div>
+
+<p>
+In <tt>XXXCodeEmitter.cpp</tt>, a target-specific version of the <tt>Emitter</tt> class
+is implemented as a function pass (subclass
+of <tt>MachineFunctionPass</tt>). The target-specific implementation
+of <tt>runOnMachineFunction</tt> (invoked by
+<tt>runOnFunction</tt> in <tt>MachineFunctionPass</tt>) iterates through the
+<tt>MachineBasicBlock</tt> and calls <tt>emitInstruction</tt> to process each
+instruction and emit binary code. <tt>emitInstruction</tt> is largely
+implemented with case statements on the instruction types defined in
+<tt>XXXInstrInfo.h</tt>. For example, in <tt>X86CodeEmitter.cpp</tt>,
+the <tt>emitInstruction</tt> method is built around the following switch/case
+statements:
+</p>
+
+<div class="doc_code">
+<pre>
+switch (Desc-&gt;TSFlags &amp; X86::FormMask) {
+case X86II::Pseudo: // for not yet implemented instructions
+ ... // or pseudo-instructions
+ break;
+case X86II::RawFrm: // for instructions with a fixed opcode value
+ ...
+ break;
+case X86II::AddRegFrm: // for instructions that have one register operand
+ ... // added to their opcode
+ break;
+case X86II::MRMDestReg:// for instructions that use the Mod/RM byte
+ ... // to specify a destination (register)
+ break;
+case X86II::MRMDestMem:// for instructions that use the Mod/RM byte
+ ... // to specify a destination (memory)
+ break;
+case X86II::MRMSrcReg: // for instructions that use the Mod/RM byte
+ ... // to specify a source (register)
+ break;
+case X86II::MRMSrcMem: // for instructions that use the Mod/RM byte
+ ... // to specify a source (memory)
+ break;
+case X86II::MRM0r: case X86II::MRM1r: // for instructions that operate on
+case X86II::MRM2r: case X86II::MRM3r: // a REGISTER r/m operand and
+case X86II::MRM4r: case X86II::MRM5r: // use the Mod/RM byte and a field
+case X86II::MRM6r: case X86II::MRM7r: // to hold extended opcode data
+ ...
+ break;
+case X86II::MRM0m: case X86II::MRM1m: // for instructions that operate on
+case X86II::MRM2m: case X86II::MRM3m: // a MEMORY r/m operand and
+case X86II::MRM4m: case X86II::MRM5m: // use the Mod/RM byte and a field
+case X86II::MRM6m: case X86II::MRM7m: // to hold extended opcode data
+ ...
+ break;
+case X86II::MRMInitReg: // for instructions whose source and
+ ... // destination are the same register
+ break;
+}
+</pre>
+</div>
+
+<p>
+The implementations of these case statements often first emit the opcode and
+then get the operand(s). Then depending upon the operand, helper methods may be
+called to process the operand(s). For example, in <tt>X86CodeEmitter.cpp</tt>,
+for the <tt>X86II::AddRegFrm</tt> case, the first data emitted
+(by <tt>emitByte</tt>) is the opcode added to the register operand. Then an
+object representing the machine operand, <tt>MO1</tt>, is extracted. The helper
+methods such as <tt>isImmediate</tt>,
+<tt>isGlobalAddress</tt>, <tt>isExternalSymbol</tt>, <tt>isConstantPoolIndex</tt>, and
+<tt>isJumpTableIndex</tt> determine the operand
+type. (<tt>X86CodeEmitter.cpp</tt> also has private methods such
+as <tt>emitConstant</tt>, <tt>emitGlobalAddress</tt>,
+<tt>emitExternalSymbolAddress</tt>, <tt>emitConstPoolAddress</tt>,
+and <tt>emitJumpTableAddress</tt> that emit the data into the output stream.)
+</p>
+
+<div class="doc_code">
+<pre>
+case X86II::AddRegFrm:
+ MCE.emitByte(BaseOpcode + getX86RegNum(MI.getOperand(CurOp++).getReg()));
+
+ if (CurOp != NumOps) {
+ const MachineOperand &amp;MO1 = MI.getOperand(CurOp++);
+ unsigned Size = X86InstrInfo::sizeOfImm(Desc);
+ if (MO1.isImmediate())
+ emitConstant(MO1.getImm(), Size);
+ else {
+ unsigned rt = Is64BitMode ? X86::reloc_pcrel_word
+ : (IsPIC ? X86::reloc_picrel_word : X86::reloc_absolute_word);
+ if (Opcode == X86::MOV64ri)
+ rt = X86::reloc_absolute_dword; // FIXME: add X86II flag?
+ if (MO1.isGlobalAddress()) {
+ bool NeedStub = isa&lt;Function&gt;(MO1.getGlobal());
+ bool isLazy = gvNeedsLazyPtr(MO1.getGlobal());
+ emitGlobalAddress(MO1.getGlobal(), rt, MO1.getOffset(), 0,
+ NeedStub, isLazy);
+ } else if (MO1.isExternalSymbol())
+ emitExternalSymbolAddress(MO1.getSymbolName(), rt);
+ else if (MO1.isConstantPoolIndex())
+ emitConstPoolAddress(MO1.getIndex(), rt);
+ else if (MO1.isJumpTableIndex())
+ emitJumpTableAddress(MO1.getIndex(), rt);
+ }
+ }
+ break;
+</pre>
+</div>
+
+<p>
+In the previous example, <tt>XXXCodeEmitter.cpp</tt> uses the
+variable <tt>rt</tt>, which is a RelocationType enum that may be used to
+relocate addresses (for example, a global address with a PIC base offset). The
+<tt>RelocationType</tt> enum for that target is defined in the short
+target-specific <tt>XXXRelocations.h</tt> file. The <tt>RelocationType</tt> is used by
+the <tt>relocate</tt> method defined in <tt>XXXJITInfo.cpp</tt> to rewrite
+addresses for referenced global symbols.
+</p>
+
+<p>
+For example, <tt>X86Relocations.h</tt> specifies the following relocation types
+for the X86 addresses. In all four cases, the relocated value is added to the
+value already in memory. For <tt>reloc_pcrel_word</tt>
+and <tt>reloc_picrel_word</tt>, there is an additional initial adjustment.
+</p>
+
+<div class="doc_code">
+<pre>
+enum RelocationType {
+ reloc_pcrel_word = 0, // add reloc value after adjusting for the PC loc
+ reloc_picrel_word = 1, // add reloc value after adjusting for the PIC base
+ reloc_absolute_word = 2, // absolute relocation; no additional adjustment
+ reloc_absolute_dword = 3 // absolute relocation; no additional adjustment
+};
+</pre>
+</div>
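+
+<p>
+A simplified, hedged sketch of how <tt>relocate</tt> consumes these relocation
+types is shown below. It follows the shape of the X86 implementation but omits
+the PIC-relative case and other details, so consult <tt>X86JITInfo.cpp</tt> for
+the real version.
+</p>
+
+<div class="doc_code">
+<pre>
+void X86JITInfo::relocate(void *Function, MachineRelocation *MR,
+                          unsigned NumRelocs, unsigned char *GOTBase) {
+  for (unsigned i = 0; i != NumRelocs; ++i, ++MR) {
+    void *RelocPos = (char*)Function + MR-&gt;getMachineCodeOffset();
+    intptr_t ResultPtr = (intptr_t)MR-&gt;getResultPointer();
+    switch ((X86::RelocationType)MR-&gt;getRelocationType()) {
+    case X86::reloc_pcrel_word:
+      // PC-relative: adjust for the location being patched
+      ResultPtr = ResultPtr - (intptr_t)RelocPos - 4;
+      *((unsigned*)RelocPos) += (unsigned)ResultPtr;
+      break;
+    case X86::reloc_absolute_word:
+      *((unsigned*)RelocPos) += (unsigned)ResultPtr;
+      break;
+    case X86::reloc_absolute_dword:
+      *((intptr_t*)RelocPos) += ResultPtr;
+      break;
+    default:
+      break;   // reloc_picrel_word and other cases omitted
+    }
+  }
+}
+</pre>
+</div>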
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="targetJITInfo">Target JIT Info</a>
+</h3>
+
+<div>
+
+<p>
+<tt>XXXJITInfo.cpp</tt> implements the JIT interfaces for target-specific
+code-generation activities, such as emitting machine code and stubs. At minimum,
+a target-specific version of <tt>XXXJITInfo</tt> implements the following:
+</p>
+
+<ul>
+<li><tt>getLazyResolverFunction</tt> &mdash; Initializes the JIT, gives the
+ target a function that is used for compilation.</li>
+
+<li><tt>emitFunctionStub</tt> &mdash; Returns a native function with a specified
+ address for a callback function.</li>
+
+<li><tt>relocate</tt> &mdash; Changes the addresses of referenced globals, based
+ on relocation types.</li>
+
+<li>A callback function that wraps a function stub, used when the
+    real target is not initially known.</li>
+</ul>
+
+<p>
+<tt>getLazyResolverFunction</tt> is generally trivial to implement. It stores the
+incoming parameter as the global <tt>JITCompilerFunction</tt> and returns the
+callback function that will be used as a function wrapper. For the Alpha target
+(in <tt>AlphaJITInfo.cpp</tt>), the <tt>getLazyResolverFunction</tt>
+implementation is simply:
+</p>
+
+<div class="doc_code">
+<pre>
+TargetJITInfo::LazyResolverFn AlphaJITInfo::getLazyResolverFunction(
+ JITCompilerFn F) {
+ JITCompilerFunction = F;
+ return AlphaCompilationCallback;
+}
+</pre>
+</div>
+
+<p>
+For the X86 target, the <tt>getLazyResolverFunction</tt> implementation is a
+little more complicated, because it returns a different callback function for
+processors with SSE instructions and XMM registers.
+</p>
+
+<p>
+The callback function initially saves and later restores the callee register
+values, incoming arguments, and frame and return address. The callback function
+needs low-level access to the registers or stack, so it is typically implemented
+with assembler.
+</p>
+
+</div>
+
+</div>
+
+<!-- *********************************************************************** -->
+
+<hr>
+<address>
+ <a href="http://jigsaw.w3.org/css-validator/check/referer"><img
+ src="http://jigsaw.w3.org/css-validator/images/vcss-blue" alt="Valid CSS"></a>
+ <a href="http://validator.w3.org/check/referer"><img
+ src="http://www.w3.org/Icons/valid-html401-blue" alt="Valid HTML 4.01"></a>
+
+ <a href="http://www.woo.com">Mason Woo</a> and <a href="http://misha.brukman.net">Misha Brukman</a><br>
+ <a href="http://llvm.org/">The LLVM Compiler Infrastructure</a>
+ <br>
+ Last modified: $Date$
+</address>
+
+</body>
+</html>
diff --git a/docs/WritingAnLLVMPass.html b/docs/WritingAnLLVMPass.html
new file mode 100644
index 00000000000..af1ffa4fb7a
--- /dev/null
+++ b/docs/WritingAnLLVMPass.html
@@ -0,0 +1,1954 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
+ "http://www.w3.org/TR/html4/strict.dtd">
+<html>
+<head>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
+ <title>Writing an LLVM Pass</title>
+ <link rel="stylesheet" href="_static/llvm.css" type="text/css">
+</head>
+<body>
+
+<h1>
+ Writing an LLVM Pass
+</h1>
+
+<ol>
+ <li><a href="#introduction">Introduction - What is a pass?</a></li>
+ <li><a href="#quickstart">Quick Start - Writing hello world</a>
+ <ul>
+ <li><a href="#makefile">Setting up the build environment</a></li>
+ <li><a href="#basiccode">Basic code required</a></li>
+ <li><a href="#running">Running a pass with <tt>opt</tt></a></li>
+ </ul></li>
+ <li><a href="#passtype">Pass classes and requirements</a>
+ <ul>
+ <li><a href="#ImmutablePass">The <tt>ImmutablePass</tt> class</a></li>
+ <li><a href="#ModulePass">The <tt>ModulePass</tt> class</a>
+ <ul>
+ <li><a href="#runOnModule">The <tt>runOnModule</tt> method</a></li>
+ </ul></li>
+ <li><a href="#CallGraphSCCPass">The <tt>CallGraphSCCPass</tt> class</a>
+ <ul>
+ <li><a href="#doInitialization_scc">The <tt>doInitialization(CallGraph
+ &amp;)</tt> method</a></li>
+ <li><a href="#runOnSCC">The <tt>runOnSCC</tt> method</a></li>
+ <li><a href="#doFinalization_scc">The <tt>doFinalization(CallGraph
+ &amp;)</tt> method</a></li>
+ </ul></li>
+ <li><a href="#FunctionPass">The <tt>FunctionPass</tt> class</a>
+ <ul>
+ <li><a href="#doInitialization_mod">The <tt>doInitialization(Module
+ &amp;)</tt> method</a></li>
+ <li><a href="#runOnFunction">The <tt>runOnFunction</tt> method</a></li>
+ <li><a href="#doFinalization_mod">The <tt>doFinalization(Module
+ &amp;)</tt> method</a></li>
+ </ul></li>
+ <li><a href="#LoopPass">The <tt>LoopPass</tt> class</a>
+ <ul>
+ <li><a href="#doInitialization_loop">The <tt>doInitialization(Loop *,
+ LPPassManager &amp;)</tt> method</a></li>
+ <li><a href="#runOnLoop">The <tt>runOnLoop</tt> method</a></li>
+ <li><a href="#doFinalization_loop">The <tt>doFinalization()
+ </tt> method</a></li>
+ </ul></li>
+ <li><a href="#RegionPass">The <tt>RegionPass</tt> class</a>
+ <ul>
+ <li><a href="#doInitialization_region">The <tt>doInitialization(Region *,
+ RGPassManager &amp;)</tt> method</a></li>
+ <li><a href="#runOnRegion">The <tt>runOnRegion</tt> method</a></li>
+ <li><a href="#doFinalization_region">The <tt>doFinalization()
+ </tt> method</a></li>
+ </ul></li>
+ <li><a href="#BasicBlockPass">The <tt>BasicBlockPass</tt> class</a>
+ <ul>
+ <li><a href="#doInitialization_fn">The <tt>doInitialization(Function
+ &amp;)</tt> method</a></li>
+ <li><a href="#runOnBasicBlock">The <tt>runOnBasicBlock</tt>
+ method</a></li>
+ <li><a href="#doFinalization_fn">The <tt>doFinalization(Function
+ &amp;)</tt> method</a></li>
+ </ul></li>
+ <li><a href="#MachineFunctionPass">The <tt>MachineFunctionPass</tt>
+ class</a>
+ <ul>
+ <li><a href="#runOnMachineFunction">The
+ <tt>runOnMachineFunction(MachineFunction &amp;)</tt> method</a></li>
+ </ul></li>
+ </ul>
+ <li><a href="#registration">Pass Registration</a>
+ <ul>
+ <li><a href="#print">The <tt>print</tt> method</a></li>
+ </ul></li>
+ <li><a href="#interaction">Specifying interactions between passes</a>
+ <ul>
+ <li><a href="#getAnalysisUsage">The <tt>getAnalysisUsage</tt>
+ method</a></li>
+ <li><a href="#AU::addRequired">The <tt>AnalysisUsage::addRequired&lt;&gt;</tt> and <tt>AnalysisUsage::addRequiredTransitive&lt;&gt;</tt> methods</a></li>
+ <li><a href="#AU::addPreserved">The <tt>AnalysisUsage::addPreserved&lt;&gt;</tt> method</a></li>
+ <li><a href="#AU::examples">Example implementations of <tt>getAnalysisUsage</tt></a></li>
+ <li><a href="#getAnalysis">The <tt>getAnalysis&lt;&gt;</tt> and
+<tt>getAnalysisIfAvailable&lt;&gt;</tt> methods</a></li>
+ </ul></li>
+ <li><a href="#analysisgroup">Implementing Analysis Groups</a>
+ <ul>
+ <li><a href="#agconcepts">Analysis Group Concepts</a></li>
+ <li><a href="#registerag">Using <tt>RegisterAnalysisGroup</tt></a></li>
+ </ul></li>
+ <li><a href="#passStatistics">Pass Statistics</a>
+ <li><a href="#passmanager">What PassManager does</a>
+ <ul>
+ <li><a href="#releaseMemory">The <tt>releaseMemory</tt> method</a></li>
+ </ul></li>
+ <li><a href="#registering">Registering dynamically loaded passes</a>
+ <ul>
+ <li><a href="#registering_existing">Using existing registries</a></li>
+ <li><a href="#registering_new">Creating new registries</a></li>
+ </ul></li>
+ <li><a href="#debughints">Using GDB with dynamically loaded passes</a>
+ <ul>
+ <li><a href="#breakpoint">Setting a breakpoint in your pass</a></li>
+ <li><a href="#debugmisc">Miscellaneous Problems</a></li>
+ </ul></li>
+ <li><a href="#future">Future extensions planned</a>
+ <ul>
+ <li><a href="#SMP">Multithreaded LLVM</a></li>
+ </ul></li>
+</ol>
+
+<div class="doc_author">
+ <p>Written by <a href="mailto:sabre@nondot.org">Chris Lattner</a> and
+ <a href="mailto:jlaskey@mac.com">Jim Laskey</a></p>
+</div>
+
+<!-- *********************************************************************** -->
+<h2>
+ <a name="introduction">Introduction - What is a pass?</a>
+</h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>The LLVM Pass Framework is an important part of the LLVM system, because LLVM
+passes are where most of the interesting parts of the compiler exist. Passes
+perform the transformations and optimizations that make up the compiler, they
+build the analysis results that are used by these transformations, and they are,
+above all, a structuring technique for compiler code.</p>
+
+<p>All LLVM passes are subclasses of the <tt><a
+href="http://llvm.org/doxygen/classllvm_1_1Pass.html">Pass</a></tt>
+class and implement functionality by overriding virtual methods inherited
+from <tt>Pass</tt>. Depending on how your pass works, you should inherit from
+the <tt><a href="#ModulePass">ModulePass</a></tt>, <tt><a
+href="#CallGraphSCCPass">CallGraphSCCPass</a></tt>, <tt><a
+href="#FunctionPass">FunctionPass</a></tt>, <tt><a
+href="#LoopPass">LoopPass</a></tt>, <tt><a
+href="#RegionPass">RegionPass</a></tt>, or <tt><a
+href="#BasicBlockPass">BasicBlockPass</a></tt> class, which gives the system
+more information about what your pass does and how it can be combined with
+other passes. One of the main features of the LLVM Pass Framework is that it
+schedules passes to run efficiently, based on the constraints that your
+pass meets (which are indicated by the class it derives from).</p>
+
+<p>We start by showing you how to construct a pass, everything from setting up
+the code, to compiling, loading, and executing it. After the basics are down,
+more advanced features are discussed.</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2>
+ <a name="quickstart">Quick Start - Writing hello world</a>
+</h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>Here we describe how to write the "hello world" of passes. The "Hello" pass
+is designed to simply print out the name of non-external functions that exist in
+the program being compiled. It does not modify the program at all, it just
+inspects it. The source code and files for this pass are available in the LLVM
+source tree in the <tt>lib/Transforms/Hello</tt> directory.</p>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="makefile">Setting up the build environment</a>
+</h3>
+
+<div>
+
+ <p>First, configure and build LLVM. This needs to be done directly inside the
+ LLVM source tree rather than in a separate objects directory.
+ Next, you need to create a new directory somewhere in the LLVM source
+ base. For this example, we'll assume that you made
+ <tt>lib/Transforms/Hello</tt>. Finally, you must set up a build script
+ (Makefile) that will compile the source code for the new pass. To do this,
+ copy the following into <tt>Makefile</tt>:</p>
+ <hr>
+
+<div class="doc_code"><pre>
+# Makefile for hello pass
+
+# Path to top level of LLVM hierarchy
+LEVEL = ../../..
+
+# Name of the library to build
+LIBRARYNAME = Hello
+
+# Make the shared library become a loadable module so the tools can
+# dlopen/dlsym on the resulting library.
+LOADABLE_MODULE = 1
+
+# Include the makefile implementation stuff
+include $(LEVEL)/Makefile.common
+</pre></div>
+
+<p>This makefile specifies that all of the <tt>.cpp</tt> files in the current
+directory are to be compiled and linked together into a shared object
+<tt>$(LEVEL)/Debug+Asserts/lib/Hello.so</tt> that can be dynamically loaded by
+the <tt>opt</tt> or <tt>bugpoint</tt> tools via their <tt>-load</tt> options.
+If your operating system uses a suffix other than <tt>.so</tt> (such as Windows
+or Mac OS X), the appropriate extension will be used.</p>
+
+<p>If you are using CMake to build LLVM, see
+<a href="CMake.html#passdev">Developing an LLVM pass with CMake</a>.</p>
+
+<p>Now that we have the build scripts set up, we just need to write the code for
+the pass itself.</p>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="basiccode">Basic code required</a>
+</h3>
+
+<div>
+
+<p>Now that we have a way to compile our new pass, we just have to write it.
+Start out with:</p>
+
+<div class="doc_code">
+<pre>
+<b>#include</b> "<a href="http://llvm.org/doxygen/Pass_8h-source.html">llvm/Pass.h</a>"
+<b>#include</b> "<a href="http://llvm.org/doxygen/Function_8h-source.html">llvm/Function.h</a>"
+<b>#include</b> "<a href="http://llvm.org/doxygen/raw__ostream_8h.html">llvm/Support/raw_ostream.h</a>"
+</pre>
+</div>
+
+<p>These are needed because we are writing a <tt><a
+href="http://llvm.org/doxygen/classllvm_1_1Pass.html">Pass</a></tt>,
+we are operating on <tt><a
+href="http://llvm.org/doxygen/classllvm_1_1Function.html">Function</a></tt>'s,
+and we will be doing some printing.</p>
+
+<p>Next we have:</p>
+
+<div class="doc_code">
+<pre>
+<b>using namespace llvm;</b>
+</pre>
+</div>
+
+<p>... which is required because the functions from the include files
+live in the llvm namespace.</p>
+
+<p>Next we have:</p>
+
+<div class="doc_code">
+<pre>
+<b>namespace</b> {
+</pre>
+</div>
+
+<p>... which starts out an anonymous namespace. Anonymous namespaces are to C++
+what the "<tt>static</tt>" keyword is to C (at global scope). It makes the
+things declared inside of the anonymous namespace visible only to the current
+file. If you're not familiar with them, consult a decent C++ book for more
+information.</p>
+
+<p>Next, we declare our pass itself:</p>
+
+<div class="doc_code">
+<pre>
+ <b>struct</b> Hello : <b>public</b> <a href="#FunctionPass">FunctionPass</a> {
+</pre>
+</div>
+
+<p>This declares a "<tt>Hello</tt>" class that is a subclass of <tt><a
+href="http://llvm.org/doxygen/classllvm_1_1FunctionPass.html">FunctionPass</a></tt>.
+The different builtin pass subclasses are described in detail <a
+href="#passtype">later</a>, but for now, know that <a
+href="#FunctionPass"><tt>FunctionPass</tt></a>'s operate on a function at a
+time.</p>
+
+<div class="doc_code">
+<pre>
+ static char ID;
+ Hello() : FunctionPass(ID) {}
+</pre>
+</div>
+
+<p>This declares a pass identifier used by LLVM to identify the pass. This
+allows LLVM to avoid using expensive C++ runtime type information.</p>
+
+<div class="doc_code">
+<pre>
+ <b>virtual bool</b> <a href="#runOnFunction">runOnFunction</a>(Function &amp;F) {
+ errs() &lt;&lt; "<i>Hello: </i>";
+ errs().write_escaped(F.getName()) &lt;&lt; "\n";
+ <b>return false</b>;
+ }
+ }; <i>// end of struct Hello</i>
+} <i>// end of anonymous namespace</i>
+</pre>
+</div>
+
+<p>We declare a "<a href="#runOnFunction"><tt>runOnFunction</tt></a>" method,
+which overrides an abstract virtual method inherited from <a
+href="#FunctionPass"><tt>FunctionPass</tt></a>. This is where we are supposed
+to do our thing, so we just print out our message with the name of each
+function.</p>
+
+<div class="doc_code">
+<pre>
+char Hello::ID = 0;
+</pre>
+</div>
+
+<p>We initialize the pass ID here. LLVM uses the ID's address to identify a
+pass, so the initialization value is not important.</p>
+
+<div class="doc_code">
+<pre>
+static RegisterPass&lt;Hello&gt; X("<i>hello</i>", "<i>Hello World Pass</i>",
+ false /* Only looks at CFG */,
+ false /* Analysis Pass */);
+</pre>
+</div>
+
+<p>Lastly, we <a href="#registration">register our class</a> <tt>Hello</tt>,
+giving it a command line argument "<tt>hello</tt>", and a name "<tt>Hello World
+Pass</tt>". The last two arguments describe its behavior: if a pass only walks
+the CFG without modifying it, the third argument is set to <tt>true</tt>; if a
+pass is an analysis pass, for example the dominator tree pass, then
+<tt>true</tt> is supplied as the fourth argument.</p>
+
+<p>As a whole, the <tt>.cpp</tt> file looks like:</p>
+
+<div class="doc_code">
+<pre>
+<b>#include</b> "<a href="http://llvm.org/doxygen/Pass_8h-source.html">llvm/Pass.h</a>"
+<b>#include</b> "<a href="http://llvm.org/doxygen/Function_8h-source.html">llvm/Function.h</a>"
+<b>#include</b> "<a href="http://llvm.org/doxygen/raw__ostream_8h.html">llvm/Support/raw_ostream.h</a>"
+
+<b>using namespace llvm;</b>
+
+<b>namespace</b> {
+ <b>struct Hello</b> : <b>public</b> <a href="#FunctionPass">FunctionPass</a> {
+
+ static char ID;
+ Hello() : FunctionPass(ID) {}
+
+ <b>virtual bool</b> <a href="#runOnFunction">runOnFunction</a>(Function &amp;F) {
+ errs() &lt;&lt; "<i>Hello: </i>";
+ errs().write_escaped(F.getName()) &lt;&lt; '\n';
+ <b>return false</b>;
+ }
+
+ };
+}
+
+char Hello::ID = 0;
+static RegisterPass&lt;Hello&gt; X("hello", "Hello World Pass", false, false);
+</pre>
+</div>
+
+<p>Now that it's all together, compile the file with a simple "<tt>gmake</tt>"
+command in the local directory and you should get a new file
+"<tt>Debug+Asserts/lib/Hello.so</tt>" under the top level directory of the LLVM
+source tree (not in the local directory). Note that everything in this file is
+contained in an anonymous namespace &mdash; this reflects the fact that passes
+are self contained units that do not need external interfaces (although they can
+have them) to be useful.</p>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="running">Running a pass with <tt>opt</tt></a>
+</h3>
+
+<div>
+
+<p>Now that you have a brand new shiny shared object file, we can use the
+<tt>opt</tt> command to run an LLVM program through your pass. Because you
+registered your pass with <tt>RegisterPass</tt>, you will be able to
+use the <tt>opt</tt> tool to access it, once loaded.</p>
+
+<p>To test it, follow the example at the end of the <a
+href="GettingStarted.html">Getting Started Guide</a> to compile "Hello World" to
+LLVM. We can now run the bitcode file (<tt>hello.bc</tt>) for the program
+through our transformation like this (of course, any bitcode file will
+work):</p>
+
+<div class="doc_code"><pre>
+$ opt -load ../../../Debug+Asserts/lib/Hello.so -hello &lt; hello.bc &gt; /dev/null
+Hello: __main
+Hello: puts
+Hello: main
+</pre></div>
+
+<p>The '<tt>-load</tt>' option specifies that '<tt>opt</tt>' should load your
+pass as a shared object, which makes '<tt>-hello</tt>' a valid command line
+argument (which is one reason you need to <a href="#registration">register your
+pass</a>). Because the hello pass does not modify the program in any
+interesting way, we just throw away the result of <tt>opt</tt> (sending it to
+<tt>/dev/null</tt>).</p>
+
+<p>To see what happened to the other string you registered, try running
+<tt>opt</tt> with the <tt>-help</tt> option:</p>
+
+<div class="doc_code"><pre>
+$ opt -load ../../../Debug+Asserts/lib/Hello.so -help
+OVERVIEW: llvm .bc -&gt; .bc modular optimizer
+
+USAGE: opt [options] &lt;input bitcode&gt;
+
+OPTIONS:
+ Optimizations available:
+...
+ -globalopt - Global Variable Optimizer
+ -globalsmodref-aa - Simple mod/ref analysis for globals
+ -gvn - Global Value Numbering
+ <b>-hello - Hello World Pass</b>
+ -indvars - Induction Variable Simplification
+ -inline - Function Integration/Inlining
+ -insert-edge-profiling - Insert instrumentation for edge profiling
+...
+</pre></div>
+
+<p>The pass name gets added as the information string for your pass, giving some
+documentation to users of <tt>opt</tt>. Now that you have a working pass, you
+would go ahead and make it do the cool transformations you want. Once you get
+it all working and tested, it may become useful to find out how fast your pass
+is. The <a href="#passmanager"><tt>PassManager</tt></a> provides a nice command
+line option (<tt>--time-passes</tt>) that allows you to get information about
+the execution time of your pass along with the other passes you queue up. For
+example:</p>
+
+<div class="doc_code"><pre>
+$ opt -load ../../../Debug+Asserts/lib/Hello.so -hello -time-passes &lt; hello.bc &gt; /dev/null
+Hello: __main
+Hello: puts
+Hello: main
+===============================================================================
+ ... Pass execution timing report ...
+===============================================================================
+ Total Execution Time: 0.02 seconds (0.0479059 wall clock)
+
+ ---User Time--- --System Time-- --User+System-- ---Wall Time--- --- Pass Name ---
+ 0.0100 (100.0%) 0.0000 ( 0.0%) 0.0100 ( 50.0%) 0.0402 ( 84.0%) Bitcode Writer
+ 0.0000 ( 0.0%) 0.0100 (100.0%) 0.0100 ( 50.0%) 0.0031 ( 6.4%) Dominator Set Construction
+ 0.0000 ( 0.0%) 0.0000 ( 0.0%) 0.0000 ( 0.0%) 0.0013 ( 2.7%) Module Verifier
+ <b> 0.0000 ( 0.0%) 0.0000 ( 0.0%) 0.0000 ( 0.0%) 0.0033 ( 6.9%) Hello World Pass</b>
+ 0.0100 (100.0%) 0.0100 (100.0%) 0.0200 (100.0%) 0.0479 (100.0%) TOTAL
+</pre></div>
+
+<p>As you can see, our implementation above is pretty fast :). The additional
+passes listed are automatically inserted by the '<tt>opt</tt>' tool to verify
+that the LLVM emitted by your pass is still valid and well formed LLVM, which
+hasn't been broken somehow.</p>
+
+<p>Now that you have seen the basics of the mechanics behind passes, we can talk
+about some more details of how they work and how to use them.</p>
+
+</div>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2>
+ <a name="passtype">Pass classes and requirements</a>
+</h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>One of the first things that you should do when designing a new pass is to
+decide what class you should subclass for your pass. The <a
+href="#basiccode">Hello World</a> example uses the <tt><a
+href="#FunctionPass">FunctionPass</a></tt> class for its implementation, but we
+did not discuss why or when this should occur. Here we talk about the classes
+available, from the most general to the most specific.</p>
+
+<p>When choosing a superclass for your Pass, you should choose the <b>most
+specific</b> class possible, while still being able to meet the requirements
+listed. This gives the LLVM Pass Infrastructure information necessary to
+optimize how passes are run, so that the resultant compiler isn't unnecessarily
+slow.</p>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="ImmutablePass">The <tt>ImmutablePass</tt> class</a>
+</h3>
+
+<div>
+
+<p>The most plain and boring type of pass is the "<tt><a
+href="http://llvm.org/doxygen/classllvm_1_1ImmutablePass.html">ImmutablePass</a></tt>"
+class. This pass type is used for passes that do not have to be run, do not
+change state, and never need to be updated. This is not a normal type of
+transformation or analysis, but can provide information about the current
+compiler configuration.</p>
+
+<p>Although this pass class is very infrequently used, it is important for
+providing information about the current target machine being compiled for, and
+other static information that can affect the various transformations.</p>
+
+<p><tt>ImmutablePass</tt>es never invalidate other transformations, are never
+invalidated, and are never "run".</p>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="ModulePass">The <tt>ModulePass</tt> class</a>
+</h3>
+
+<div>
+
+<p>The "<tt><a
+href="http://llvm.org/doxygen/classllvm_1_1ModulePass.html">ModulePass</a></tt>"
+class is the most general of all superclasses that you can use. Deriving from
+<tt>ModulePass</tt> indicates that your pass uses the entire program as a unit,
+referring to function bodies in no predictable order, or adding and removing
+functions. Because nothing is known about the behavior of <tt>ModulePass</tt>
+subclasses, no optimization can be done for their execution.</p>
+
+<p>A module pass can use function level analyses (e.g. dominators) through the
+getAnalysis interface, as in
+<tt>getAnalysis&lt;DominatorTree&gt;(llvm::Function *)</tt>, passing the
+function whose analysis result should be retrieved. This works only if the
+function pass does not require any module or immutable passes. Note that it can
+only be done for functions for which the analysis has already been run; in the
+case of dominators, for example, you should only ask for the DominatorTree of
+function definitions, not declarations.</p>
+
+<p>To write a correct <tt>ModulePass</tt> subclass, derive from
+<tt>ModulePass</tt> and overload the <tt>runOnModule</tt> method with the
+following signature:</p>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="runOnModule">The <tt>runOnModule</tt> method</a>
+</h4>
+
+<div>
+
+<div class="doc_code"><pre>
+<b>virtual bool</b> runOnModule(Module &amp;M) = 0;
+</pre></div>
+
+<p>The <tt>runOnModule</tt> method performs the interesting work of the pass.
+It should return true if the module was modified by the transformation and
+false otherwise.</p>
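+
+<p>For example, a minimal (hypothetical) module pass that merely counts the
+function definitions in a module could be written like this; it returns
+<tt>false</tt> because it never changes the module, and it would be registered
+and built exactly like the <a href="#basiccode">Hello World</a> pass:</p>
+
+<div class="doc_code"><pre>
+<b>namespace</b> {
+  <b>struct</b> FunctionCounter : <b>public</b> ModulePass {
+    static char ID;
+    FunctionCounter() : ModulePass(ID) {}
+
+    <b>virtual bool</b> runOnModule(Module &amp;M) {
+      unsigned Count = 0;
+      for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F)
+        if (!F-&gt;isDeclaration())
+          ++Count;
+      errs() &lt;&lt; "Module has " &lt;&lt; Count &lt;&lt; " function definitions\n";
+      <b>return false</b>;  <i>// the module was not modified</i>
+    }
+  };
+}
+
+char FunctionCounter::ID = 0;
+</pre></div>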
+
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="CallGraphSCCPass">The <tt>CallGraphSCCPass</tt> class</a>
+</h3>
+
+<div>
+
+<p>The "<tt><a
+href="http://llvm.org/doxygen/classllvm_1_1CallGraphSCCPass.html">CallGraphSCCPass</a></tt>"
+is used by passes that need to traverse the program bottom-up on the call graph
+(callees before callers). Deriving from CallGraphSCCPass provides some
+mechanics for building and traversing the CallGraph, but also allows the system
+to optimize execution of CallGraphSCCPass's. If your pass meets the
+requirements outlined below, and doesn't meet the requirements of a <tt><a
+href="#FunctionPass">FunctionPass</a></tt> or <tt><a
+href="#BasicBlockPass">BasicBlockPass</a></tt>, you should derive from
+<tt>CallGraphSCCPass</tt>.</p>
+
+<p>An SCC (strongly connected component) of the call graph is a maximal set of
+functions that can all, possibly indirectly, call one another. SCCs are
+computed with Tarjan's algorithm and are visited bottom-up (B-U), that is,
+callees before callers.</p>
+
+<p>To be explicit, <tt>CallGraphSCCPass</tt> subclasses are:</p>
+
+<ol>
+
+<li>... <em>not allowed</em> to inspect or modify any <tt>Function</tt>s other
+than those in the current SCC and the direct callers and direct callees of the
+SCC.</li>
+
+<li>... <em>required</em> to preserve the current CallGraph object, updating it
+to reflect any changes made to the program.</li>
+
+<li>... <em>not allowed</em> to add or remove SCC's from the current Module,
+though they may change the contents of an SCC.</li>
+
+<li>... <em>allowed</em> to add or remove global variables from the current
+Module.</li>
+
+<li>... <em>allowed</em> to maintain state across invocations of
+ <a href="#runOnSCC"><tt>runOnSCC</tt></a> (including global data).</li>
+</ol>
+
+<p>Implementing a <tt>CallGraphSCCPass</tt> is slightly tricky in some cases
+because it has to handle SCCs with more than one node in it. All of the virtual
+methods described below should return true if they modified the program, or
+false if they didn't.</p>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="doInitialization_scc">
+ The <tt>doInitialization(CallGraph &amp;)</tt> method
+ </a>
+</h4>
+
+<div>
+
+<div class="doc_code"><pre>
+<b>virtual bool</b> doInitialization(CallGraph &amp;CG);
+</pre></div>
+
+<p>The <tt>doInitialization</tt> method is allowed to do most of the things that
+<tt>CallGraphSCCPass</tt>'s are not allowed to do. They can add and remove
+functions, get pointers to functions, etc. The <tt>doInitialization</tt> method
+is designed to do simple initialization type of stuff that does not depend on
+the SCCs being processed. The <tt>doInitialization</tt> method call is not
+scheduled to overlap with any other pass executions (thus it should be very
+fast).</p>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="runOnSCC">The <tt>runOnSCC</tt> method</a>
+</h4>
+
+<div>
+
+<div class="doc_code"><pre>
+<b>virtual bool</b> runOnSCC(CallGraphSCC &amp;SCC) = 0;
+</pre></div>
+
+<p>The <tt>runOnSCC</tt> method performs the interesting work of the pass, and
+should return true if the module was modified by the transformation, false
+otherwise.</p>
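+
+<p>For example, a minimal (hypothetical) <tt>CallGraphSCCPass</tt> that simply
+prints the members of each SCC might look like the following sketch. Iterating
+over the SCC yields <tt>CallGraphNode</tt>s, whose function may be null for the
+external node, hence the check:</p>
+
+<div class="doc_code"><pre>
+<b>namespace</b> {
+  <b>struct</b> PrintSCC : <b>public</b> CallGraphSCCPass {
+    static char ID;
+    PrintSCC() : CallGraphSCCPass(ID) {}
+
+    <b>virtual bool</b> runOnSCC(CallGraphSCC &amp;SCC) {
+      for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I)
+        if (Function *F = (*I)-&gt;getFunction())
+          errs() &lt;&lt; "  SCC member: " &lt;&lt; F-&gt;getName() &lt;&lt; "\n";
+      <b>return false</b>;  <i>// nothing was modified</i>
+    }
+  };
+}
+
+char PrintSCC::ID = 0;
+</pre></div>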
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="doFinalization_scc">
+ The <tt>doFinalization(CallGraph &amp;)</tt> method
+ </a>
+</h4>
+
+<div>
+
+<div class="doc_code"><pre>
+<b>virtual bool</b> doFinalization(CallGraph &amp;CG);
+</pre></div>
+
+<p>The <tt>doFinalization</tt> method is an infrequently used method that is
+called when the pass framework has finished calling <a
+href="#runOnSCC"><tt>runOnSCC</tt></a> for every SCC in the
+program being compiled.</p>
+
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="FunctionPass">The <tt>FunctionPass</tt> class</a>
+</h3>
+
+<div>
+
+<p>In contrast to <tt>ModulePass</tt> subclasses, <tt><a
+href="http://llvm.org/doxygen/classllvm_1_1FunctionPass.html">FunctionPass</a></tt>
+subclasses do have a predictable, local behavior that can be expected by the
+system. All <tt>FunctionPass</tt>'s execute on each function in the program
+independent of all of the other functions in the program.
+<tt>FunctionPass</tt>'s do not require that they are executed in a particular
+order, and <tt>FunctionPass</tt>'s do not modify external functions.</p>
+
+<p>To be explicit, <tt>FunctionPass</tt> subclasses are not allowed to:</p>
+
+<ol>
+<li>Modify a Function other than the one currently being processed.</li>
+<li>Add or remove Function's from the current Module.</li>
+<li>Add or remove global variables from the current Module.</li>
+<li>Maintain state across invocations of
+ <a href="#runOnFunction"><tt>runOnFunction</tt></a> (including global data)</li>
+</ol>
+
+<p>Implementing a <tt>FunctionPass</tt> is usually straightforward (See the <a
+href="#basiccode">Hello World</a> pass for example). <tt>FunctionPass</tt>'s
+may overload three virtual methods to do their work. All of these methods
+should return true if they modified the program, or false if they didn't.</p>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="doInitialization_mod">
+ The <tt>doInitialization(Module &amp;)</tt> method
+ </a>
+</h4>
+
+<div>
+
+<div class="doc_code"><pre>
+<b>virtual bool</b> doInitialization(Module &amp;M);
+</pre></div>
+
+<p>The <tt>doInitialization</tt> method is allowed to do most of the things that
+<tt>FunctionPass</tt>'s are not allowed to do. They can add and remove
+functions, get pointers to functions, etc. The <tt>doInitialization</tt> method
+is designed to do simple initialization type of stuff that does not depend on
+the functions being processed. The <tt>doInitialization</tt> method call is not
+scheduled to overlap with any other pass executions (thus it should be very
+fast).</p>
+
+<p>A good example of how this method should be used is the <a
+href="http://llvm.org/doxygen/LowerAllocations_8cpp-source.html">LowerAllocations</a>
+pass. This pass converts <tt>malloc</tt> and <tt>free</tt> instructions into
+platform dependent <tt>malloc()</tt> and <tt>free()</tt> function calls. It
+uses the <tt>doInitialization</tt> method to get a reference to the malloc and
+free functions that it needs, adding prototypes to the module if necessary.</p>
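+
+<p>A rough sketch of that idea (not the actual <tt>LowerAllocations</tt>
+source) is shown below; <tt>FreeFunc</tt> is assumed to be a
+<tt>Constant*</tt> member of the pass that <tt>runOnBasicBlock</tt> later uses
+when rewriting <tt>free</tt> instructions:</p>
+
+<div class="doc_code"><pre>
+<b>bool</b> LowerAllocations::doInitialization(Module &amp;M) {
+  LLVMContext &amp;C = M.getContext();
+  <i>// Build the type "void free(i8*)" and add a prototype if one is missing.</i>
+  std::vector&lt;Type*&gt; Params(1, Type::getInt8PtrTy(C));
+  FunctionType *FreeTy = FunctionType::get(Type::getVoidTy(C), Params, false);
+  FreeFunc = M.getOrInsertFunction("free", FreeTy);
+  <b>return true</b>;  <i>// adding a prototype modifies the module</i>
+}
+</pre></div>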
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="runOnFunction">The <tt>runOnFunction</tt> method</a>
+</h4>
+
+<div>
+
+<div class="doc_code"><pre>
+<b>virtual bool</b> runOnFunction(Function &amp;F) = 0;
+</pre></div>
+
+<p>The <tt>runOnFunction</tt> method must be implemented by your subclass to do
+the transformation or analysis work of your pass. As usual, a true value should
+be returned if the function is modified.</p>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="doFinalization_mod">
+ The <tt>doFinalization(Module &amp;)</tt> method
+ </a>
+</h4>
+
+<div>
+
+<div class="doc_code"><pre>
+<b>virtual bool</b> doFinalization(Module &amp;M);
+</pre></div>
+
+<p>The <tt>doFinalization</tt> method is an infrequently used method that is
+called when the pass framework has finished calling <a
+href="#runOnFunction"><tt>runOnFunction</tt></a> for every function in the
+program being compiled.</p>
+
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="LoopPass">The <tt>LoopPass</tt> class </a>
+</h3>
+
+<div>
+
+<p>All <tt>LoopPass</tt>'s execute on each loop in the function independent of
+all of the other loops in the function. <tt>LoopPass</tt>'s process loops in
+loop nest order, such that the outermost loop is processed last.</p>
+
+<p><tt>LoopPass</tt> subclasses are allowed to update the loop nest using the
+<tt>LPPassManager</tt> interface. Implementing a loop pass is usually
+straightforward. <tt>LoopPass</tt>'s may overload three virtual methods to
+do their work. All these methods should return true if they modified the
+program, or false if they didn't.</p>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="doInitialization_loop">
+ The <tt>doInitialization(Loop *,LPPassManager &amp;)</tt> method
+ </a>
+</h4>
+
+<div>
+
+<div class="doc_code"><pre>
+<b>virtual bool</b> doInitialization(Loop *, LPPassManager &amp;LPM);
+</pre></div>
+
+<p>The <tt>doInitialization</tt> method is designed to do simple initialization
+type of stuff that does not depend on the functions being processed. The
+<tt>doInitialization</tt> method call is not scheduled to overlap with any
+other pass executions (thus it should be very fast). The <tt>LPPassManager</tt>
+interface should be used to access Function or Module level analysis
+information.</p>
+
+</div>
+
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="runOnLoop">The <tt>runOnLoop</tt> method</a>
+</h4>
+
+<div>
+
+<div class="doc_code"><pre>
+<b>virtual bool</b> runOnLoop(Loop *, LPPassManager &amp;LPM) = 0;
+</pre></div>
+
+<p>The <tt>runOnLoop</tt> method must be implemented by your subclass to do
+the transformation or analysis work of your pass. As usual, a true value should
+be returned if the function is modified. The <tt>LPPassManager</tt> interface
+should be used to update the loop nest.</p>
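+
+<p>A minimal (hypothetical) loop pass that only reports the size of each loop,
+and therefore returns <tt>false</tt>, might look like this:</p>
+
+<div class="doc_code"><pre>
+<b>namespace</b> {
+  <b>struct</b> CountLoopBlocks : <b>public</b> LoopPass {
+    static char ID;
+    CountLoopBlocks() : LoopPass(ID) {}
+
+    <b>virtual bool</b> runOnLoop(Loop *L, LPPassManager &amp;LPM) {
+      errs() &lt;&lt; "Loop with header " &lt;&lt; L-&gt;getHeader()-&gt;getName()
+             &lt;&lt; " contains " &lt;&lt; L-&gt;getNumBlocks() &lt;&lt; " blocks\n";
+      <b>return false</b>;  <i>// no transformation performed</i>
+    }
+  };
+}
+
+char CountLoopBlocks::ID = 0;
+</pre></div>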
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="doFinalization_loop">The <tt>doFinalization()</tt> method</a>
+</h4>
+
+<div>
+
+<div class="doc_code"><pre>
+<b>virtual bool</b> doFinalization();
+</pre></div>
+
+<p>The <tt>doFinalization</tt> method is an infrequently used method that is
+called when the pass framework has finished calling <a
+href="#runOnLoop"><tt>runOnLoop</tt></a> for every loop in the
+program being compiled. </p>
+
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="RegionPass">The <tt>RegionPass</tt> class </a>
+</h3>
+
+<div>
+
+<p><tt>RegionPass</tt> is similar to <a href="#LoopPass"><tt>LoopPass</tt></a>,
+but executes on each single-entry, single-exit region in the function.
+<tt>RegionPass</tt> processes regions in nested order, such that the outermost
+region is processed last.</p>
+
+<p><tt>RegionPass</tt> subclasses are allowed to update the region tree by using
+the <tt>RGPassManager</tt> interface. You may overload three virtual methods of
+<tt>RegionPass</tt> to implement your own region pass. All these
+methods should return true if they modified the program, or false if they did
+not.</p>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="doInitialization_region">
+ The <tt>doInitialization(Region *, RGPassManager &amp;)</tt> method
+ </a>
+</h4>
+
+<div>
+
+<div class="doc_code"><pre>
+<b>virtual bool</b> doInitialization(Region *, RGPassManager &amp;RGM);
+</pre></div>
+
+<p>The <tt>doInitialization</tt> method is designed to do simple initialization
+type of stuff that does not depend on the functions being processed. The
+<tt>doInitialization</tt> method call is not scheduled to overlap with any
+other pass executions (thus it should be very fast). The <tt>RGPassManager</tt>
+interface should be used to access Function or Module level analysis
+information.</p>
+
+</div>
+
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="runOnRegion">The <tt>runOnRegion</tt> method</a>
+</h4>
+
+<div>
+
+<div class="doc_code"><pre>
+<b>virtual bool</b> runOnRegion(Region *, RGPassManager &amp;RGM) = 0;
+</pre></div>
+
+<p>The <tt>runOnRegion</tt> method must be implemented by your subclass to do
+the transformation or analysis work of your pass. As usual, a true value should
+be returned if the region is modified. The <tt>RGPassManager</tt> interface
+should be used to update the region tree.</p>
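+
+<p>A minimal (hypothetical) region pass that just names each region it visits,
+without touching the region tree, might look like this:</p>
+
+<div class="doc_code"><pre>
+<b>namespace</b> {
+  <b>struct</b> PrintRegions : <b>public</b> RegionPass {
+    static char ID;
+    PrintRegions() : RegionPass(ID) {}
+
+    <b>virtual bool</b> runOnRegion(Region *R, RGPassManager &amp;RGM) {
+      errs() &lt;&lt; "Visiting region: " &lt;&lt; R-&gt;getNameStr() &lt;&lt; "\n";
+      <b>return false</b>;  <i>// the region tree is left untouched</i>
+    }
+  };
+}
+
+char PrintRegions::ID = 0;
+</pre></div>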
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="doFinalization_region">The <tt>doFinalization()</tt> method</a>
+</h4>
+
+<div>
+
+<div class="doc_code"><pre>
+<b>virtual bool</b> doFinalization();
+</pre></div>
+
+<p>The <tt>doFinalization</tt> method is an infrequently used method that is
+called when the pass framework has finished calling <a
+href="#runOnRegion"><tt>runOnRegion</tt></a> for every region in the
+program being compiled. </p>
+
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="BasicBlockPass">The <tt>BasicBlockPass</tt> class</a>
+</h3>
+
+<div>
+
+<p><tt>BasicBlockPass</tt>'s are just like <a
+href="#FunctionPass"><tt>FunctionPass</tt></a>'s, except that they must limit
+their scope of inspection and modification to a single basic block at a time.
+As such, they are <b>not</b> allowed to do any of the following:</p>
+
+<ol>
+<li>Modify or inspect any basic blocks outside of the current one</li>
+<li>Maintain state across invocations of
+ <a href="#runOnBasicBlock"><tt>runOnBasicBlock</tt></a></li>
+<li>Modify the control flow graph (by altering terminator instructions)</li>
+<li>Any of the things forbidden for
+ <a href="#FunctionPass"><tt>FunctionPass</tt></a>es.</li>
+</ol>
+
+<p><tt>BasicBlockPass</tt>es are useful for traditional local and "peephole"
+optimizations. They may override the same <a
+href="#doInitialization_mod"><tt>doInitialization(Module &amp;)</tt></a> and <a
+href="#doFinalization_mod"><tt>doFinalization(Module &amp;)</tt></a> methods that <a
+href="#FunctionPass"><tt>FunctionPass</tt></a>'s have, but also have the following virtual methods that may also be implemented:</p>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="doInitialization_fn">
+ The <tt>doInitialization(Function &amp;)</tt> method
+ </a>
+</h4>
+
+<div>
+
+<div class="doc_code"><pre>
+<b>virtual bool</b> doInitialization(Function &amp;F);
+</pre></div>
+
+<p>The <tt>doInitialization</tt> method is allowed to do most of the things that
+<tt>BasicBlockPass</tt>'s are not allowed to do, but that
+<tt>FunctionPass</tt>'s can. The <tt>doInitialization</tt> method is designed
+to do simple initialization that does not depend on the
+BasicBlocks being processed. The <tt>doInitialization</tt> method call is not
+scheduled to overlap with any other pass executions (thus it should be very
+fast).</p>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="runOnBasicBlock">The <tt>runOnBasicBlock</tt> method</a>
+</h4>
+
+<div>
+
+<div class="doc_code"><pre>
+<b>virtual bool</b> runOnBasicBlock(BasicBlock &amp;BB) = 0;
+</pre></div>
+
+<p>Override this function to do the work of the <tt>BasicBlockPass</tt>. This
+function is not allowed to inspect or modify basic blocks other than the
+parameter, and is not allowed to modify the CFG. A true value must be returned
+if the basic block is modified.</p>
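+
+<p>For example, a minimal (hypothetical) basic block pass that only counts the
+instructions in each block it is given might look like this:</p>
+
+<div class="doc_code"><pre>
+<b>namespace</b> {
+  <b>struct</b> CountInsts : <b>public</b> BasicBlockPass {
+    static char ID;
+    CountInsts() : BasicBlockPass(ID) {}
+
+    <b>virtual bool</b> runOnBasicBlock(BasicBlock &amp;BB) {
+      errs() &lt;&lt; BB.getName() &lt;&lt; " has " &lt;&lt; BB.size() &lt;&lt; " instructions\n";
+      <b>return false</b>;  <i>// the block is only inspected, never modified</i>
+    }
+  };
+}
+
+char CountInsts::ID = 0;
+</pre></div>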
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="doFinalization_fn">
+ The <tt>doFinalization(Function &amp;)</tt> method
+ </a>
+</h4>
+
+<div>
+
+<div class="doc_code"><pre>
+<b>virtual bool</b> doFinalization(Function &amp;F);
+</pre></div>
+
+<p>The <tt>doFinalization</tt> method is an infrequently used method that is
+called when the pass framework has finished calling <a
+href="#runOnBasicBlock"><tt>runOnBasicBlock</tt></a> for every BasicBlock in the
+program being compiled. This can be used to perform per-function
+finalization.</p>
+
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h3>
+ <a name="MachineFunctionPass">The <tt>MachineFunctionPass</tt> class</a>
+</h3>
+
+<div>
+
+<p>A <tt>MachineFunctionPass</tt> is a part of the LLVM code generator that
+executes on the machine-dependent representation of each LLVM function in the
+program.</p>
+
+<p>Code generator passes are registered and initialized specially by
+<tt>TargetMachine::addPassesToEmitFile</tt> and similar routines, so they
+cannot generally be run from the <tt>opt</tt> or <tt>bugpoint</tt>
+commands.</p>
+
+<p>A <tt>MachineFunctionPass</tt> is also a <tt>FunctionPass</tt>, so all
+the restrictions that apply to a <tt>FunctionPass</tt> also apply to it.
+<tt>MachineFunctionPass</tt>es also have additional restrictions. In particular,
+<tt>MachineFunctionPass</tt>es are not allowed to do any of the following:</p>
+
+<ol>
+<li>Modify or create any LLVM IR Instructions, BasicBlocks, Arguments,
+ Functions, GlobalVariables, GlobalAliases, or Modules.</li>
+<li>Modify a MachineFunction other than the one currently being processed.</li>
+<li>Maintain state across invocations of <a
+href="#runOnMachineFunction"><tt>runOnMachineFunction</tt></a> (including global
+data)</li>
+</ol>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="runOnMachineFunction">
+ The <tt>runOnMachineFunction(MachineFunction &amp;MF)</tt> method
+ </a>
+</h4>
+
+<div>
+
+<div class="doc_code"><pre>
+<b>virtual bool</b> runOnMachineFunction(MachineFunction &amp;MF) = 0;
+</pre></div>
+
+<p><tt>runOnMachineFunction</tt> can be considered the main entry point of a
+<tt>MachineFunctionPass</tt>; that is, you should override this method to do the
+work of your <tt>MachineFunctionPass</tt>.</p>
+
+<p>The <tt>runOnMachineFunction</tt> method is called on every
+<tt>MachineFunction</tt> in a <tt>Module</tt>, so that the
+<tt>MachineFunctionPass</tt> may perform optimizations on the machine-dependent
+representation of the function. If you want to get at the LLVM <tt>Function</tt>
+for the <tt>MachineFunction</tt> you're working on, use
+<tt>MachineFunction</tt>'s <tt>getFunction()</tt> accessor method -- but
+remember, you may not modify the LLVM <tt>Function</tt> or its contents from a
+<tt>MachineFunctionPass</tt>.</p>
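+
+<p>As an illustration, a minimal (hypothetical) machine function pass that only
+reports how many machine basic blocks each function contains might look like
+this:</p>
+
+<div class="doc_code"><pre>
+<b>namespace</b> {
+  <b>struct</b> CountMachineBlocks : <b>public</b> MachineFunctionPass {
+    static char ID;
+    CountMachineBlocks() : MachineFunctionPass(ID) {}
+
+    <b>virtual bool</b> runOnMachineFunction(MachineFunction &amp;MF) {
+      errs() &lt;&lt; MF.getFunction()-&gt;getName() &lt;&lt; " has "
+             &lt;&lt; MF.size() &lt;&lt; " machine basic blocks\n";
+      <b>return false</b>;  <i>// the machine code is not changed</i>
+    }
+  };
+}
+
+char CountMachineBlocks::ID = 0;
+</pre></div>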
+
+</div>
+
+</div>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2>
+ <a name="registration">Pass registration</a>
+</h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>In the <a href="#basiccode">Hello World</a> example pass we illustrated how
+pass registration works, and discussed some of the reasons that it is used and
+what it does. Here we discuss how and why passes are registered.</p>
+
+<p>As we saw above, passes are registered with the <b><tt>RegisterPass</tt></b>
+template. The template parameter is the class of the pass being registered. The
+first argument is the command line argument used to specify that the pass
+should be added to a program (for example, with <tt>opt</tt> or
+<tt>bugpoint</tt>). The second argument is the name of the pass, which is used
+for the <tt>-help</tt> output of programs, as well as for debug output
+generated by the <tt>--debug-pass</tt> option.</p>
+
+<p>If you want your pass to be easily dumpable, you should
+implement the virtual <tt>print</tt> method:</p>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="print">The <tt>print</tt> method</a>
+</h4>
+
+<div>
+
+<div class="doc_code"><pre>
+<b>virtual void</b> print(std::ostream &amp;O, <b>const</b> Module *M) <b>const</b>;
+</pre></div>
+
+<p>The <tt>print</tt> method must be implemented by "analyses" in order to print
+a human readable version of the analysis results. This is useful for debugging
+an analysis itself, as well as for other people to figure out how an analysis
+works. Use the <tt>opt -analyze</tt> argument to invoke this method.</p>
+
+<p>The <tt>ostream</tt> parameter specifies the stream to write the results on,
+and the <tt>Module</tt> parameter gives a pointer to the top level module of the
+program that has been analyzed. Note however that this pointer may be null in
+certain circumstances (such as calling the <tt>Pass::dump()</tt> from a
+debugger), so it should only be used to enhance debug output, it should not be
+depended on.</p>
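+
+<p>As a sketch, a hypothetical analysis (here called <tt>MyAnalysis</tt>, with
+an invented <tt>InterestingValues</tt> vector holding its results) might
+implement <tt>print</tt> like this:</p>
+
+<div class="doc_code"><pre>
+<b>void</b> MyAnalysis::print(std::ostream &amp;O, <b>const</b> Module *M) <b>const</b> {
+  O &lt;&lt; "MyAnalysis results:\n";
+  for (unsigned i = 0, e = InterestingValues.size(); i != e; ++i)
+    O &lt;&lt; "  value #" &lt;&lt; i &lt;&lt; " = " &lt;&lt; InterestingValues[i] &lt;&lt; "\n";
+  <i>// M may be null (e.g. when called from Pass::dump() in a debugger), so it</i>
+  <i>// is only used to enhance the output when it is available.</i>
+}
+</pre></div>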
+
+</div>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2>
+ <a name="interaction">Specifying interactions between passes</a>
+</h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>One of the main responsibilities of the <tt>PassManager</tt> is to make sure
+that passes interact with each other correctly. Because <tt>PassManager</tt>
+tries to <a href="#passmanager">optimize the execution of passes</a> it must
+know how the passes interact with each other and what dependencies exist between
+the various passes. To track this, each pass can declare the set of passes that
+are required to be executed before the current pass, and the passes which are
+invalidated by the current pass.</p>
+
+<p>Typically this functionality is used to require that analysis results are
+computed before your pass is run. Running arbitrary transformation passes can
+invalidate the computed analysis results, which is what the invalidation set
+specifies. If a pass does not implement the <tt><a
+href="#getAnalysisUsage">getAnalysisUsage</a></tt> method, it defaults to not
+having any prerequisite passes, and invalidating <b>all</b> other passes.</p>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="getAnalysisUsage">The <tt>getAnalysisUsage</tt> method</a>
+</h4>
+
+<div>
+
+<div class="doc_code"><pre>
+<b>virtual void</b> getAnalysisUsage(AnalysisUsage &amp;Info) <b>const</b>;
+</pre></div>
+
+<p>By implementing the <tt>getAnalysisUsage</tt> method, the required and
+invalidated sets may be specified for your transformation. The implementation
+should fill in the <tt><a
+href="http://llvm.org/doxygen/classllvm_1_1AnalysisUsage.html">AnalysisUsage</a></tt>
+object with information about which passes are required and not invalidated. To
+do this, a pass may call any of the following methods on the AnalysisUsage
+object:</p>
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="AU::addRequired">
+ The <tt>AnalysisUsage::addRequired&lt;&gt;</tt>
+ and <tt>AnalysisUsage::addRequiredTransitive&lt;&gt;</tt> methods
+ </a>
+</h4>
+
+<div>
+<p>
+If your pass requires a previous pass to be executed (an analysis for example),
+it can use one of these methods to arrange for it to be run before your pass.
+LLVM has many different types of analyses and passes that can be required,
+spanning the range from <tt>DominatorSet</tt> to <tt>BreakCriticalEdges</tt>.
+Requiring <tt>BreakCriticalEdges</tt>, for example, guarantees that there will
+be no critical edges in the CFG when your pass has been run.
+</p>
+
+<p>
+Some analyses chain to other analyses to do their job. For example, an <a
+href="AliasAnalysis.html">AliasAnalysis</a> implementation is required to <a
+href="AliasAnalysis.html#chaining">chain</a> to other alias analysis passes. In
+cases where analyses chain, the <tt>addRequiredTransitive</tt> method should be
+used instead of the <tt>addRequired</tt> method. This informs the PassManager
+that the transitively required pass should be alive as long as the requiring
+pass is.
+</p>
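+
+<p>For example, a hypothetical analysis <tt>MyChainedAA</tt> that forwards
+queries to the active alias analysis might declare its requirements as
+follows:</p>
+
+<div class="doc_code"><pre>
+<b>void</b> MyChainedAA::getAnalysisUsage(AnalysisUsage &amp;AU) <b>const</b> {
+  <i>// An ordinary prerequisite: dominators must be available before this pass.</i>
+  AU.addRequired&lt;DominatorTree&gt;();
+  <i>// A chained prerequisite: the alias analysis that queries are forwarded to</i>
+  <i>// must stay alive for as long as this pass itself is alive.</i>
+  AU.addRequiredTransitive&lt;AliasAnalysis&gt;();
+}
+</pre></div>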
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="AU::addPreserved">
+ The <tt>AnalysisUsage::addPreserved&lt;&gt;</tt> method
+ </a>
+</h4>
+
+<div>
+<p>
+One of the jobs of the PassManager is to optimize how and when analyses are run.
+In particular, it attempts to avoid recomputing data unless it needs to. For
+this reason, passes are allowed to declare that they preserve (i.e., they don't
+invalidate) an existing analysis if it's available. For example, a simple
+constant folding pass would not modify the CFG, so it can't possibly affect the
+results of dominator analysis. By default, all passes are assumed to invalidate
+all others.
+</p>
+
+<p>
+The <tt>AnalysisUsage</tt> class provides several methods which are useful in
+certain circumstances that are related to <tt>addPreserved</tt>. In particular,
+the <tt>setPreservesAll</tt> method can be called to indicate that the pass does
+not modify the LLVM program at all (which is true for analyses), and the
+<tt>setPreservesCFG</tt> method can be used by transformations that change
+instructions in the program but do not modify the CFG or terminator instructions
+(note that this property is implicitly set for <a
+href="#BasicBlockPass">BasicBlockPass</a>'s).
+</p>
+
+<p>
+<tt>addPreserved</tt> is particularly useful for transformations like
+<tt>BreakCriticalEdges</tt>. This pass knows how to update a small set of loop
+and dominator related analyses if they exist, so it can preserve them, despite
+the fact that it hacks on the CFG.
+</p>
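+
+<p>For example, a hypothetical simplification pass that rewrites instructions
+but leaves the CFG alone might declare:</p>
+
+<div class="doc_code"><pre>
+<b>void</b> MySimplify::getAnalysisUsage(AnalysisUsage &amp;AU) <b>const</b> {
+  AU.setPreservesCFG();              <i>// instructions change, the CFG does not</i>
+  AU.addPreserved&lt;DominatorTree&gt;();  <i>// dominators therefore remain valid</i>
+  AU.addPreserved&lt;LoopInfo&gt;();       <i>// so does the loop structure</i>
+}
+</pre></div>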
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="AU::examples">
+ Example implementations of <tt>getAnalysisUsage</tt>
+ </a>
+</h4>
+
+<div>
+
+<div class="doc_code"><pre>
+<i>// This example modifies the program, but does not modify the CFG</i>
+<b>void</b> <a href="http://llvm.org/doxygen/structLICM.html">LICM</a>::getAnalysisUsage(AnalysisUsage &amp;AU) <b>const</b> {
+ AU.setPreservesCFG();
+ AU.addRequired&lt;<a href="http://llvm.org/doxygen/classllvm_1_1LoopInfo.html">LoopInfo</a>&gt;();
+}
+</pre></div>
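+
+<p>A pure analysis, by contrast, changes nothing at all, so it can declare that
+every other analysis result remains valid (a hypothetical <tt>MyCounter</tt>
+analysis is used here for illustration):</p>
+
+<div class="doc_code"><pre>
+<i>// This example does not modify the program at all</i>
+<b>void</b> MyCounter::getAnalysisUsage(AnalysisUsage &amp;AU) <b>const</b> {
+  AU.setPreservesAll();
+}
+</pre></div>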
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="getAnalysis">
+ The <tt>getAnalysis&lt;&gt;</tt> and
+ <tt>getAnalysisIfAvailable&lt;&gt;</tt> methods
+ </a>
+</h4>
+
+<div>
+
+<p>The <tt>Pass::getAnalysis&lt;&gt;</tt> method is automatically inherited by
+your class, providing you with access to the passes that you declared that you
+required with the <a href="#getAnalysisUsage"><tt>getAnalysisUsage</tt></a>
+method. It takes a single template argument that specifies which pass class you
+want, and returns a reference to that pass. For example:</p>
+
+<div class="doc_code"><pre>
+bool LICM::runOnFunction(Function &amp;F) {
+ LoopInfo &amp;LI = getAnalysis&lt;LoopInfo&gt;();
+ ...
+}
+</pre></div>
+
+<p>This method call returns a reference to the pass desired. You may get a
+runtime assertion failure if you attempt to get an analysis that you did not
+declare as required in your <a
+href="#getAnalysisUsage"><tt>getAnalysisUsage</tt></a> implementation. This
+method can be called by your <tt>run*</tt> method implementation, or by any
+other local method invoked by your <tt>run*</tt> method.
+
+A module level pass can use function level analysis info using this interface.
+For example:</p>
+
+<div class="doc_code"><pre>
+bool ModuleLevelPass::runOnModule(Module &amp;M) {
+ ...
+ DominatorTree &amp;DT = getAnalysis&lt;DominatorTree&gt;(Func);
+ ...
+}
+</pre></div>
+
+<p>In the above example, <tt>runOnFunction</tt> for <tt>DominatorTree</tt> is called by the pass manager
+before returning a reference to the desired pass.</p>
+
+<p>
+If your pass is capable of updating analyses if they exist (e.g.,
+<tt>BreakCriticalEdges</tt>, as described above), you can use the
+<tt>getAnalysisIfAvailable</tt> method, which returns a pointer to the analysis
+if it is active. For example:</p>
+
+<div class="doc_code"><pre>
+...
+if (DominatorSet *DS = getAnalysisIfAvailable&lt;DominatorSet&gt;()) {
+ <i>// A DominatorSet is active. This code will update it.</i>
+}
+...
+</pre></div>
+
+</div>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2>
+ <a name="analysisgroup">Implementing Analysis Groups</a>
+</h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>Now that we understand the basics of how passes are defined, how they are
+used, and how they are required from other passes, it's time to get a little bit
+fancier. All of the pass relationships that we have seen so far are very
+simple: one pass depends on one other specific pass to be run before it can run.
+For many applications, this is great, for others, more flexibility is
+required.</p>
+
+<p>In particular, some analyses are defined such that there is a single simple
+interface to the analysis results, but multiple ways of calculating them.
+Consider alias analysis for example. The most trivial alias analysis returns
+"may alias" for any alias query. The most sophisticated analysis a
+flow-sensitive, context-sensitive interprocedural analysis that can take a
+significant amount of time to execute (and obviously, there is a lot of room
+between these two extremes for other implementations). To cleanly support
+situations like this, the LLVM Pass Infrastructure supports the notion of
+Analysis Groups.</p>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="agconcepts">Analysis Group Concepts</a>
+</h4>
+
+<div>
+
+<p>An Analysis Group is a single simple interface that may be implemented by
+multiple different passes. Analysis Groups can be given human readable names
+just like passes, but unlike passes, they need not derive from the <tt>Pass</tt>
+class. An analysis group may have one or more implementations, one of which is
+the "default" implementation.</p>
+
+<p>Analysis groups are used by client passes just like other passes are: through
+the <tt>AnalysisUsage::addRequired()</tt> and <tt>Pass::getAnalysis()</tt> methods.
+In order to resolve this requirement, the <a href="#passmanager">PassManager</a>
+scans the available passes to see if any implementations of the analysis group
+are available. If none is available, the default implementation is created for
+the pass to use. All standard rules for <A href="#interaction">interaction
+between passes</a> still apply.</p>
+
+<p>Although <a href="#registration">Pass Registration</a> is optional for normal
+passes, all analysis group implementations must be registered, and must use the
+<A href="#registerag"><tt>INITIALIZE_AG_PASS</tt></a> template to join the
+implementation pool. Also, a default implementation of the interface
+<b>must</b> be registered with <A
+href="#registerag"><tt>RegisterAnalysisGroup</tt></a>.</p>
+
+<p>As a concrete example of an Analysis Group in action, consider the <a
+href="http://llvm.org/doxygen/classllvm_1_1AliasAnalysis.html">AliasAnalysis</a>
+analysis group. The default implementation of the alias analysis interface (the
+<tt><a
+href="http://llvm.org/doxygen/structBasicAliasAnalysis.html">basicaa</a></tt>
+pass) just does a few simple checks that don't require significant analysis to
+compute (such as: two different globals can never alias each other, etc).
+Passes that use the <tt><a
+href="http://llvm.org/doxygen/classllvm_1_1AliasAnalysis.html">AliasAnalysis</a></tt>
+interface (for example the <tt><a
+href="http://llvm.org/doxygen/structGCSE.html">gcse</a></tt> pass), do
+not care which implementation of alias analysis is actually provided, they just
+use the designated interface.</p>
+
+<p>From the user's perspective, commands work just like normal. Issuing the
+command '<tt>opt -gcse ...</tt>' will cause the <tt>basicaa</tt> class to be
+instantiated and added to the pass sequence. Issuing the command '<tt>opt
+-somefancyaa -gcse ...</tt>' will cause the <tt>gcse</tt> pass to use the
+<tt>somefancyaa</tt> alias analysis (which doesn't actually exist, it's just a
+hypothetical example) instead.</p>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="registerag">Using <tt>RegisterAnalysisGroup</tt></a>
+</h4>
+
+<div>
+
+<p>The <tt>RegisterAnalysisGroup</tt> template is used to register the analysis
+group itself, while the <tt>INITIALIZE_AG_PASS</tt> is used to add pass
+implementations to the analysis group. First,
+an analysis group should be registered, with a human readable name
+provided for it.
+Unlike registration of passes, there is no command line argument to be specified
+for the Analysis Group Interface itself, because it is "abstract":</p>
+
+<div class="doc_code"><pre>
+<b>static</b> RegisterAnalysisGroup&lt;<a href="http://llvm.org/doxygen/classllvm_1_1AliasAnalysis.html">AliasAnalysis</a>&gt; A("<i>Alias Analysis</i>");
+</pre></div>
+
+<p>Once the analysis is registered, passes can declare that they are valid
+implementations of the interface by using the following code:</p>
+
+<div class="doc_code"><pre>
+<b>namespace</b> {
+ //<i> Declare that we implement the AliasAnalysis interface</i>
+ INITIALIZE_AG_PASS(FancyAA, <a href="http://llvm.org/doxygen/classllvm_1_1AliasAnalysis.html">AliasAnalysis</a>, "<i>somefancyaa</i>",
+ "<i>A more complex alias analysis implementation</i>",
+ false, // <i>Is CFG Only?</i>
+ true, // <i>Is Analysis?</i>
+ false); // <i>Is default Analysis Group implementation?</i>
+}
+</pre></div>
+
+<p>This just shows a class <tt>FancyAA</tt> that
+uses the <tt>INITIALIZE_AG_PASS</tt> macro both to register and
+to "join" the <tt><a href="http://llvm.org/doxygen/classllvm_1_1AliasAnalysis.html">AliasAnalysis</a></tt>
+analysis group. Every implementation of an analysis group should join using
+this macro.</p>
+
+<div class="doc_code"><pre>
+<b>namespace</b> {
+ //<i> Declare that we implement the AliasAnalysis interface</i>
+ INITIALIZE_AG_PASS(BasicAA, <a href="http://llvm.org/doxygen/classllvm_1_1AliasAnalysis.html">AliasAnalysis</a>, "<i>basicaa</i>",
+ "<i>Basic Alias Analysis (default AA impl)</i>",
+ false, // <i>Is CFG Only?</i>
+ true, // <i>Is Analysis?</i>
+ true); // <i>Is default Analysis Group implementation?</i>
+}
+</pre></div>
+
+<p>Here we show how the default implementation is specified (using the final
+argument to the <tt>INITIALIZE_AG_PASS</tt> template). There must be exactly
+one default implementation available at all times for an Analysis Group to be
+used. Only the default implementation can derive from <tt>ImmutablePass</tt>.
+Here we declare that the
+ <tt><a href="http://llvm.org/doxygen/structBasicAliasAnalysis.html">BasicAliasAnalysis</a></tt>
+pass is the default implementation for the interface.</p>
+
+</div>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2>
+ <a name="passStatistics">Pass Statistics</a>
+</h2>
+<!-- *********************************************************************** -->
+
+<div>
+<p>The <a
+href="http://llvm.org/doxygen/Statistic_8h-source.html"><tt>Statistic</tt></a>
+class is designed to be an easy way to expose various success
+metrics from passes. These statistics are printed at the end of a
+run when the <tt>-stats</tt> command line option is given. See the
+<a href="http://llvm.org/docs/ProgrammersManual.html#Statistic">Statistics section</a> in the Programmer's Manual for details.</p>
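+
+<p>As a minimal sketch (the counter name and description are illustrative), a
+pass typically declares a counter with the <tt>STATISTIC</tt> macro and
+increments it each time it performs the transformation being counted:</p>
+
+<div class="doc_code"><pre>
+#define DEBUG_TYPE "hello"   //<i> prefix used for this pass in the -stats report</i>
+#include "llvm/ADT/Statistic.h"
+
+STATISTIC(HelloCounter, "Counts number of functions greeted");
+...
+  ++HelloCounter;            //<i> e.g. inside runOnFunction()</i>
+</pre></div>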
+
+</div>
+
+
+<!-- *********************************************************************** -->
+<h2>
+ <a name="passmanager">What PassManager does</a>
+</h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>The <a
+href="http://llvm.org/doxygen/PassManager_8h-source.html"><tt>PassManager</tt></a>
+<a
+href="http://llvm.org/doxygen/classllvm_1_1PassManager.html">class</a>
+takes a list of passes, ensures their <a href="#interaction">prerequisites</a>
+are set up correctly, and then schedules passes to run efficiently. All of the
+LLVM tools that run passes use the <tt>PassManager</tt> for execution of these
+passes.</p>
+
+<p>The <tt>PassManager</tt> does two main things to try to reduce the execution
+time of a series of passes:</p>
+
+<ol>
+<li><b>Share analysis results</b> - The PassManager attempts to avoid
+recomputing analysis results as much as possible. This means keeping track of
+which analyses are already available, which analyses get invalidated, and which
+analyses need to be run for a pass. An important part of this work is that the
+<tt>PassManager</tt> tracks the exact lifetime of all analysis results, allowing
+it to <a href="#releaseMemory">free the memory</a> allocated to hold analysis
+results as soon as they are no longer needed.</li>
+
+<li><b>Pipeline the execution of passes on the program</b> - The
+<tt>PassManager</tt> attempts to get better cache and memory usage behavior out
+of a series of passes by pipelining the passes together. This means that, given
+a series of consecutive <a href="#FunctionPass"><tt>FunctionPass</tt></a>'s, it
+will execute all of the <a href="#FunctionPass"><tt>FunctionPass</tt></a>'s on
+the first function, then all of the <a
+href="#FunctionPass"><tt>FunctionPass</tt></a>es on the second function,
+etc... until the entire program has been run through the passes.
+
+<p>This improves the cache behavior of the compiler, because it is only touching
+the LLVM program representation for a single function at a time, instead of
+traversing the entire program. It reduces the memory consumption of the compiler,
+because, for example, only one <a
+href="http://llvm.org/doxygen/classllvm_1_1DominatorSet.html"><tt>DominatorSet</tt></a>
+needs to be calculated at a time. This also makes it possible to implement
+some <a
+href="#SMP">interesting enhancements</a> in the future.</p></li>
+
+</ol>
+
+<p>The effectiveness of the <tt>PassManager</tt> is influenced directly by how
+much information it has about the behaviors of the passes it is scheduling. For
+example, the "preserved" set is intentionally conservative in the face of an
+unimplemented <a href="#getAnalysisUsage"><tt>getAnalysisUsage</tt></a> method.
+Failing to implement this method when it should be implemented has the effect of
+not allowing any analysis results to live across the execution of your pass.</p>
+
+<p>The <tt>PassManager</tt> class exposes a <tt>--debug-pass</tt> command line
+option that is useful for debugging pass execution, seeing how things work, and
+diagnosing when you should be preserving more analyses than you currently are.
+(To get information about all of the variants of the <tt>--debug-pass</tt>
+option, just type '<tt>opt -help-hidden</tt>'.)</p>
+
+<p>By using the <tt>--debug-pass=Structure</tt> option, for example, we can see
+how our <a href="#basiccode">Hello World</a> pass interacts with other passes.
+Let's try it out with the <tt>gcse</tt> and <tt>licm</tt> passes:</p>
+
+<div class="doc_code"><pre>
+$ opt -load ../../../Debug+Asserts/lib/Hello.so -gcse -licm --debug-pass=Structure &lt; hello.bc &gt; /dev/null
+Module Pass Manager
+ Function Pass Manager
+ Dominator Set Construction
+ Immediate Dominators Construction
+ Global Common Subexpression Elimination
+-- Immediate Dominators Construction
+-- Global Common Subexpression Elimination
+ Natural Loop Construction
+ Loop Invariant Code Motion
+-- Natural Loop Construction
+-- Loop Invariant Code Motion
+ Module Verifier
+-- Dominator Set Construction
+-- Module Verifier
+ Bitcode Writer
+--Bitcode Writer
+</pre></div>
+
+<p>This output shows us when passes are constructed and when the analysis
+results are known to be dead (prefixed with '<tt>--</tt>'). Here we see that
+GCSE uses dominator and immediate dominator information to do its job. The LICM
+pass uses natural loop information, which uses dominator sets, but not immediate
+dominators. Because immediate dominators are no longer useful after the GCSE
+pass, they are immediately destroyed. The dominator sets are then reused to
+compute natural loop information, which is then used by the LICM pass.</p>
+
+<p>After the LICM pass, the module verifier runs (which is automatically added
+by the '<tt>opt</tt>' tool), which uses the dominator set to check that the
+resultant LLVM code is well formed. After it finishes, the dominator set
+information is destroyed, having been computed once and shared by three
+passes.</p>
+
+<p>Let's see how this changes when we run the <a href="#basiccode">Hello
+World</a> pass in between the two passes:</p>
+
+<div class="doc_code"><pre>
+$ opt -load ../../../Debug+Asserts/lib/Hello.so -gcse -hello -licm --debug-pass=Structure &lt; hello.bc &gt; /dev/null
+Module Pass Manager
+ Function Pass Manager
+ Dominator Set Construction
+ Immediate Dominators Construction
+ Global Common Subexpression Elimination
+<b>-- Dominator Set Construction</b>
+-- Immediate Dominators Construction
+-- Global Common Subexpression Elimination
+<b> Hello World Pass
+-- Hello World Pass
+ Dominator Set Construction</b>
+ Natural Loop Construction
+ Loop Invariant Code Motion
+-- Natural Loop Construction
+-- Loop Invariant Code Motion
+ Module Verifier
+-- Dominator Set Construction
+-- Module Verifier
+ Bitcode Writer
+--Bitcode Writer
+Hello: __main
+Hello: puts
+Hello: main
+</pre></div>
+
+<p>Here we see that the <a href="#basiccode">Hello World</a> pass has killed the
+Dominator Set pass, even though it doesn't modify the code at all! To fix this,
+we need to add the following <a
+href="#getAnalysisUsage"><tt>getAnalysisUsage</tt></a> method to our pass:</p>
+
+<div class="doc_code"><pre>
+<i>// We don't modify the program, so we preserve all analyses</i>
+<b>virtual void</b> getAnalysisUsage(AnalysisUsage &amp;AU) <b>const</b> {
+ AU.setPreservesAll();
+}
+</pre></div>
+
+<p>Now when we run our pass, we get this output:</p>
+
+<div class="doc_code"><pre>
+$ opt -load ../../../Debug+Asserts/lib/Hello.so -gcse -hello -licm --debug-pass=Structure &lt; hello.bc &gt; /dev/null
+Pass Arguments: -gcse -hello -licm
+Module Pass Manager
+ Function Pass Manager
+ Dominator Set Construction
+ Immediate Dominators Construction
+ Global Common Subexpression Elimination
+-- Immediate Dominators Construction
+-- Global Common Subexpression Elimination
+ Hello World Pass
+-- Hello World Pass
+ Natural Loop Construction
+ Loop Invariant Code Motion
+-- Loop Invariant Code Motion
+-- Natural Loop Construction
+ Module Verifier
+-- Dominator Set Construction
+-- Module Verifier
+ Bitcode Writer
+--Bitcode Writer
+Hello: __main
+Hello: puts
+Hello: main
+</pre></div>
+
+<p>This shows that we no longer accidentally invalidate dominator information,
+and therefore do not have to compute it twice.</p>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="releaseMemory">The <tt>releaseMemory</tt> method</a>
+</h4>
+
+<div>
+
+<div class="doc_code"><pre>
+ <b>virtual void</b> releaseMemory();
+</pre></div>
+
+<p>The <tt>PassManager</tt> automatically determines when to compute analysis
+results, and how long to keep them around for. Because the lifetime of the pass
+object itself is effectively the entire duration of the compilation process, we
+need some way to free analysis results when they are no longer useful. The
+<tt>releaseMemory</tt> virtual method is the way to do this.</p>
+
+<p>If you are writing an analysis or any other pass that retains a significant
+amount of state (for use by another pass which "requires" your pass and uses the
+<a href="#getAnalysis">getAnalysis</a> method) you should implement
+<tt>releaseMemory</tt> to, well, release the memory allocated to maintain this
+internal state. This method is called after the <tt>run*</tt> method for the
+class, before the next call of <tt>run*</tt> in your pass.</p>
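+
+<p>As a minimal sketch (the member name is illustrative), an analysis that
+caches per-value results might implement it like this:</p>
+
+<div class="doc_code"><pre>
+std::map&lt;Value*, unsigned&gt; CachedResults;  //<i> state built by run* and queried by clients</i>
+
+<b>virtual void</b> releaseMemory() {
+  CachedResults.clear();  //<i> drop the cached state once it is no longer needed</i>
+}
+</pre></div>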
+
+</div>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2>
+ <a name="registering">Registering dynamically loaded passes</a>
+</h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p><i>Size matters</i> when constructing production quality tools using LLVM,
+both for the purposes of distribution, and for regulating the resident code size
+when running on the target system. Therefore, it becomes desirable to
+selectively use some passes, while omitting others, and to maintain the
+flexibility to change configurations later on. You want to be able to do all
+this, and to provide feedback to the user. This is where pass registration
+comes into play.</p>
+
+<p>The fundamental mechanisms for pass registration are the
+<tt>MachinePassRegistry</tt> class and subclasses of
+<tt>MachinePassRegistryNode</tt>.</p>
+
+<p>An instance of <tt>MachinePassRegistry</tt> is used to maintain a list of
+<tt>MachinePassRegistryNode</tt> objects. This instance maintains the list and
+communicates additions and deletions to the command line interface.</p>
+
+<p>An instance of a <tt>MachinePassRegistryNode</tt> subclass is used to maintain
+information provided about a particular pass. This information includes the
+command line name, the command help string, and the address of the function used
+to create an instance of the pass. A global static constructor of one of these
+instances <i>registers</i> with a corresponding <tt>MachinePassRegistry</tt>;
+the static destructor <i>unregisters</i>. Thus a pass that is statically linked
+into the tool will be registered at start up. A dynamically loaded pass will
+register on load and unregister at unload.</p>
+
+<!-- _______________________________________________________________________ -->
+<h3>
+ <a name="registering_existing">Using existing registries</a>
+</h3>
+
+<div>
+
+<p>There are predefined registries to track instruction scheduling
+(<tt>RegisterScheduler</tt>) and register allocation (<tt>RegisterRegAlloc</tt>)
+machine passes. Here we will describe how to <i>register</i> a register
+allocator machine pass.</p>
+
+<p>Implement your register allocator machine pass. In your register allocator
+<tt>.cpp</tt> file, add the following include:</p>
+
+<div class="doc_code"><pre>
+#include "llvm/CodeGen/RegAllocRegistry.h"
+</pre></div>
+
+<p>Also in your register allocator <tt>.cpp</tt> file, define a creator function
+of the form:</p>
+
+<div class="doc_code"><pre>
+FunctionPass *createMyRegisterAllocator() {
+ return new MyRegisterAllocator();
+}
+</pre></div>
+
+<p>Note that the signature of this function should match the type of
+<tt>RegisterRegAlloc::FunctionPassCtor</tt>. In the same file, add the
+"installing" declaration, of the form:</p>
+
+<div class="doc_code"><pre>
+static RegisterRegAlloc myRegAlloc("myregalloc",
+ "my register allocator help string",
+ createMyRegisterAllocator);
+</pre></div>
+
+<p>Note that the two spaces prior to the help string produce a tidy result on
+the <tt>-help</tt> query.</p>
+
+<div class="doc_code"><pre>
+$ llc -help
+ ...
+ -regalloc - Register allocator to use (default=linearscan)
+ =linearscan - linear scan register allocator
+ =local - local register allocator
+ =simple - simple register allocator
+ =myregalloc - my register allocator help string
+ ...
+</pre></div>
+
+<p>And that's it. The user is now free to use <tt>-regalloc=myregalloc</tt> as
+an option. Registering instruction schedulers is similar except use the
+<tt>RegisterScheduler</tt> class. Note that the
+<tt>RegisterScheduler::FunctionPassCtor</tt> is significantly different from
+<tt>RegisterRegAlloc::FunctionPassCtor</tt>.</p>
+
+<p>To force the load/linking of your register allocator into the llc/lli tools,
+add your creator function's global declaration to "Passes.h" and add a "pseudo"
+call line to <tt>llvm/CodeGen/LinkAllCodegenComponents.h</tt>.</p>
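+
+<p>As a sketch (using the hypothetical creator function from above), the
+"pseudo" call is typically just a reference like the following, placed among
+the other entries in that header so the linker cannot strip your allocator:</p>
+
+<div class="doc_code"><pre>
+  ...
+  (void) llvm::createMyRegisterAllocator();
+  ...
+</pre></div>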
+
+</div>
+
+
+<!-- _______________________________________________________________________ -->
+<h3>
+ <a name="registering_new">Creating new registries</a>
+</h3>
+
+<div>
+
+<p>The easiest way to get started is to clone one of the existing registries; we
+recommend <tt>llvm/CodeGen/RegAllocRegistry.h</tt>. The key things to modify
+are the class name and the <tt>FunctionPassCtor</tt> type.</p>
+
+<p>Then you need to declare the registry. Example: if your pass registry is
+<tt>RegisterMyPasses</tt>, then define:</p>
+
+<div class="doc_code"><pre>
+MachinePassRegistry RegisterMyPasses::Registry;
+</pre></div>
+
+<p>And finally, declare the command line option for your passes. Example:</p>
+
+<div class="doc_code"><pre>
+cl::opt&lt;RegisterMyPasses::FunctionPassCtor, false,
+ RegisterPassParser&lt;RegisterMyPasses&gt; &gt;
+MyPassOpt("mypass",
+ cl::init(&amp;createDefaultMyPass),
+ cl::desc("my pass option help"));
+</pre></div>
+
+<p>Here the command option is "<tt>mypass</tt>", with <tt>createDefaultMyPass</tt>
+as the default creator.</p>
+
+</div>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2>
+ <a name="debughints">Using GDB with dynamically loaded passes</a>
+</h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>Unfortunately, using GDB with dynamically loaded passes is not as easy as it
+should be. First of all, you can't set a breakpoint in a shared object that has
+not been loaded yet, and second of all there are problems with inlined functions
+in shared objects. Here are some suggestions for debugging your pass with
+GDB.</p>
+
+<p>For the sake of discussion, I'm going to assume that you are debugging a
+transformation invoked by <tt>opt</tt>, although nothing described here depends
+on that.</p>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="breakpoint">Setting a breakpoint in your pass</a>
+</h4>
+
+<div>
+
+<p>The first thing to do is start <tt>gdb</tt> on the <tt>opt</tt> process:</p>
+
+<div class="doc_code"><pre>
+$ <b>gdb opt</b>
+GNU gdb 5.0
+Copyright 2000 Free Software Foundation, Inc.
+GDB is free software, covered by the GNU General Public License, and you are
+welcome to change it and/or distribute copies of it under certain conditions.
+Type "show copying" to see the conditions.
+There is absolutely no warranty for GDB. Type "show warranty" for details.
+This GDB was configured as "sparc-sun-solaris2.6"...
+(gdb)
+</pre></div>
+
+<p>Note that <tt>opt</tt> has a lot of debugging information in it, so it takes
+time to load. Be patient. Since we cannot set a breakpoint in our pass yet
+(the shared object isn't loaded until runtime), we must execute the process, and
+have it stop before it invokes our pass, but after it has loaded the shared
+object. The most foolproof way of doing this is to set a breakpoint in
+<tt>PassManager::run</tt> and then run the process with the arguments you
+want:</p>
+
+<div class="doc_code"><pre>
+(gdb) <b>break llvm::PassManager::run</b>
+Breakpoint 1 at 0x2413bc: file Pass.cpp, line 70.
+(gdb) <b>run test.bc -load $(LLVMTOP)/llvm/Debug+Asserts/lib/[libname].so -[passoption]</b>
+Starting program: opt test.bc -load $(LLVMTOP)/llvm/Debug+Asserts/lib/[libname].so -[passoption]
+Breakpoint 1, PassManager::run (this=0xffbef174, M=@0x70b298) at Pass.cpp:70
+70 bool PassManager::run(Module &amp;M) { return PM-&gt;run(M); }
+(gdb)
+</pre></div>
+
+<p>Once <tt>opt</tt> stops in the <tt>PassManager::run</tt> method, you are
+free to set breakpoints in your pass so that you can trace through execution
+or do other standard debugging stuff.</p>
+
+</div>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="debugmisc">Miscellaneous Problems</a>
+</h4>
+
+<div>
+
+<p>Once you have the basics down, there are a couple of problems that GDB has,
+some with solutions, some without.</p>
+
+<ul>
+<li>Inline functions have bogus stack information. In general, GDB does a
+pretty good job getting stack traces and stepping through inline functions.
+When a pass is dynamically loaded however, it somehow completely loses this
+capability. The only solution I know of is to de-inline a function (move it
+from the body of a class to a .cpp file).</li>
+
+<li>Restarting the program breaks breakpoints. After following the information
+above, you have succeeded in getting some breakpoints planted in your pass. Next
+thing you know, you restart the program (i.e., you type '<tt>run</tt>' again),
+and you start getting errors about breakpoints being unsettable. The only way I
+have found to "fix" this problem is to <tt>delete</tt> the breakpoints that are
+already set in your pass, run the program, and re-set the breakpoints once
+execution stops in <tt>PassManager::run</tt>.</li>
+
+</ul>
+
+<p>Hopefully these tips will help with common case debugging situations. If
+you'd like to contribute some tips of your own, just contact <a
+href="mailto:sabre@nondot.org">Chris</a>.</p>
+
+</div>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2>
+ <a name="future">Future extensions planned</a>
+</h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>Although the LLVM Pass Infrastructure is very capable as it stands, and does
+some nifty stuff, there are things we'd like to add in the future. Here is
+where we are going:</p>
+
+<!-- _______________________________________________________________________ -->
+<h4>
+ <a name="SMP">Multithreaded LLVM</a>
+</h4>
+
+<div>
+
+<p>Multiple CPU machines are becoming more common and compilation can never be
+fast enough: obviously we should allow for a multithreaded compiler. Because of
+the semantics defined for passes above (specifically they cannot maintain state
+across invocations of their <tt>run*</tt> methods), a nice clean way to
+implement a multithreaded compiler would be for the <tt>PassManager</tt> class
+to create multiple instances of each pass object, and allow the separate
+instances to be hacking on different parts of the program at the same time.</p>
+
+<p>This implementation would prevent each of the passes from having to implement
+multithreaded constructs, requiring only the LLVM core to have locking in a few
+places (for global resources). Although this is a simple extension, we simply
+haven't had time (or multiprocessor machines, thus a reason) to implement this.
+Despite that, we have kept the LLVM passes SMP ready, and you should too.</p>
+
+</div>
+
+</div>
+
+<!-- *********************************************************************** -->
+<hr>
+<address>
+ <a href="http://jigsaw.w3.org/css-validator/check/referer"><img
+ src="http://jigsaw.w3.org/css-validator/images/vcss-blue" alt="Valid CSS"></a>
+ <a href="http://validator.w3.org/check/referer"><img
+ src="http://www.w3.org/Icons/valid-html401-blue" alt="Valid HTML 4.01"></a>
+
+ <a href="mailto:sabre@nondot.org">Chris Lattner</a><br>
+ <a href="http://llvm.org/">The LLVM Compiler Infrastructure</a><br>
+ Last modified: $Date$
+</address>
+
+</body>
+</html>
diff --git a/docs/_static/lines.gif b/docs/_static/lines.gif
new file mode 100644
index 00000000000..88f491edc30
--- /dev/null
+++ b/docs/_static/lines.gif
Binary files differ
diff --git a/docs/_static/llvm.css b/docs/_static/llvm.css
new file mode 100644
index 00000000000..d7b5dae5a93
--- /dev/null
+++ b/docs/_static/llvm.css
@@ -0,0 +1,112 @@
+/*
+ * LLVM documentation style sheet
+ */
+
+/* Common styles */
+.body { color: black; background: white; margin: 0 0 0 0 }
+
+/* No borders on image links */
+a:link img, a:visited img { border-style: none }
+
+address img { float: right; width: 88px; height: 31px; }
+address { clear: right; }
+
+table { text-align: center; border: 2px solid black;
+ border-collapse: collapse; margin-top: 1em; margin-left: 1em;
+ margin-right: 1em; margin-bottom: 1em; }
+tr, td { border: 2px solid gray; padding: 4pt 4pt 2pt 2pt; }
+th { border: 2px solid gray; font-weight: bold; font-size: 105%;
+ background: url("lines.gif");
+ font-family: "Georgia,Palatino,Times,Roman,SanSerif";
+ text-align: center; vertical-align: middle; }
+/*
+ * Documentation
+ */
+/* Common for title and header */
+.doc_title, .doc_section, .doc_subsection, h1, h2, h3 {
+ color: black; background: url("lines.gif");
+ font-family: "Georgia,Palatino,Times,Roman,SanSerif"; font-weight: bold;
+ border-width: 1px;
+ border-style: solid none solid none;
+ text-align: center;
+ vertical-align: middle;
+ padding-left: 8pt;
+ padding-top: 1px;
+ padding-bottom: 2px
+}
+
+h1, .doc_title, .title { text-align: left; font-size: 25pt }
+
+h2, .doc_section { text-align: center; font-size: 22pt;
+ margin: 20pt 0pt 5pt 0pt; }
+
+h3, .doc_subsection { width: 75%;
+ text-align: left; font-size: 12pt;
+ padding: 4pt 4pt 4pt 4pt;
+ margin: 1.5em 0.5em 0.5em 0.5em }
+
+h4, .doc_subsubsection { margin: 2.0em 0.5em 0.5em 0.5em;
+ font-weight: bold; font-style: oblique;
+ border-bottom: 1px solid #999999; font-size: 12pt;
+ width: 75%; }
+
+.doc_author { text-align: left; font-weight: bold; padding-left: 20pt }
+.doc_text { text-align: left; padding-left: 20pt; padding-right: 10pt }
+
+.doc_footer { text-align: left; padding: 0 0 0 0 }
+
+.doc_hilite { color: blue; font-weight: bold; }
+
+.doc_table { text-align: center; width: 90%;
+ padding: 1px 1px 1px 1px; border: 1px; }
+
+.doc_warning { color: red; font-weight: bold }
+
+/* <div class="doc_code"> would use this class, and <div> adds more padding */
+.doc_code, .literal-block
+ { border: solid 1px gray; background: #eeeeee;
+ margin: 0 1em 0 1em;
+ padding: 0 1em 0 1em;
+ display: table;
+ }
+
+blockquote pre {
+ padding: 1em 2em 1em 1em;
+ border: solid 1px gray;
+ background: #eeeeee;
+ margin: 0 1em 0 1em;
+ display: table;
+}
+
+h2+div, h2+p {text-align: left; padding-left: 20pt; padding-right: 10pt;}
+h3+div, h3+p {text-align: left; padding-left: 20pt; padding-right: 10pt;}
+h4+div, h4+p {text-align: left; padding-left: 20pt; padding-right: 10pt;}
+
+/* It is preferable to use <pre class="doc_code"> everywhere instead of the
+ * <div class="doc_code"><pre>...</pre></div> construct.
+ *
+ * Once all docs use <pre> for code regions, this style can be merged with the
+ * one above, and we can drop the [pre] qualifier.
+ */
+pre.doc_code, .literal-block { padding: 1em 2em 1em 1em }
+
+.doc_notes { background: #fafafa; border: 1px solid #cecece;
+ display: table; padding: 0 1em 0 .1em }
+
+table.layout { text-align: left; border: none; border-collapse: collapse;
+ padding: 4px 4px 4px 4px; }
+tr.layout, td.layout, td.left, td.right
+ { border: none; padding: 4pt 4pt 2pt 2pt; vertical-align: top; }
+td.left { text-align: left }
+td.right { text-align: right }
+th.layout { border: none; font-weight: bold; font-size: 105%;
+ text-align: center; vertical-align: middle; }
+
+/* Left align table cell */
+.td_left { border: 2px solid gray; text-align: left; }
+
+/* ReST-specific */
+.title { margin-top: 0 }
+.topic-title{ display: none }
+div.contents ul { list-style-type: decimal }
+.toc-backref { color: black; text-decoration: none; }
diff --git a/docs/_templates/indexsidebar.html b/docs/_templates/indexsidebar.html
new file mode 100644
index 00000000000..416174279ce
--- /dev/null
+++ b/docs/_templates/indexsidebar.html
@@ -0,0 +1,7 @@
+{# This template defines sidebar which can be used to provide common links on
+ all documentation pages. #}
+
+<h3>Bugs</h3>
+
+<p>LLVM bugs should be reported to
+ <a href="http://llvm.org/bugs">Bugzilla</a>.</p>
diff --git a/docs/_templates/layout.html b/docs/_templates/layout.html
new file mode 100644
index 00000000000..de5db5caf14
--- /dev/null
+++ b/docs/_templates/layout.html
@@ -0,0 +1,13 @@
+{% extends "!layout.html" %}
+
+{% block extrahead %}
+<style type="text/css">
+ table.right { float: right; margin-left: 20px; }
+ table.right td { border: 1px solid #ccc; }
+</style>
+{% endblock %}
+
+{% block rootrellink %}
+ <li><a href="http://llvm.org/">LLVM Home</a>&nbsp;|&nbsp;</li>
+ <li><a href="{{ pathto('index') }}">Documentation</a>&raquo;</li>
+{% endblock %}
diff --git a/docs/_themes/llvm-theme/layout.html b/docs/_themes/llvm-theme/layout.html
new file mode 100644
index 00000000000..746c2f56c82
--- /dev/null
+++ b/docs/_themes/llvm-theme/layout.html
@@ -0,0 +1,23 @@
+{#
+ sphinxdoc/layout.html
+ ~~~~~~~~~~~~~~~~~~~~~
+
+ Sphinx layout template for the sphinxdoc theme.
+
+ :copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+#}
+{% extends "basic/layout.html" %}
+
+{% block relbar1 %}
+<div class="logo">
+ <a href="{{ pathto('index') }}">
+ <img src="{{pathto("_static/logo.png", 1) }}"
+ alt="LLVM Logo" width="250" height="88"/></a>
+</div>
+{{ super() }}
+{% endblock %}
+
+{# put the sidebar before the body #}
+{% block sidebar1 %}{{ sidebar() }}{% endblock %}
+{% block sidebar2 %}{% endblock %}
diff --git a/docs/_themes/llvm-theme/static/contents.png b/docs/_themes/llvm-theme/static/contents.png
new file mode 100644
index 00000000000..7fb82154a17
--- /dev/null
+++ b/docs/_themes/llvm-theme/static/contents.png
Binary files differ
diff --git a/docs/_themes/llvm-theme/static/llvm-theme.css b/docs/_themes/llvm-theme/static/llvm-theme.css
new file mode 100644
index 00000000000..f684d00ce43
--- /dev/null
+++ b/docs/_themes/llvm-theme/static/llvm-theme.css
@@ -0,0 +1,374 @@
+/*
+ * sphinxdoc.css_t
+ * ~~~~~~~~~~~~~~~
+ *
+ * Sphinx stylesheet -- sphinxdoc theme. Originally created by
+ * Armin Ronacher for Werkzeug.
+ *
+ * :copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS.
+ * :license: BSD, see LICENSE for details.
+ *
+ */
+
+@import url("basic.css");
+
+/* -- page layout ----------------------------------------------------------- */
+
+body {
+ font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva',
+ 'Verdana', sans-serif;
+ font-size: 14px;
+ letter-spacing: -0.01em;
+ line-height: 150%;
+ text-align: center;
+ background-color: #BFD1D4;
+ color: black;
+ padding: 0;
+ border: 1px solid #aaa;
+
+ margin: 0px 80px 0px 80px;
+ min-width: 740px;
+}
+
+div.logo {
+ background-color: white;
+ text-align: left;
+ padding: 10px 10px 15px 15px;
+}
+
+div.document {
+ background-color: white;
+ text-align: left;
+ background-image: url(contents.png);
+ background-repeat: repeat-x;
+}
+
+div.bodywrapper {
+ margin: 0 240px 0 0;
+ border-right: 1px solid #ccc;
+}
+
+div.body {
+ margin: 0;
+ padding: 0.5em 20px 20px 20px;
+}
+
+div.related {
+ font-size: 1em;
+}
+
+div.related ul {
+ background-image: url(navigation.png);
+ height: 2em;
+ border-top: 1px solid #ddd;
+ border-bottom: 1px solid #ddd;
+}
+
+div.related ul li {
+ margin: 0;
+ padding: 0;
+ height: 2em;
+ float: left;
+}
+
+div.related ul li.right {
+ float: right;
+ margin-right: 5px;
+}
+
+div.related ul li a {
+ margin: 0;
+ padding: 0 5px 0 5px;
+ line-height: 1.75em;
+ color: #EE9816;
+}
+
+div.related ul li a:hover {
+ color: #3CA8E7;
+}
+
+div.sphinxsidebarwrapper {
+ padding: 0;
+}
+
+div.sphinxsidebar {
+ margin: 0;
+ padding: 0.5em 15px 15px 0;
+ width: 210px;
+ float: right;
+ font-size: 1em;
+ text-align: left;
+}
+
+div.sphinxsidebar h3, div.sphinxsidebar h4 {
+ margin: 1em 0 0.5em 0;
+ font-size: 1em;
+ padding: 0.1em 0 0.1em 0.5em;
+ color: white;
+ border: 1px solid #86989B;
+ background-color: #AFC1C4;
+}
+
+div.sphinxsidebar h3 a {
+ color: white;
+}
+
+div.sphinxsidebar ul {
+ padding-left: 1.5em;
+ margin-top: 7px;
+ padding: 0;
+ line-height: 130%;
+}
+
+div.sphinxsidebar ul ul {
+ margin-left: 20px;
+}
+
+div.footer {
+ background-color: #E3EFF1;
+ color: #86989B;
+ padding: 3px 8px 3px 0;
+ clear: both;
+ font-size: 0.8em;
+ text-align: right;
+}
+
+div.footer a {
+ color: #86989B;
+ text-decoration: underline;
+}
+
+/* -- body styles ----------------------------------------------------------- */
+
+p {
+ margin: 0.8em 0 0.5em 0;
+}
+
+a {
+ color: #CA7900;
+ text-decoration: none;
+}
+
+a:hover {
+ color: #2491CF;
+}
+
+div.body p a{
+ text-decoration: underline;
+}
+
+h1 {
+ margin: 0;
+ padding: 0.7em 0 0.3em 0;
+ font-size: 1.5em;
+ color: #11557C;
+}
+
+h2 {
+ margin: 1.3em 0 0.2em 0;
+ font-size: 1.35em;
+ padding: 0;
+}
+
+h3 {
+ margin: 1em 0 -0.3em 0;
+ font-size: 1.2em;
+}
+
+h3 a:hover {
+ text-decoration: underline;
+}
+
+div.body h1 a, div.body h2 a, div.body h3 a, div.body h4 a, div.body h5 a, div.body h6 a {
+ color: black!important;
+}
+
+div.body h1,
+div.body h2,
+div.body h3,
+div.body h4,
+div.body h5,
+div.body h6 {
+ background-color: #f2f2f2;
+ font-weight: normal;
+ color: #20435c;
+ border-bottom: 1px solid #ccc;
+ margin: 20px -20px 10px -20px;
+ padding: 3px 0 3px 10px;
+}
+
+div.body h1 { margin-top: 0; font-size: 200%; }
+div.body h2 { font-size: 160%; }
+div.body h3 { font-size: 140%; }
+div.body h4 { font-size: 120%; }
+div.body h5 { font-size: 110%; }
+div.body h6 { font-size: 100%; }
+
+h1 a.anchor, h2 a.anchor, h3 a.anchor, h4 a.anchor, h5 a.anchor, h6 a.anchor {
+ display: none;
+ margin: 0 0 0 0.3em;
+ padding: 0 0.2em 0 0.2em;
+ color: #aaa!important;
+}
+
+h1:hover a.anchor, h2:hover a.anchor, h3:hover a.anchor, h4:hover a.anchor,
+h5:hover a.anchor, h6:hover a.anchor {
+ display: inline;
+}
+
+h1 a.anchor:hover, h2 a.anchor:hover, h3 a.anchor:hover, h4 a.anchor:hover,
+h5 a.anchor:hover, h6 a.anchor:hover {
+ color: #777;
+ background-color: #eee;
+}
+
+a.headerlink {
+ color: #c60f0f!important;
+ font-size: 1em;
+ margin-left: 6px;
+ padding: 0 4px 0 4px;
+ text-decoration: none!important;
+}
+
+a.headerlink:hover {
+ background-color: #ccc;
+ color: white!important;
+}
+
+cite, code, tt {
+ font-family: 'Consolas', 'Deja Vu Sans Mono',
+ 'Bitstream Vera Sans Mono', monospace;
+ font-size: 0.95em;
+ letter-spacing: 0.01em;
+}
+
+:not(a.reference) > tt {
+ background-color: #f2f2f2;
+ border-bottom: 1px solid #ddd;
+ color: #333;
+}
+
+tt.descname, tt.descclassname, tt.xref {
+ border: 0;
+}
+
+hr {
+ border: 1px solid #abc;
+ margin: 2em;
+}
+
+p a tt {
+ border: 0;
+ color: #CA7900;
+}
+
+p a tt:hover {
+ color: #2491CF;
+}
+
+a tt {
+ border: none;
+}
+
+pre {
+ font-family: 'Consolas', 'Deja Vu Sans Mono',
+ 'Bitstream Vera Sans Mono', monospace;
+ font-size: 0.95em;
+ letter-spacing: 0.015em;
+ line-height: 120%;
+ padding: 0.5em;
+ border: 1px solid #ccc;
+ background-color: #f8f8f8;
+}
+
+pre a {
+ color: inherit;
+ text-decoration: underline;
+}
+
+td.linenos pre {
+ padding: 0.5em 0;
+}
+
+div.quotebar {
+ background-color: #f8f8f8;
+ max-width: 250px;
+ float: right;
+ padding: 2px 7px;
+ border: 1px solid #ccc;
+}
+
+div.topic {
+ background-color: #f8f8f8;
+}
+
+table {
+ border-collapse: collapse;
+ margin: 0 -0.5em 0 -0.5em;
+}
+
+table td, table th {
+ padding: 0.2em 0.5em 0.2em 0.5em;
+}
+
+div.admonition, div.warning {
+ font-size: 0.9em;
+ margin: 1em 0 1em 0;
+ border: 1px solid #86989B;
+ background-color: #f7f7f7;
+ padding: 0;
+}
+
+div.admonition p, div.warning p {
+ margin: 0.5em 1em 0.5em 1em;
+ padding: 0;
+}
+
+div.admonition pre, div.warning pre {
+ margin: 0.4em 1em 0.4em 1em;
+}
+
+div.admonition p.admonition-title,
+div.warning p.admonition-title {
+ margin: 0;
+ padding: 0.1em 0 0.1em 0.5em;
+ color: white;
+ border-bottom: 1px solid #86989B;
+ font-weight: bold;
+ background-color: #AFC1C4;
+}
+
+div.warning {
+ border: 1px solid #940000;
+}
+
+div.warning p.admonition-title {
+ background-color: #CF0000;
+ border-bottom-color: #940000;
+}
+
+div.admonition ul, div.admonition ol,
+div.warning ul, div.warning ol {
+ margin: 0.1em 0.5em 0.5em 3em;
+ padding: 0;
+}
+
+div.versioninfo {
+ margin: 1em 0 0 0;
+ border: 1px solid #ccc;
+ background-color: #DDEAF0;
+ padding: 8px;
+ line-height: 1.3em;
+ font-size: 0.9em;
+}
+
+.viewcode-back {
+ font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva',
+ 'Verdana', sans-serif;
+}
+
+div.viewcode-block:target {
+ background-color: #f4debf;
+ border-top: 1px solid #ac9;
+ border-bottom: 1px solid #ac9;
+}
diff --git a/docs/_themes/llvm-theme/static/logo.png b/docs/_themes/llvm-theme/static/logo.png
new file mode 100644
index 00000000000..18d424c53c0
--- /dev/null
+++ b/docs/_themes/llvm-theme/static/logo.png
Binary files differ
diff --git a/docs/_themes/llvm-theme/static/navigation.png b/docs/_themes/llvm-theme/static/navigation.png
new file mode 100644
index 00000000000..1081dc1439f
--- /dev/null
+++ b/docs/_themes/llvm-theme/static/navigation.png
Binary files differ
diff --git a/docs/_themes/llvm-theme/theme.conf b/docs/_themes/llvm-theme/theme.conf
new file mode 100644
index 00000000000..573fd78aba9
--- /dev/null
+++ b/docs/_themes/llvm-theme/theme.conf
@@ -0,0 +1,4 @@
+[theme]
+inherit = basic
+stylesheet = llvm-theme.css
+pygments_style = friendly
diff --git a/docs/conf.py b/docs/conf.py
new file mode 100644
index 00000000000..a1e9b5f6e28
--- /dev/null
+++ b/docs/conf.py
@@ -0,0 +1,252 @@
+# -*- coding: utf-8 -*-
+#
+# LLVM documentation build configuration file.
+#
+# This file is execfile()d with the current directory set to its containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys, os
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+#sys.path.insert(0, os.path.abspath('.'))
+
+# -- General configuration -----------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+#needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be extensions
+# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
+extensions = ['sphinx.ext.intersphinx', 'sphinx.ext.todo']
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The encoding of source files.
+#source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'LLVM'
+copyright = u'2012, LLVM Project'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+version = '3.2'
+# The full version, including alpha/beta/rc tags.
+release = '3.2'
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+today_fmt = '%Y-%m-%d'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+exclude_patterns = ['_build']
+
+# The reST default role (used for this markup: `text`) to use for all documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+show_authors = True
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'friendly'
+
+# A list of ignored prefixes for module index sorting.
+#modindex_common_prefix = []
+
+
+# -- Options for HTML output ---------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+html_theme = 'llvm-theme'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+#html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+html_theme_path = ["_themes"]
+
+# The name for this set of Sphinx documents. If None, it defaults to
+# "<project> v<release> documentation".
+#html_title = None
+
+# A shorter title for the navigation bar. Default is the same as html_title.
+#html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+#html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+html_last_updated_fmt = '%Y-%m-%d'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+html_sidebars = {'index': 'indexsidebar.html'}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#html_additional_pages = {}
+
+# If false, no module index is generated.
+#html_domain_indices = True
+
+# If false, no index is generated.
+#html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+#html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+#html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it. The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = None
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'LLVMdoc'
+
+
+# -- Options for LaTeX output --------------------------------------------------
+
+latex_elements = {
+# The paper size ('letterpaper' or 'a4paper').
+#'papersize': 'letterpaper',
+
+# The font size ('10pt', '11pt' or '12pt').
+#'pointsize': '10pt',
+
+# Additional stuff for the LaTeX preamble.
+#'preamble': '',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title, author, documentclass [howto/manual]).
+latex_documents = [
+ ('index', 'LLVM.tex', u'LLVM Documentation',
+ u'LLVM project', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#latex_use_parts = False
+
+# If true, show page references after internal links.
+#latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+#latex_show_urls = False
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+#latex_domain_indices = True
+
+
+# -- Options for manual page output --------------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = []
+
+# Automatically derive the list of man pages from the contents of the command
+# guide subdirectory.
+basedir = os.path.dirname(__file__)
+man_page_authors = "Maintained by The LLVM Team (http://llvm.org/)."
+command_guide_subpath = 'CommandGuide'
+command_guide_path = os.path.join(basedir, command_guide_subpath)
+for name in os.listdir(command_guide_path):
+ # Ignore non-ReST files and the index page.
+ if not name.endswith('.rst') or name in ('index.rst',):
+ continue
+
+ # Otherwise, automatically extract the description.
+ file_subpath = os.path.join(command_guide_subpath, name)
+ with open(os.path.join(command_guide_path, name)) as f:
+ title = f.readline().rstrip('\n')
+ header = f.readline().rstrip('\n')
+
+ if len(header) != len(title):
+ print >>sys.stderr, (
+ "error: invalid header in %r (does not match title)" % (
+ file_subpath,))
+ if ' - ' not in title:
+ print >>sys.stderr, (
+ ("error: invalid title in %r "
+ "(expected '<name> - <description>')") % (
+ file_subpath,))
+
+ # Split the name out of the title.
+ name,description = title.split(' - ', 1)
+ man_pages.append((file_subpath.replace('.rst',''), name,
+ description, man_page_authors, 1))
+
+# If true, show URL addresses after external links.
+#man_show_urls = False
+
+# FIXME: Define intersphinx configuration.
+intersphinx_mapping = {}
diff --git a/docs/design_and_overview.rst b/docs/design_and_overview.rst
new file mode 100644
index 00000000000..ea684155e00
--- /dev/null
+++ b/docs/design_and_overview.rst
@@ -0,0 +1,36 @@
+.. _design_and_overview:
+
+LLVM Design & Overview
+======================
+
+.. toctree::
+ :hidden:
+
+ GetElementPtr
+
+* `LLVM Language Reference Manual <LangRef.html>`_
+
+ Defines the LLVM intermediate representation.
+
+* `Introduction to the LLVM Compiler <http://llvm.org/pubs/2008-10-04-ACAT-LLVM-Intro.html>`_
+
+  Presentation providing a user's introduction to LLVM.
+
+* `Intro to LLVM <http://www.aosabook.org/en/llvm.html>`_
+
+ Book chapter providing a compiler hacker's introduction to LLVM.
+
+* `LLVM: A Compilation Framework for Lifelong Program Analysis & Transformation
+ <http://llvm.org/pubs/2004-01-30-CGO-LLVM.html>`_
+
+ Design overview.
+
+* `LLVM: An Infrastructure for Multi-Stage Optimization
+ <http://llvm.org/pubs/2002-12-LattnerMSThesis.html>`_
+
+ More details (quite old now).
+
+* :ref:`gep`
+
+ Answers to some very frequent questions about LLVM's most frequently
+ misunderstood instruction.
diff --git a/docs/development_process.rst b/docs/development_process.rst
new file mode 100644
index 00000000000..4fc20b34129
--- /dev/null
+++ b/docs/development_process.rst
@@ -0,0 +1,30 @@
+.. _development_process:
+
+Development Process Documentation
+=================================
+
+.. toctree::
+ :hidden:
+
+ MakefileGuide
+ Projects
+
+* :ref:`projects`
+
+ How-to guide and templates for new projects that *use* the LLVM
+ infrastructure. The templates (directory organization, Makefiles, and test
+ tree) allow the project code to be located outside (or inside) the ``llvm/``
+ tree, while using LLVM header files and libraries.
+
+* `LLVMBuild Documentation <LLVMBuild.html>`_
+
+ Describes the LLVMBuild organization and files used by LLVM to specify
+ component descriptions.
+
+* :ref:`makefile_guide`
+
+ Describes how the LLVM makefiles work and how to use them.
+
+* `How To Release LLVM To The Public <HowToReleaseLLVM.html>`_
+
+ This is a guide to preparing LLVM releases. Most developers can ignore it.
diff --git a/docs/doxygen.cfg.in b/docs/doxygen.cfg.in
new file mode 100644
index 00000000000..20de0773f40
--- /dev/null
+++ b/docs/doxygen.cfg.in
@@ -0,0 +1,1632 @@
+# Doxyfile 1.7.1
+
+# This file describes the settings to be used by the documentation system
+# doxygen (www.doxygen.org) for a project
+#
+# All text after a hash (#) is considered a comment and will be ignored
+# The format is:
+# TAG = value [value, ...]
+# For lists items can also be appended using:
+# TAG += value [value, ...]
+# Values that contain spaces should be placed between quotes (" ")
+
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+
+# This tag specifies the encoding used for all characters in the config file
+# that follow. The default is UTF-8 which is also the encoding used for all
+# text before the first occurrence of this tag. Doxygen uses libiconv (or the
+# iconv built into libc) for the transcoding. See
+# http://www.gnu.org/software/libiconv for the list of possible encodings.
+
+DOXYFILE_ENCODING = UTF-8
+
+# The PROJECT_NAME tag is a single word (or a sequence of words surrounded
+# by quotes) that should identify the project.
+
+PROJECT_NAME = LLVM
+
+# The PROJECT_NUMBER tag can be used to enter a project or revision number.
+# This could be handy for archiving the generated documentation or
+# if some version control system is used.
+
+PROJECT_NUMBER = @PACKAGE_VERSION@
+
+# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)
+# base path where the generated documentation will be put.
+# If a relative path is entered, it will be relative to the location
+# where doxygen was started. If left blank the current directory will be used.
+
+OUTPUT_DIRECTORY = @abs_top_builddir@/docs/doxygen
+
+# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create
+# 4096 sub-directories (in 2 levels) under the output directory of each output
+# format and will distribute the generated files over these directories.
+# Enabling this option can be useful when feeding doxygen a huge amount of
+# source files, where putting all generated files in the same directory would
+# otherwise cause performance problems for the file system.
+
+CREATE_SUBDIRS = NO
+
+# The OUTPUT_LANGUAGE tag is used to specify the language in which all
+# documentation generated by doxygen is written. Doxygen will use this
+# information to generate all constant output in the proper language.
+# The default language is English, other supported languages are:
+# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional,
+# Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German,
+# Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English
+# messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian,
+# Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrilic, Slovak,
+# Slovene, Spanish, Swedish, Ukrainian, and Vietnamese.
+
+OUTPUT_LANGUAGE = English
+
+# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will
+# include brief member descriptions after the members that are listed in
+# the file and class documentation (similar to JavaDoc).
+# Set to NO to disable this.
+
+BRIEF_MEMBER_DESC = YES
+
+# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend
+# the brief description of a member or function before the detailed description.
+# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
+# brief descriptions will be completely suppressed.
+
+REPEAT_BRIEF = YES
+
+# This tag implements a quasi-intelligent brief description abbreviator
+# that is used to form the text in various listings. Each string
+# in this list, if found as the leading text of the brief description, will be
+# stripped from the text and the result after processing the whole list, is
+# used as the annotated text. Otherwise, the brief description is used as-is.
+# If left blank, the following values are used ("$name" is automatically
+# replaced with the name of the entity): "The $name class" "The $name widget"
+# "The $name file" "is" "provides" "specifies" "contains"
+# "represents" "a" "an" "the"
+
+ABBREVIATE_BRIEF =
+
+# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
+# Doxygen will generate a detailed section even if there is only a brief
+# description.
+
+ALWAYS_DETAILED_SEC = NO
+
+# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
+# inherited members of a class in the documentation of that class as if those
+# members were ordinary class members. Constructors, destructors and assignment
+# operators of the base classes will not be shown.
+
+INLINE_INHERITED_MEMB = NO
+
+# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full
+# path before files name in the file list and in the header files. If set
+# to NO the shortest path that makes the file name unique will be used.
+
+FULL_PATH_NAMES = NO
+
+# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag
+# can be used to strip a user-defined part of the path. Stripping is
+# only done if one of the specified strings matches the left-hand part of
+# the path. The tag can be used to show relative paths in the file list.
+# If left blank the directory from which doxygen is run is used as the
+# path to strip.
+
+STRIP_FROM_PATH = ../..
+
+# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of
+# the path mentioned in the documentation of a class, which tells
+# the reader which header file to include in order to use a class.
+# If left blank only the name of the header file containing the class
+# definition is used. Otherwise one should specify the include paths that
+# are normally passed to the compiler using the -I flag.
+
+STRIP_FROM_INC_PATH =
+
+# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter
+# (but less readable) file names. This can be useful is your file systems
+# doesn't support long names like on DOS, Mac, or CD-ROM.
+
+SHORT_NAMES = NO
+
+# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen
+# will interpret the first line (until the first dot) of a JavaDoc-style
+# comment as the brief description. If set to NO, the JavaDoc
+# comments will behave just like regular Qt-style comments
+# (thus requiring an explicit @brief command for a brief description.)
+
+JAVADOC_AUTOBRIEF = NO
+
+# If the QT_AUTOBRIEF tag is set to YES then Doxygen will
+# interpret the first line (until the first dot) of a Qt-style
+# comment as the brief description. If set to NO, the comments
+# will behave just like regular Qt-style comments (thus requiring
+# an explicit \brief command for a brief description.)
+
+QT_AUTOBRIEF = NO
+
+# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen
+# treat a multi-line C++ special comment block (i.e. a block of //! or ///
+# comments) as a brief description. This used to be the default behaviour.
+# The new default is to treat a multi-line C++ comment block as a detailed
+# description. Set this tag to YES if you prefer the old behaviour instead.
+
+MULTILINE_CPP_IS_BRIEF = NO
+
+# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented
+# member inherits the documentation from any documented member that it
+# re-implements.
+
+INHERIT_DOCS = YES
+
+# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce
+# a new page for each member. If set to NO, the documentation of a member will
+# be part of the file/class/namespace that contains it.
+
+SEPARATE_MEMBER_PAGES = NO
+
+# The TAB_SIZE tag can be used to set the number of spaces in a tab.
+# Doxygen uses this value to replace tabs by spaces in code fragments.
+
+TAB_SIZE = 2
+
+# This tag can be used to specify a number of aliases that acts
+# as commands in the documentation. An alias has the form "name=value".
+# For example adding "sideeffect=\par Side Effects:\n" will allow you to
+# put the command \sideeffect (or @sideeffect) in the documentation, which
+# will result in a user-defined paragraph with heading "Side Effects:".
+# You can put \n's in the value part of an alias to insert newlines.
+
+ALIASES =
+
+# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C
+# sources only. Doxygen will then generate output that is more tailored for C.
+# For instance, some of the names that are used will be different. The list
+# of all members will be omitted, etc.
+
+OPTIMIZE_OUTPUT_FOR_C = NO
+
+# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java
+# sources only. Doxygen will then generate output that is more tailored for
+# Java. For instance, namespaces will be presented as packages, qualified
+# scopes will look different, etc.
+
+OPTIMIZE_OUTPUT_JAVA = NO
+
+# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
+# sources only. Doxygen will then generate output that is more tailored for
+# Fortran.
+
+OPTIMIZE_FOR_FORTRAN = NO
+
+# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
+# sources. Doxygen will then generate output that is tailored for
+# VHDL.
+
+OPTIMIZE_OUTPUT_VHDL = NO
+
+# Doxygen selects the parser to use depending on the extension of the files it
+# parses. With this tag you can assign which parser to use for a given extension.
+# Doxygen has a built-in mapping, but you can override or extend it using this
+# tag. The format is ext=language, where ext is a file extension, and language
+# is one of the parsers supported by doxygen: IDL, Java, Javascript, CSharp, C,
+# C++, D, PHP, Objective-C, Python, Fortran, VHDL, C, C++. For instance to make
+# doxygen treat .inc files as Fortran files (default is PHP), and .f files as C
+# (default is Fortran), use: inc=Fortran f=C. Note that for custom extensions
+# you also need to set FILE_PATTERNS otherwise the files are not read by doxygen.
+
+EXTENSION_MAPPING =
+
+# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
+# to include (a tag file for) the STL sources as input, then you should
+# set this tag to YES in order to let doxygen match functions declarations and
+# definitions whose arguments contain STL classes (e.g. func(std::string); v.s.
+# func(std::string) {}). This also make the inheritance and collaboration
+# diagrams that involve STL classes more complete and accurate.
+
+BUILTIN_STL_SUPPORT = NO
+
+# If you use Microsoft's C++/CLI language, you should set this option to YES to
+# enable parsing support.
+
+CPP_CLI_SUPPORT = NO
+
+# Set the SIP_SUPPORT tag to YES if your project consists of sip sources only.
+# Doxygen will parse them like normal C++ but will assume all classes use public
+# instead of private inheritance when no explicit protection keyword is present.
+
+SIP_SUPPORT = NO
+
+# For Microsoft's IDL there are propget and propput attributes to indicate getter
+# and setter methods for a property. Setting this option to YES (the default)
+# will make doxygen replace the get and set methods with a property in the
+# documentation. This will only work if the methods are indeed getting or
+# setting a simple type. If this is not the case, or you want to show the
+# methods anyway, you should set this option to NO.
+
+IDL_PROPERTY_SUPPORT = YES
+
+# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
+# tag is set to YES, then doxygen will reuse the documentation of the first
+# member in the group (if any) for the other members of the group. By default
+# all members of a group must be documented explicitly.
+
+DISTRIBUTE_GROUP_DOC = NO
+
+# Set the SUBGROUPING tag to YES (the default) to allow class member groups of
+# the same type (for instance a group of public functions) to be put as a
+# subgroup of that type (e.g. under the Public Functions section). Set it to
+# NO to prevent subgrouping. Alternatively, this can be done per class using
+# the \nosubgrouping command.
+
+SUBGROUPING = YES
+
+# When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum
+# is documented as struct, union, or enum with the name of the typedef. So
+# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
+# with name TypeT. When disabled the typedef will appear as a member of a file,
+# namespace, or class, and the struct will be named TypeS. This can typically
+# be useful for C code in case the coding convention dictates that all compound
+# types are typedef'ed and only the typedef is referenced, never the tag name.
+
+TYPEDEF_HIDES_STRUCT = NO
+
+# The SYMBOL_CACHE_SIZE determines the size of the internal cache used to
+# determine which symbols to keep in memory and which to flush to disk.
+# When the cache is full, less often used symbols will be written to disk.
+# For small to medium size projects (<1000 input files) the default value is
+# probably good enough. For larger projects a too small cache size can cause
+# doxygen to be busy swapping symbols to and from disk most of the time,
+# causing a significant performance penalty.
+# If the system has enough physical memory, increasing the cache will improve
+# performance by keeping more symbols in memory. Note that the value works on
+# a logarithmic scale, so increasing the size by one will roughly double the
+# memory usage. The cache size is given by this formula:
+# 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0,
+# corresponding to a cache size of 2^16 = 65536 symbols.
+
+SYMBOL_CACHE_SIZE = 0
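+
+# Worked example of the formula above: with SYMBOL_CACHE_SIZE = 3 the cache
+# holds 2^(16+3) = 2^19 = 524288 symbols, eight times the default of 65536.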
+
+#---------------------------------------------------------------------------
+# Build related configuration options
+#---------------------------------------------------------------------------
+
+# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
+# documentation are documented, even if no documentation was available.
+# Private class members and static file members will be hidden unless
+# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES
+
+EXTRACT_ALL = YES
+
+# If the EXTRACT_PRIVATE tag is set to YES all private members of a class
+# will be included in the documentation.
+
+EXTRACT_PRIVATE = NO
+
+# If the EXTRACT_STATIC tag is set to YES all static members of a file
+# will be included in the documentation.
+
+EXTRACT_STATIC = YES
+
+# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs)
+# defined locally in source files will be included in the documentation.
+# If set to NO only classes defined in header files are included.
+
+EXTRACT_LOCAL_CLASSES = YES
+
+# This flag is only useful for Objective-C code. When set to YES local
+# methods, which are defined in the implementation section but not in
+# the interface, are included in the documentation.
+# If set to NO (the default) only methods in the interface are included.
+
+EXTRACT_LOCAL_METHODS = NO
+
+# If this flag is set to YES, the members of anonymous namespaces will be
+# extracted and appear in the documentation as a namespace called
+# 'anonymous_namespace{file}', where file will be replaced with the base
+# name of the file that contains the anonymous namespace. By default,
+# anonymous namespaces are hidden.
+
+EXTRACT_ANON_NSPACES = NO
+
+# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all
+# undocumented members of documented classes, files or namespaces.
+# If set to NO (the default) these members will be included in the
+# various overviews, but no documentation section is generated.
+# This option has no effect if EXTRACT_ALL is enabled.
+
+HIDE_UNDOC_MEMBERS = NO
+
+# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all
+# undocumented classes that are normally visible in the class hierarchy.
+# If set to NO (the default) these classes will be included in the various
+# overviews. This option has no effect if EXTRACT_ALL is enabled.
+
+HIDE_UNDOC_CLASSES = NO
+
+# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all
+# friend (class|struct|union) declarations.
+# If set to NO (the default) these declarations will be included in the
+# documentation.
+
+HIDE_FRIEND_COMPOUNDS = NO
+
+# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any
+# documentation blocks found inside the body of a function.
+# If set to NO (the default) these blocks will be appended to the
+# function's detailed documentation block.
+
+HIDE_IN_BODY_DOCS = NO
+
+# The INTERNAL_DOCS tag determines if documentation
+# that is typed after a \internal command is included. If the tag is set
+# to NO (the default) then the documentation will be excluded.
+# Set it to YES to include the internal documentation.
+
+INTERNAL_DOCS = NO
+
+# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate
+# file names in lower-case letters. If set to YES upper-case letters are also
+# allowed. This is useful if you have classes or files whose names only differ
+# in case and if your file system supports case sensitive file names. Windows
+# and Mac users are advised to set this option to NO.
+
+CASE_SENSE_NAMES = YES
+
+# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen
+# will show members with their full class and namespace scopes in the
+# documentation. If set to YES the scope will be hidden.
+
+HIDE_SCOPE_NAMES = NO
+
+# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen
+# will put a list of the files that are included by a file in the documentation
+# of that file.
+
+SHOW_INCLUDE_FILES = YES
+
+# If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen
+# will list include files with double quotes in the documentation
+# rather than with sharp brackets.
+
+FORCE_LOCAL_INCLUDES = NO
+
+# If the INLINE_INFO tag is set to YES (the default) then a tag [inline]
+# is inserted in the documentation for inline members.
+
+INLINE_INFO = YES
+
+# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen
+# will sort the (detailed) documentation of file and class members
+# alphabetically by member name. If set to NO the members will appear in
+# declaration order.
+
+SORT_MEMBER_DOCS = YES
+
+# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the
+# brief documentation of file, namespace and class members alphabetically
+# by member name. If set to NO (the default) the members will appear in
+# declaration order.
+
+SORT_BRIEF_DOCS = NO
+
+# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen
+# will sort the (brief and detailed) documentation of class members so that
+# constructors and destructors are listed first. If set to NO (the default)
+# the constructors will appear in the respective orders defined by
+# SORT_MEMBER_DOCS and SORT_BRIEF_DOCS.
+# This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO
+# and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO.
+
+SORT_MEMBERS_CTORS_1ST = NO
+
+# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the
+# hierarchy of group names into alphabetical order. If set to NO (the default)
+# the group names will appear in their defined order.
+
+SORT_GROUP_NAMES = NO
+
+# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be
+# sorted by fully-qualified names, including namespaces. If set to
+# NO (the default), the class list will be sorted only by class name,
+# not including the namespace part.
+# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
+# Note: This option applies only to the class list, not to the
+# alphabetical list.
+
+SORT_BY_SCOPE_NAME = NO
+
+# The GENERATE_TODOLIST tag can be used to enable (YES) or
+# disable (NO) the todo list. This list is created by putting \todo
+# commands in the documentation.
+
+GENERATE_TODOLIST = YES
+
+# The GENERATE_TESTLIST tag can be used to enable (YES) or
+# disable (NO) the test list. This list is created by putting \test
+# commands in the documentation.
+
+GENERATE_TESTLIST = YES
+
+# The GENERATE_BUGLIST tag can be used to enable (YES) or
+# disable (NO) the bug list. This list is created by putting \bug
+# commands in the documentation.
+
+GENERATE_BUGLIST = YES
+
+# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or
+# disable (NO) the deprecated list. This list is created by putting
+# \deprecated commands in the documentation.
+
+GENERATE_DEPRECATEDLIST= YES
+
+# The ENABLED_SECTIONS tag can be used to enable conditional
+# documentation sections, marked by \if sectionname ... \endif.
+
+ENABLED_SECTIONS =
+
+# The MAX_INITIALIZER_LINES tag determines the maximum number of lines
+# the initial value of a variable or define consists of for it to appear in
+# the documentation. If the initializer consists of more lines than specified
+# here it will be hidden. Use a value of 0 to hide initializers completely.
+# The appearance of the initializer of individual variables and defines in the
+# documentation can be controlled using \showinitializer or \hideinitializer
+# commands in the documentation regardless of this setting.
+
+MAX_INITIALIZER_LINES = 30
+
+# Set the SHOW_USED_FILES tag to NO to disable the list of files generated
+# at the bottom of the documentation of classes and structs. If set to YES the
+# list will mention the files that were used to generate the documentation.
+
+SHOW_USED_FILES = YES
+
+# If the sources in your project are distributed over multiple directories
+# then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy
+# in the documentation. The default is NO.
+
+SHOW_DIRECTORIES = YES
+
+# Set the SHOW_FILES tag to NO to disable the generation of the Files page.
+# This will remove the Files entry from the Quick Index and from the
+# Folder Tree View (if specified). The default is YES.
+
+SHOW_FILES = YES
+
+# Set the SHOW_NAMESPACES tag to NO to disable the generation of the
+# Namespaces page.
+# This will remove the Namespaces entry from the Quick Index
+# and from the Folder Tree View (if specified). The default is YES.
+
+SHOW_NAMESPACES = YES
+
+# The FILE_VERSION_FILTER tag can be used to specify a program or script that
+# doxygen should invoke to get the current version for each file (typically from
+# the version control system). Doxygen will invoke the program by executing (via
+# popen()) the command <command> <input-file>, where <command> is the value of
+# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file
+# provided by doxygen. Whatever the program writes to standard output
+# is used as the file version. See the manual for examples.
+
+FILE_VERSION_FILTER =
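+
+# Illustrative only (hypothetical script name): any command that prints a
+# version string for the file passed as its argument would work here, e.g.
+#   FILE_VERSION_FILTER = "/usr/local/bin/print-file-version.sh"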
+
+# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
+# by doxygen. The layout file controls the global structure of the generated
+# output files in an output format independent way. To create the layout file
+# that represents doxygen's defaults, run doxygen with the -l option.
+# You can optionally specify a file name after the option, if omitted
+# DoxygenLayout.xml will be used as the name of the layout file.
+
+LAYOUT_FILE =
+
+#---------------------------------------------------------------------------
+# configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+
+# The QUIET tag can be used to turn on/off the messages that are generated
+# by doxygen. Possible values are YES and NO. If left blank NO is used.
+
+QUIET = NO
+
+# The WARNINGS tag can be used to turn on/off the warning messages that are
+# generated by doxygen. Possible values are YES and NO. If left blank
+# NO is used.
+
+WARNINGS = NO
+
+# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings
+# for undocumented members. If EXTRACT_ALL is set to YES then this flag will
+# automatically be disabled.
+
+WARN_IF_UNDOCUMENTED = NO
+
+# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for
+# potential errors in the documentation, such as not documenting some
+# parameters in a documented function, or documenting parameters that
+# don't exist or using markup commands wrongly.
+
+WARN_IF_DOC_ERROR = YES
+
+# The WARN_NO_PARAMDOC option can be enabled to get warnings for
+# functions that are documented, but have no documentation for their parameters
+# or return value. If set to NO (the default) doxygen will only warn about
+# wrong or incomplete parameter documentation, but not about the absence of
+# documentation.
+
+WARN_NO_PARAMDOC = NO
+
+# The WARN_FORMAT tag determines the format of the warning messages that
+# doxygen can produce. The string should contain the $file, $line, and $text
+# tags, which will be replaced by the file and line number from which the
+# warning originated and the warning text. Optionally the format may contain
+# $version, which will be replaced by the version of the file (if it could
+# be obtained via FILE_VERSION_FILTER)
+
+WARN_FORMAT =
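+
+# Illustrative only: a typical format, which most editors can parse, would be
+#   WARN_FORMAT = "$file:$line: $text"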
+
+# The WARN_LOGFILE tag can be used to specify a file to which warning
+# and error messages should be written. If left blank the output is written
+# to stderr.
+
+WARN_LOGFILE =
+
+#---------------------------------------------------------------------------
+# configuration options related to the input files
+#---------------------------------------------------------------------------
+
+# The INPUT tag can be used to specify the files and/or directories that contain
+# documented source files. You may enter file names like "myfile.cpp" or
+# directories like "/usr/src/myproject". Separate the files or directories
+# with spaces.
+
+INPUT = @abs_top_srcdir@/include \
+ @abs_top_srcdir@/lib \
+ @abs_top_srcdir@/docs/doxygen.intro
+
+# This tag can be used to specify the character encoding of the source files
+# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is
+# also the default input encoding. Doxygen uses libiconv (or the iconv built
+# into libc) for the transcoding. See http://www.gnu.org/software/libiconv for
+# the list of possible encodings.
+
+INPUT_ENCODING = UTF-8
+
+# If the value of the INPUT tag contains directories, you can use the
+# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp
+# and *.h) to filter out the source files in the directories. If left
+# blank the following patterns are tested:
+# *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx
+# *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.py *.f90
+
+FILE_PATTERNS =
+
+# The RECURSIVE tag can be used to specify whether or not subdirectories
+# should be searched for input files as well. Possible values are YES and NO.
+# If left blank NO is used.
+
+RECURSIVE = YES
+
+# The EXCLUDE tag can be used to specify files and/or directories that should
+# be excluded from the INPUT source files. This way you can easily exclude a
+# subdirectory from a directory tree whose root is specified with the INPUT tag.
+
+EXCLUDE =
+
+# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
+# directories that are symbolic links (a Unix filesystem feature) are excluded
+# from the input.
+
+EXCLUDE_SYMLINKS = NO
+
+# If the value of the INPUT tag contains directories, you can use the
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
+# certain files from those directories. Note that the wildcards are matched
+# against the file with absolute path, so to exclude all test directories
+# for example use the pattern */test/*
+
+EXCLUDE_PATTERNS =
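+
+# Illustrative only: the example from the comment above would be written as
+#   EXCLUDE_PATTERNS = */test/*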
+
+# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
+# (namespaces, classes, functions, etc.) that should be excluded from the
+# output. The symbol name can be a fully qualified name, a word, or if the
+# wildcard * is used, a substring. Examples: ANamespace, AClass,
+# AClass::ANamespace, ANamespace::*Test
+
+EXCLUDE_SYMBOLS =
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or
+# directories that contain example code fragments that are included (see
+# the \include command).
+
+EXAMPLE_PATH = @abs_top_srcdir@/examples
+
+# If the value of the EXAMPLE_PATH tag contains directories, you can use the
+# EXAMPLE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp
+# and *.h) to filter out the source-files in the directories. If left
+# blank all files are included.
+
+EXAMPLE_PATTERNS =
+
+# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
+# searched for input files to be used with the \include or \dontinclude
+# commands irrespective of the value of the RECURSIVE tag.
+# Possible values are YES and NO. If left blank NO is used.
+
+EXAMPLE_RECURSIVE = YES
+
+# The IMAGE_PATH tag can be used to specify one or more files or
+# directories that contain images that are included in the documentation (see
+# the \image command).
+
+IMAGE_PATH = @abs_top_srcdir@/docs/img
+
+# The INPUT_FILTER tag can be used to specify a program that doxygen should
+# invoke to filter for each input file. Doxygen will invoke the filter program
+# by executing (via popen()) the command <filter> <input-file>, where <filter>
+# is the value of the INPUT_FILTER tag, and <input-file> is the name of an
+# input file. Doxygen will then use the output that the filter program writes
+# to standard output. If FILTER_PATTERNS is specified, this tag will be
+# ignored.
+
+INPUT_FILTER =
+
+# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
+# basis. Doxygen will compare the file name with each pattern and apply the
+# filter if there is a match. The filters are a list of the form:
+# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further
+# info on how filters are used. If FILTER_PATTERNS is empty, INPUT_FILTER
+# is applied to all files.
+
+FILTER_PATTERNS =
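+
+# Illustrative only (hypothetical filter name): to run Python sources through
+# a custom filter while leaving other files untouched, one could write
+#   FILTER_PATTERNS = *.py=my_python_filter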
+
+# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
+# INPUT_FILTER) will be used to filter the input files when producing source
+# files to browse (i.e. when SOURCE_BROWSER is set to YES).
+
+FILTER_SOURCE_FILES = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to source browsing
+#---------------------------------------------------------------------------
+
+# If the SOURCE_BROWSER tag is set to YES then a list of source files will
+# be generated. Documented entities will be cross-referenced with these sources.
+# Note: To get rid of all source code in the generated output, make sure also
+# VERBATIM_HEADERS is set to NO.
+
+SOURCE_BROWSER = YES
+
+# Setting the INLINE_SOURCES tag to YES will include the body
+# of functions and classes directly in the documentation.
+
+INLINE_SOURCES = NO
+
+# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct
+# doxygen to hide any special comment blocks from generated source code
+# fragments. Normal C and C++ comments will always remain visible.
+
+STRIP_CODE_COMMENTS = NO
+
+# If the REFERENCED_BY_RELATION tag is set to YES
+# then for each documented function all documented
+# functions referencing it will be listed.
+
+REFERENCED_BY_RELATION = YES
+
+# If the REFERENCES_RELATION tag is set to YES
+# then for each documented function all documented entities
+# called/used by that function will be listed.
+
+REFERENCES_RELATION = YES
+
+# If the REFERENCES_LINK_SOURCE tag is set to YES (the default)
+# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from
+# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will
+# link to the source code.
+# Otherwise they will link to the documentation.
+
+REFERENCES_LINK_SOURCE = YES
+
+# If the USE_HTAGS tag is set to YES then the references to source code
+# will point to the HTML generated by the htags(1) tool instead of doxygen's
+# built-in source browser. The htags tool is part of GNU's global source
+# tagging system (see http://www.gnu.org/software/global/global.html). You
+# will need version 4.8.6 or higher.
+
+USE_HTAGS = NO
+
+# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen
+# will generate a verbatim copy of the header file for each class for
+# which an include is specified. Set to NO to disable this.
+
+VERBATIM_HEADERS = YES
+
+#---------------------------------------------------------------------------
+# configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index
+# of all compounds will be generated. Enable this if the project
+# contains a lot of classes, structs, unions or interfaces.
+
+ALPHABETICAL_INDEX = YES
+
+# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then
+# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns
+# in which this list will be split (can be a number in the range [1..20])
+
+COLS_IN_ALPHA_INDEX = 4
+
+# In case all classes in a project start with a common prefix, all
+# classes will be put under the same header in the alphabetical index.
+# The IGNORE_PREFIX tag can be used to specify one or more prefixes that
+# should be ignored while generating the index headers.
+
+IGNORE_PREFIX = llvm::
+
+#---------------------------------------------------------------------------
+# configuration options related to the HTML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_HTML tag is set to YES (the default) Doxygen will
+# generate HTML output.
+
+GENERATE_HTML = YES
+
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `html' will be used as the default path.
+
+HTML_OUTPUT = html
+
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for
+# each generated HTML page (for example: .htm,.php,.asp). If it is left blank
+# doxygen will generate files with .html extension.
+
+HTML_FILE_EXTENSION = .html
+
+# The HTML_HEADER tag can be used to specify a personal HTML header for
+# each generated HTML page. If it is left blank doxygen will generate a
+# standard header.
+
+HTML_HEADER = @abs_top_srcdir@/docs/doxygen.header
+
+# The HTML_FOOTER tag can be used to specify a personal HTML footer for
+# each generated HTML page. If it is left blank doxygen will generate a
+# standard footer.
+
+HTML_FOOTER = @abs_top_srcdir@/docs/doxygen.footer
+
+# The HTML_STYLESHEET tag can be used to specify a user-defined cascading
+# style sheet that is used by each HTML page. It can be used to
+# fine-tune the look of the HTML output. If the tag is left blank doxygen
+# will generate a default style sheet. Note that doxygen will try to copy
+# the style sheet file to the HTML output directory, so don't put your own
+# stylesheet in the HTML output directory as well, or it will be erased!
+
+HTML_STYLESHEET = @abs_top_srcdir@/docs/doxygen.css
+
+# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output.
+# Doxygen will adjust the colors in the stylesheet and background images
+# according to this color. Hue is specified as an angle on a colorwheel,
+# see http://en.wikipedia.org/wiki/Hue for more information.
+# For instance the value 0 represents red, 60 is yellow, 120 is green,
+# 180 is cyan, 240 is blue, 300 purple, and 360 is red again.
+# The allowed range is 0 to 359.
+
+HTML_COLORSTYLE_HUE = 220
+
+# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of
+# the colors in the HTML output. For a value of 0 the output will use
+# grayscales only. A value of 255 will produce the most vivid colors.
+
+HTML_COLORSTYLE_SAT = 100
+
+# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to
+# the luminance component of the colors in the HTML output. Values below
+# 100 gradually make the output lighter, whereas values above 100 make
+# the output darker. The value divided by 100 is the actual gamma applied,
+# so 80 represents a gamma of 0.8, the value 220 represents a gamma of 2.2,
+# and 100 does not change the gamma.
+
+HTML_COLORSTYLE_GAMMA = 80
+
+# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
+# page will contain the date and time when the page was generated. Setting
+# this to NO can help when comparing the output of multiple runs.
+
+HTML_TIMESTAMP = YES
+
+# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes,
+# files or namespaces will be aligned in HTML using tables. If set to
+# NO a bullet list will be used.
+
+HTML_ALIGN_MEMBERS = YES
+
+# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
+# documentation will contain sections that can be hidden and shown after the
+# page has loaded. For this to work a browser that supports
+# JavaScript and DHTML is required (for instance Mozilla 1.0+, Firefox,
+# Netscape 6.0+, Internet Explorer 5.0+, Konqueror, or Safari).
+
+HTML_DYNAMIC_SECTIONS = NO
+
+# If the GENERATE_DOCSET tag is set to YES, additional index files
+# will be generated that can be used as input for Apple's Xcode 3
+# integrated development environment, introduced with OSX 10.5 (Leopard).
+# To create a documentation set, doxygen will generate a Makefile in the
+# HTML output directory. Running make will produce the docset in that
+# directory and running "make install" will install the docset in
+# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find
+# it at startup.
+# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
+# for more information.
+
+GENERATE_DOCSET = NO
+
+# When the GENERATE_DOCSET tag is set to YES, this tag determines the name of the
+# feed. A documentation feed provides an umbrella under which multiple
+# documentation sets from a single provider (such as a company or product suite)
+# can be grouped.
+
+DOCSET_FEEDNAME = "Doxygen generated docs"
+
+# When the GENERATE_DOCSET tag is set to YES, this tag specifies a string that
+# should uniquely identify the documentation set bundle. This should be a
+# reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen
+# will append .docset to the name.
+
+DOCSET_BUNDLE_ID = org.doxygen.Project
+
+# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify
+# the documentation publisher. This should be a reverse domain-name style
+# string, e.g. com.mycompany.MyDocSet.documentation.
+
+DOCSET_PUBLISHER_ID = org.doxygen.Publisher
+
+# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.
+
+DOCSET_PUBLISHER_NAME = Publisher
+
+# If the GENERATE_HTMLHELP tag is set to YES, additional index files
+# will be generated that can be used as input for tools like the
+# Microsoft HTML help workshop to generate a compiled HTML help file (.chm)
+# of the generated HTML documentation.
+
+GENERATE_HTMLHELP = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can
+# be used to specify the file name of the resulting .chm file. You
+# can add a path in front of the file if the result should not be
+# written to the html output directory.
+
+CHM_FILE =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can
+# be used to specify the location (absolute path including file name) of
+# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run
+# the HTML help compiler on the generated index.hhp.
+
+HHC_LOCATION =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag
+# controls whether a separate .chi index file is generated (YES) or
+# included in the master .chm file (NO).
+
+GENERATE_CHI = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING
+# is used to encode HtmlHelp index (hhk), content (hhc) and project file
+# content.
+
+CHM_INDEX_ENCODING =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag
+# controls whether a binary table of contents is generated (YES) or a
+# normal table of contents (NO) in the .chm file.
+
+BINARY_TOC = NO
+
+# The TOC_EXPAND flag can be set to YES to add extra items for group members
+# to the contents of the HTML help documentation and to the tree view.
+
+TOC_EXPAND = NO
+
+# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
+# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated
+# that can be used as input for Qt's qhelpgenerator to generate a
+# Qt Compressed Help (.qch) of the generated HTML documentation.
+
+GENERATE_QHP = NO
+
+# If the QHG_LOCATION tag is specified, the QCH_FILE tag can
+# be used to specify the file name of the resulting .qch file.
+# The path specified is relative to the HTML output folder.
+
+QCH_FILE =
+
+# The QHP_NAMESPACE tag specifies the namespace to use when generating
+# Qt Help Project output. For more information please see
+# http://doc.trolltech.com/qthelpproject.html#namespace
+
+QHP_NAMESPACE = org.doxygen.Project
+
+# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating
+# Qt Help Project output. For more information please see
+# http://doc.trolltech.com/qthelpproject.html#virtual-folders
+
+QHP_VIRTUAL_FOLDER = doc
+
+# If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to
+# add. For more information please see
+# http://doc.trolltech.com/qthelpproject.html#custom-filters
+
+QHP_CUST_FILTER_NAME =
+
+# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
+# custom filter to add. For more information please see
+# <a href="http://doc.trolltech.com/qthelpproject.html#custom-filters">
+# Qt Help Project / Custom Filters</a>.
+
+QHP_CUST_FILTER_ATTRS =
+
+# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
+# project's filter section matches. For more information please see
+# <a href="http://doc.trolltech.com/qthelpproject.html#filter-attributes">
+# Qt Help Project / Filter Attributes</a>.
+
+QHP_SECT_FILTER_ATTRS =
+
+# If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can
+# be used to specify the location of Qt's qhelpgenerator.
+# If non-empty doxygen will try to run qhelpgenerator on the generated
+# .qhp file.
+
+QHG_LOCATION =
+
+# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files
+# will be generated, which together with the HTML files, form an Eclipse help
+# plugin. To install this plugin and make it available under the help contents
+# menu in Eclipse, the contents of the directory containing the HTML and XML
+# files need to be copied into the plugins directory of Eclipse. The name of
+# the directory within the plugins directory should be the same as
+# the ECLIPSE_DOC_ID value. After copying, Eclipse needs to be restarted before
+# the help appears.
+
+GENERATE_ECLIPSEHELP = NO
+
+# A unique identifier for the eclipse help plugin. When installing the plugin
+# the directory name containing the HTML and XML files should also have
+# this name.
+
+ECLIPSE_DOC_ID = org.doxygen.Project
+
+# The DISABLE_INDEX tag can be used to turn on/off the condensed index at
+# top of each HTML page. The value NO (the default) enables the index and
+# the value YES disables it.
+
+DISABLE_INDEX = NO
+
+# This tag can be used to set the number of enum values (range [1..20])
+# that doxygen will group on one line in the generated HTML documentation.
+
+ENUM_VALUES_PER_LINE = 4
+
+# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
+# structure should be generated to display hierarchical information.
+# If the tag value is set to YES, a side panel will be generated
+# containing a tree-like index structure (just like the one that
+# is generated for HTML Help). For this to work a browser that supports
+# JavaScript, DHTML, CSS and frames is required (i.e. any modern browser).
+# Windows users are probably better off using the HTML help feature.
+
+GENERATE_TREEVIEW = NO
+
+# By enabling USE_INLINE_TREES, doxygen will generate the Groups, Directories,
+# and Class Hierarchy pages using a tree view instead of an ordered list.
+
+USE_INLINE_TREES = NO
+
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be
+# used to set the initial width (in pixels) of the frame in which the tree
+# is shown.
+
+TREEVIEW_WIDTH = 250
+
+# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open
+# links to external symbols imported via tag files in a separate window.
+
+EXT_LINKS_IN_WINDOW = NO
+
+# Use this tag to change the font size of LaTeX formulas included
+# as images in the HTML documentation. The default is 10. Note that
+# when you change the font size after a successful doxygen run you need
+# to manually remove any form_*.png images from the HTML output directory
+# to force them to be regenerated.
+
+FORMULA_FONTSIZE = 10
+
+# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
+# generated for formulas are transparent PNGs. Transparent PNGs are
+# not supported properly for IE 6.0, but are supported on all modern browsers.
+# Note that when changing this option you need to delete any form_*.png files
+# in the HTML output before the changes take effect.
+
+FORMULA_TRANSPARENT = YES
+
+# When the SEARCHENGINE tag is enabled doxygen will generate a search box
+# for the HTML output. The underlying search engine uses javascript
+# and DHTML and should work on any modern browser. Note that when using
+# HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets
+# (GENERATE_DOCSET) there is already a search function so this one should
+# typically be disabled. For large projects the javascript based search engine
+# can be slow; enabling SERVER_BASED_SEARCH may then provide a better solution.
+
+SEARCHENGINE = NO
+
+# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
+# implemented using a PHP enabled web server instead of at the web client
+# using Javascript. Doxygen will generate the search PHP script and index
+# file to put on the web server. The advantage of the server
+# based approach is that it scales better to large projects and allows
+# full text search. The disadvances is that it is more difficult to setup
+# and does not have live searching capabilities.
+
+SERVER_BASED_SEARCH = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will
+# generate LaTeX output.
+
+GENERATE_LATEX = NO
+
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `latex' will be used as the default path.
+
+LATEX_OUTPUT =
+
+# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
+# invoked. If left blank `latex' will be used as the default command name.
+# Note that when enabling USE_PDFLATEX this option is only used for
+# generating bitmaps for formulas in the HTML output, but not in the
+# Makefile that is written to the output directory.
+
+LATEX_CMD_NAME = latex
+
+# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to
+# generate index for LaTeX. If left blank `makeindex' will be used as the
+# default command name.
+
+MAKEINDEX_CMD_NAME = makeindex
+
+# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact
+# LaTeX documents. This may be useful for small projects and may help to
+# save some trees in general.
+
+COMPACT_LATEX = NO
+
+# The PAPER_TYPE tag can be used to set the paper type that is used
+# by the printer. Possible values are: a4, a4wide, letter, legal and
+# executive. If left blank a4wide will be used.
+
+PAPER_TYPE = letter
+
+# The EXTRA_PACKAGES tag can be used to specify one or more names of LaTeX
+# packages that should be included in the LaTeX output.
+
+EXTRA_PACKAGES =
+
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for
+# the generated latex document. The header should contain everything until
+# the first chapter. If it is left blank doxygen will generate a
+# standard header. Notice: only use this tag if you know what you are doing!
+
+LATEX_HEADER =
+
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated
+# is prepared for conversion to pdf (using ps2pdf). The pdf file will
+# contain links (just like the HTML output) instead of page references
+# This makes the output suitable for online browsing using a pdf viewer.
+
+PDF_HYPERLINKS = NO
+
+# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of
+# plain latex in the generated Makefile. Set this option to YES to get a
+# higher quality PDF documentation.
+
+USE_PDFLATEX = NO
+
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode
+# command to the generated LaTeX files. This will instruct LaTeX to keep
+# running if errors occur, instead of asking the user for help.
+# This option is also used when generating formulas in HTML.
+
+LATEX_BATCHMODE = NO
+
+# If LATEX_HIDE_INDICES is set to YES then doxygen will not
+# include the index chapters (such as File Index, Compound Index, etc.)
+# in the output.
+
+LATEX_HIDE_INDICES = NO
+
+# If LATEX_SOURCE_CODE is set to YES then doxygen will include
+# source code with syntax highlighting in the LaTeX output.
+# Note that which sources are shown also depends on other settings
+# such as SOURCE_BROWSER.
+
+LATEX_SOURCE_CODE = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the RTF output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output
+# The RTF output is optimized for Word 97 and may not look very pretty with
+# other RTF readers or editors.
+
+GENERATE_RTF = NO
+
+# The RTF_OUTPUT tag is used to specify where the RTF docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `rtf' will be used as the default path.
+
+RTF_OUTPUT =
+
+# If the COMPACT_RTF tag is set to YES Doxygen generates more compact
+# RTF documents. This may be useful for small projects and may help to
+# save some trees in general.
+
+COMPACT_RTF = NO
+
+# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated
+# will contain hyperlink fields. The RTF file will
+# contain links (just like the HTML output) instead of page references.
+# This makes the output suitable for online browsing using WORD or other
+# programs which support those fields.
+# Note: wordpad (write) and others do not support links.
+
+RTF_HYPERLINKS = NO
+
+# Load stylesheet definitions from file. Syntax is similar to doxygen's
+# config file, i.e. a series of assignments. You only have to provide
+# replacements, missing definitions are set to their default value.
+
+RTF_STYLESHEET_FILE =
+
+# Set optional variables used in the generation of an rtf document.
+# Syntax is similar to doxygen's config file.
+
+RTF_EXTENSIONS_FILE =
+
+#---------------------------------------------------------------------------
+# configuration options related to the man page output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_MAN tag is set to YES (the default) Doxygen will
+# generate man pages
+
+GENERATE_MAN = NO
+
+# The MAN_OUTPUT tag is used to specify where the man pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `man' will be used as the default path.
+
+MAN_OUTPUT =
+
+# The MAN_EXTENSION tag determines the extension that is added to
+# the generated man pages (default is the subroutine's section .3)
+
+MAN_EXTENSION =
+
+# If the MAN_LINKS tag is set to YES and Doxygen generates man output,
+# then it will generate one additional man file for each entity
+# documented in the real man page(s). These additional files
+# only source the real man page, but without them the man command
+# would be unable to find the correct page. The default is NO.
+
+MAN_LINKS = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the XML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_XML tag is set to YES Doxygen will
+# generate an XML file that captures the structure of
+# the code including all documentation.
+
+GENERATE_XML = NO
+
+# The XML_OUTPUT tag is used to specify where the XML pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `xml' will be used as the default path.
+
+XML_OUTPUT = xml
+
+# The XML_SCHEMA tag can be used to specify an XML schema,
+# which can be used by a validating XML parser to check the
+# syntax of the XML files.
+
+XML_SCHEMA =
+
+# The XML_DTD tag can be used to specify an XML DTD,
+# which can be used by a validating XML parser to check the
+# syntax of the XML files.
+
+XML_DTD =
+
+# If the XML_PROGRAMLISTING tag is set to YES Doxygen will
+# dump the program listings (including syntax highlighting
+# and cross-referencing information) to the XML output. Note that
+# enabling this will significantly increase the size of the XML output.
+
+XML_PROGRAMLISTING = YES
+
+#---------------------------------------------------------------------------
+# configuration options for the AutoGen Definitions output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will
+# generate an AutoGen Definitions (see autogen.sf.net) file
+# that captures the structure of the code including all
+# documentation. Note that this feature is still experimental
+# and incomplete at the moment.
+
+GENERATE_AUTOGEN_DEF = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the Perl module output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_PERLMOD tag is set to YES Doxygen will
+# generate a Perl module file that captures the structure of
+# the code including all documentation. Note that this
+# feature is still experimental and incomplete at the
+# moment.
+
+GENERATE_PERLMOD = NO
+
+# If the PERLMOD_LATEX tag is set to YES Doxygen will generate
+# the necessary Makefile rules, Perl scripts and LaTeX code to be able
+# to generate PDF and DVI output from the Perl module output.
+
+PERLMOD_LATEX = NO
+
+# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be
+# nicely formatted so it can be parsed by a human reader. This is useful if
+# you want to understand what is going on. On the other hand, if this tag is
+# set to NO the size of the Perl module output will be much smaller and Perl
+# will parse it just the same.
+
+PERLMOD_PRETTY = YES
+
+# The names of the make variables in the generated doxyrules.make file
+# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX.
+# This is useful so different doxyrules.make files included by the same
+# Makefile don't overwrite each other's variables.
+
+PERLMOD_MAKEVAR_PREFIX =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor
+#---------------------------------------------------------------------------
+
+# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will
+# evaluate all C-preprocessor directives found in the sources and include
+# files.
+
+ENABLE_PREPROCESSING = YES
+
+# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro
+# names in the source code. If set to NO (the default) only conditional
+# compilation will be performed. Macro expansion can be done in a controlled
+# way by setting EXPAND_ONLY_PREDEF to YES.
+
+MACRO_EXPANSION = NO
+
+# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES
+# then the macro expansion is limited to the macros specified with the
+# PREDEFINED and EXPAND_AS_DEFINED tags.
+
+EXPAND_ONLY_PREDEF = NO
+
+# If the SEARCH_INCLUDES tag is set to YES (the default) the include files
+# in the INCLUDE_PATH (see below) will be searched if a #include is found.
+
+SEARCH_INCLUDES = YES
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that
+# contain include files that are not input files but should be processed by
+# the preprocessor.
+
+INCLUDE_PATH = ../include
+
+# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
+# patterns (like *.h and *.hpp) to filter out the header-files in the
+# directories. If left blank, the patterns specified with FILE_PATTERNS will
+# be used.
+
+INCLUDE_FILE_PATTERNS =
+
+# The PREDEFINED tag can be used to specify one or more macro names that
+# are defined before the preprocessor is started (similar to the -D option of
+# gcc). The argument of the tag is a list of macros of the form: name
+# or name=definition (no spaces). If the definition and the = are
+# omitted =1 is assumed. To prevent a macro definition from being
+# undefined via #undef or recursively expanded use the := operator
+# instead of the = operator.
+
+PREDEFINED =
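+
+# Illustrative only (hypothetical macro names): both forms described above,
+# plus the := operator that protects a definition from being #undef'ed:
+#   PREDEFINED = NDEBUG HAVE_FOO=1 ALWAYS_ON:=1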
+
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
+# this tag can be used to specify a list of macro names that should be expanded.
+# The macro definition that is found in the sources will be used.
+# Use the PREDEFINED tag if you want to use a different macro definition.
+
+EXPAND_AS_DEFINED =
+
+# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then
+# doxygen's preprocessor will remove all function-like macros that are alone
+# on a line, have an all uppercase name, and do not end with a semicolon. Such
+# function macros are typically used for boiler-plate code, and will confuse
+# the parser if not removed.
+
+SKIP_FUNCTION_MACROS = YES
+
+#---------------------------------------------------------------------------
+# Configuration::additions related to external references
+#---------------------------------------------------------------------------
+
+# The TAGFILES option can be used to specify one or more tagfiles.
+# Optionally an initial location of the external documentation
+# can be added for each tagfile. The format of a tag file without
+# this location is as follows:
+#
+# TAGFILES = file1 file2 ...
+# Adding location for the tag files is done as follows:
+#
+# TAGFILES = file1=loc1 "file2 = loc2" ...
+# where "loc1" and "loc2" can be relative or absolute paths or
+# URLs. If a location is present for each tag, the installdox tool
+# does not have to be run to correct the links.
+# Note that each tag file must have a unique name
+# (where the name does NOT include the path)
+# If a tag file is not located in the directory in which doxygen
+# is run, you must also specify the path to the tagfile here.
+
+TAGFILES =
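+
+# Illustrative only (hypothetical tag file and URL): a tag file paired with the
+# location of its external documentation would be given as
+#   TAGFILES = external.tag=http://example.com/external-docs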
+
+# When a file name is specified after GENERATE_TAGFILE, doxygen will create
+# a tag file that is based on the input files it reads.
+
+GENERATE_TAGFILE =
+
+# If the ALLEXTERNALS tag is set to YES all external classes will be listed
+# in the class index. If set to NO only the inherited external classes
+# will be listed.
+
+ALLEXTERNALS = YES
+
+# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed
+# in the modules index. If set to NO, only the current project's groups will
+# be listed.
+
+EXTERNAL_GROUPS = YES
+
+# The PERL_PATH should be the absolute path and name of the perl script
+# interpreter (i.e. the result of `which perl').
+
+PERL_PATH =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the dot tool
+#---------------------------------------------------------------------------
+
+# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will
+# generate an inheritance diagram (in HTML, RTF and LaTeX) for classes with base
+# or super classes. Setting the tag to NO turns the diagrams off. Note that
+# this option is superseded by the HAVE_DOT option below. This is only a
+# fallback. It is recommended to install and use dot, since it yields more
+# powerful graphs.
+
+CLASS_DIAGRAMS = YES
+
+# You can define message sequence charts within doxygen comments using the \msc
+# command. Doxygen will then run the mscgen tool (see
+# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the
+# documentation. The MSCGEN_PATH tag allows you to specify the directory where
+# the mscgen tool resides. If left empty the tool is assumed to be found in the
+# default search path.
+
+MSCGEN_PATH =
+
+# If set to YES, the inheritance and collaboration graphs will hide
+# inheritance and usage relations if the target is undocumented
+# or is not a class.
+
+HIDE_UNDOC_RELATIONS = NO
+
+# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
+# available from the path. This tool is part of Graphviz, a graph visualization
+# toolkit from AT&T and Lucent Bell Labs. The other options in this section
+# have no effect if this option is set to NO (the default)
+
+HAVE_DOT = YES
+
+# The DOT_NUM_THREADS tag specifies the number of dot invocations doxygen is
+# allowed to run in parallel. When set to 0 (the default) doxygen will
+# base this on the number of processors available in the system. You can set it
+# explicitly to a value larger than 0 to get control over the balance
+# between CPU load and processing speed.
+
+DOT_NUM_THREADS = 0
+
+# By default doxygen will write a font called FreeSans.ttf to the output
+# directory and reference it in all dot files that doxygen generates. This
+# font does not include all possible unicode characters however, so when you need
+# these (or just want a different looking font) you can specify the font name
+# using DOT_FONTNAME. You need to make sure dot is able to find the font,
+# which can be done by putting it in a standard location or by setting the
+# DOTFONTPATH environment variable or by setting DOT_FONTPATH to the directory
+# containing the font.
+
+DOT_FONTNAME = FreeSans
+
+# The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs.
+# The default size is 10pt.
+
+DOT_FONTSIZE = 10
+
+# By default doxygen will tell dot to use the output directory to look for the
+# FreeSans.ttf font (which doxygen will put there itself). If you specify a
+# different font using DOT_FONTNAME you can set the path where dot
+# can find it using this tag.
+
+DOT_FONTPATH =
+
+# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for each documented class showing the direct and
+# indirect inheritance relations. Setting this tag to YES will force the
+# CLASS_DIAGRAMS tag to NO.
+
+CLASS_GRAPH = YES
+
+# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for each documented class showing the direct and
+# indirect implementation dependencies (inheritance, containment, and
+# class references variables) of the class with other documented classes.
+
+COLLABORATION_GRAPH = YES
+
+# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for groups, showing the direct group dependencies.
+
+GROUP_GRAPHS = YES
+
+# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
+# collaboration diagrams in a style similar to the OMG's Unified Modeling
+# Language.
+
+UML_LOOK = NO
+
+# If set to YES, the inheritance and collaboration graphs will show the
+# relations between templates and their instances.
+
+TEMPLATE_RELATIONS = YES
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT
+# tags are set to YES then doxygen will generate a graph for each documented
+# file showing the direct and indirect include dependencies of the file with
+# other documented files.
+
+INCLUDE_GRAPH = YES
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and
+# HAVE_DOT tags are set to YES then doxygen will generate a graph for each
+# documented header file showing the documented files that directly or
+# indirectly include this file.
+
+INCLUDED_BY_GRAPH = YES
+
+# If the CALL_GRAPH and HAVE_DOT options are set to YES then
+# doxygen will generate a call dependency graph for every global function
+# or class method. Note that enabling this option will significantly increase
+# the time of a run. So in most cases it will be better to enable call graphs
+# for selected functions only using the \callgraph command.
+
+CALL_GRAPH = NO
+
+# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then
+# doxygen will generate a caller dependency graph for every global function
+# or class method. Note that enabling this option will significantly increase
+# the time of a run. So in most cases it will be better to enable caller
+# graphs for selected functions only using the \callergraph command.
+
+CALLER_GRAPH = NO
+
+# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen
+# will show a graphical hierarchy of all classes instead of a textual one.
+
+GRAPHICAL_HIERARCHY = YES
+
+# If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES
+# then doxygen will show the dependencies a directory has on other directories
+# in a graphical way. The dependency relations are determined by the #include
+# relations between the files in the directories.
+
+DIRECTORY_GRAPH = YES
+
+# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
+# generated by dot. Possible values are png, jpg, or gif
+# If left blank png will be used.
+
+DOT_IMAGE_FORMAT = png
+
+# The tag DOT_PATH can be used to specify the path where the dot tool can be
+# found. If left blank, it is assumed the dot tool can be found in the path.
+
+DOT_PATH = @DOT@
+
+# The DOTFILE_DIRS tag can be used to specify one or more directories that
+# contain dot files that are included in the documentation (see the
+# \dotfile command).
+
+DOTFILE_DIRS =
+
+# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of
+# nodes that will be shown in the graph. If the number of nodes in a graph
+# becomes larger than this value, doxygen will truncate the graph, which is
+# visualized by representing a node as a red box. Note that if the
+# number of direct children of the root node in a graph is already larger than
+# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note
+# that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
+
+DOT_GRAPH_MAX_NODES = 50
+
+# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the
+# graphs generated by dot. A depth value of 3 means that only nodes reachable
+# from the root by following a path via at most 3 edges will be shown. Nodes
+# that lie further from the root node will be omitted. Note that setting this
+# option to 1 or 2 may greatly reduce the computation time needed for large
+# code bases. Also note that the size of a graph can be further restricted by
+# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
+
+MAX_DOT_GRAPH_DEPTH = 0
+
+# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
+# background. This is disabled by default, because dot on Windows does not
+# seem to support this out of the box. Warning: Depending on the platform used,
+# enabling this option may lead to badly anti-aliased labels on the edges of
+# a graph (i.e. they become hard to read).
+
+DOT_TRANSPARENT = YES
+
+# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output
+# files in one run (i.e. multiple -o and -T options on the command line). This
+# makes dot run faster, but since only newer versions of dot (>1.8.10)
+# support this, this feature is disabled by default.
+
+DOT_MULTI_TARGETS = NO
+
+# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will
+# generate a legend page explaining the meaning of the various boxes and
+# arrows in the dot generated graphs.
+
+GENERATE_LEGEND = YES
+
+# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will
+# remove the intermediate dot files that are used to generate
+# the various graphs.
+
+DOT_CLEANUP = YES
diff --git a/docs/doxygen.css b/docs/doxygen.css
new file mode 100644
index 00000000000..83951f673db
--- /dev/null
+++ b/docs/doxygen.css
@@ -0,0 +1,408 @@
+BODY,H1,H2,H3,H4,H5,H6,P,CENTER,TD,TH,UL,DL,DIV {
+ font-family: Verdana,Geneva,Arial,Helvetica,sans-serif;
+}
+BODY,TD {
+ font-size: 90%;
+}
+H1 {
+ text-align: center;
+ font-size: 140%;
+ font-weight: bold;
+}
+H2 {
+ font-size: 120%;
+ font-style: italic;
+}
+H3 {
+ font-size: 100%;
+}
+CAPTION { font-weight: bold }
+DIV.qindex {
+ width: 100%;
+ background-color: #eeeeff;
+ border: 1px solid #b0b0b0;
+ text-align: center;
+ margin: 2px;
+ padding: 2px;
+ line-height: 140%;
+}
+DIV.nav {
+ width: 100%;
+ background-color: #eeeeff;
+ border: 1px solid #b0b0b0;
+ text-align: center;
+ margin: 2px;
+ padding: 2px;
+ line-height: 140%;
+}
+DIV.navtab {
+ background-color: #eeeeff;
+ border: 1px solid #b0b0b0;
+ text-align: center;
+ margin: 2px;
+ margin-right: 15px;
+ padding: 2px;
+}
+TD.navtab {
+ font-size: 70%;
+}
+A.qindex {
+ text-decoration: none;
+ font-weight: bold;
+ color: #1A419D;
+}
+A.qindex:visited {
+ text-decoration: none;
+ font-weight: bold;
+ color: #1A419D
+}
+A.qindex:hover {
+ text-decoration: none;
+ background-color: #ddddff;
+}
+A.qindexHL {
+ text-decoration: none;
+ font-weight: bold;
+ background-color: #6666cc;
+ color: #ffffff;
+ border: 1px double #9295C2;
+}
+A.qindexHL:hover {
+ text-decoration: none;
+ background-color: #6666cc;
+ color: #ffffff;
+}
+A.qindexHL:visited {
+ text-decoration: none; background-color: #6666cc; color: #ffffff }
+A.el { text-decoration: none; font-weight: bold }
+A.elRef { font-weight: bold }
+A.code:link { text-decoration: none; font-weight: normal; color: #0000FF}
+A.code:visited { text-decoration: none; font-weight: normal; color: #0000FF}
+A.codeRef:link { font-weight: normal; color: #0000FF}
+A.codeRef:visited { font-weight: normal; color: #0000FF}
+A:hover { text-decoration: none; background-color: #f2f2ff }
+DL.el { margin-left: -1cm }
+.fragment {
+ font-family: Fixed, monospace;
+ font-size: 95%;
+}
+PRE.fragment {
+ border: 1px solid #CCCCCC;
+ background-color: #f5f5f5;
+ margin-top: 4px;
+ margin-bottom: 4px;
+ margin-left: 2px;
+ margin-right: 8px;
+ padding-left: 6px;
+ padding-right: 6px;
+ padding-top: 4px;
+ padding-bottom: 4px;
+}
+DIV.ah { background-color: black; font-weight: bold; color: #ffffff; margin-bottom: 3px; margin-top: 3px }
+TD.md { background-color: #F4F4FB; font-weight: bold; }
+TD.mdPrefix {
+ background-color: #F4F4FB;
+ color: #606060;
+ font-size: 80%;
+}
+TD.mdname1 { background-color: #F4F4FB; font-weight: bold; color: #602020; }
+TD.mdname { background-color: #F4F4FB; font-weight: bold; color: #602020; width: 600px; }
+DIV.groupHeader {
+ margin-left: 16px;
+ margin-top: 12px;
+ margin-bottom: 6px;
+ font-weight: bold;
+}
+DIV.groupText { margin-left: 16px; font-style: italic; font-size: 90% }
+BODY {
+ background: white;
+ color: black;
+ margin-right: 20px;
+ margin-left: 20px;
+}
+TD.indexkey {
+ background-color: #eeeeff;
+ font-weight: bold;
+ padding-right : 10px;
+ padding-top : 2px;
+ padding-left : 10px;
+ padding-bottom : 2px;
+ margin-left : 0px;
+ margin-right : 0px;
+ margin-top : 2px;
+ margin-bottom : 2px;
+ border: 1px solid #CCCCCC;
+}
+TD.indexvalue {
+ background-color: #eeeeff;
+ font-style: italic;
+ padding-right : 10px;
+ padding-top : 2px;
+ padding-left : 10px;
+ padding-bottom : 2px;
+ margin-left : 0px;
+ margin-right : 0px;
+ margin-top : 2px;
+ margin-bottom : 2px;
+ border: 1px solid #CCCCCC;
+}
+TR.memlist {
+ background-color: #f0f0f0;
+}
+P.formulaDsp { text-align: center; }
+IMG.formulaDsp { }
+IMG.formulaInl { vertical-align: middle; }
+SPAN.keyword { color: #008000 }
+SPAN.keywordtype { color: #604020 }
+SPAN.keywordflow { color: #e08000 }
+SPAN.comment { color: #800000 }
+SPAN.preprocessor { color: #806020 }
+SPAN.stringliteral { color: #002080 }
+SPAN.charliteral { color: #008080 }
+.mdTable {
+ border: 1px solid #868686;
+ background-color: #F4F4FB;
+}
+.mdRow {
+ padding: 8px 10px;
+}
+.mdescLeft {
+ padding: 0px 8px 4px 8px;
+ font-size: 80%;
+ font-style: italic;
+ background-color: #FAFAFA;
+ border-top: 1px none #E0E0E0;
+ border-right: 1px none #E0E0E0;
+ border-bottom: 1px none #E0E0E0;
+ border-left: 1px none #E0E0E0;
+ margin: 0px;
+}
+.mdescRight {
+ padding: 0px 8px 4px 8px;
+ font-size: 80%;
+ font-style: italic;
+ background-color: #FAFAFA;
+ border-top: 1px none #E0E0E0;
+ border-right: 1px none #E0E0E0;
+ border-bottom: 1px none #E0E0E0;
+ border-left: 1px none #E0E0E0;
+ margin: 0px;
+}
+.memItemLeft {
+ padding: 1px 0px 0px 8px;
+ margin: 4px;
+ border-top-width: 1px;
+ border-right-width: 1px;
+ border-bottom-width: 1px;
+ border-left-width: 1px;
+ border-top-color: #E0E0E0;
+ border-right-color: #E0E0E0;
+ border-bottom-color: #E0E0E0;
+ border-left-color: #E0E0E0;
+ border-top-style: solid;
+ border-right-style: none;
+ border-bottom-style: none;
+ border-left-style: none;
+ background-color: #FAFAFA;
+ font-size: 80%;
+}
+.memItemRight {
+ padding: 1px 8px 0px 8px;
+ margin: 4px;
+ border-top-width: 1px;
+ border-right-width: 1px;
+ border-bottom-width: 1px;
+ border-left-width: 1px;
+ border-top-color: #E0E0E0;
+ border-right-color: #E0E0E0;
+ border-bottom-color: #E0E0E0;
+ border-left-color: #E0E0E0;
+ border-top-style: solid;
+ border-right-style: none;
+ border-bottom-style: none;
+ border-left-style: none;
+ background-color: #FAFAFA;
+ font-size: 80%;
+}
+.memTemplItemLeft {
+ padding: 1px 0px 0px 8px;
+ margin: 4px;
+ border-top-width: 1px;
+ border-right-width: 1px;
+ border-bottom-width: 1px;
+ border-left-width: 1px;
+ border-top-color: #E0E0E0;
+ border-right-color: #E0E0E0;
+ border-bottom-color: #E0E0E0;
+ border-left-color: #E0E0E0;
+ border-top-style: none;
+ border-right-style: none;
+ border-bottom-style: none;
+ border-left-style: none;
+ background-color: #FAFAFA;
+ font-size: 80%;
+}
+.memTemplItemRight {
+ padding: 1px 8px 0px 8px;
+ margin: 4px;
+ border-top-width: 1px;
+ border-right-width: 1px;
+ border-bottom-width: 1px;
+ border-left-width: 1px;
+ border-top-color: #E0E0E0;
+ border-right-color: #E0E0E0;
+ border-bottom-color: #E0E0E0;
+ border-left-color: #E0E0E0;
+ border-top-style: none;
+ border-right-style: none;
+ border-bottom-style: none;
+ border-left-style: none;
+ background-color: #FAFAFA;
+ font-size: 80%;
+}
+.memTemplParams {
+ padding: 1px 0px 0px 8px;
+ margin: 4px;
+ border-top-width: 1px;
+ border-right-width: 1px;
+ border-bottom-width: 1px;
+ border-left-width: 1px;
+ border-top-color: #E0E0E0;
+ border-right-color: #E0E0E0;
+ border-bottom-color: #E0E0E0;
+ border-left-color: #E0E0E0;
+ border-top-style: solid;
+ border-right-style: none;
+ border-bottom-style: none;
+ border-left-style: none;
+ color: #606060;
+ background-color: #FAFAFA;
+ font-size: 80%;
+}
+.search { color: #003399;
+ font-weight: bold;
+}
+FORM.search {
+ margin-bottom: 0px;
+ margin-top: 0px;
+}
+INPUT.search { font-size: 75%;
+ color: #000080;
+ font-weight: normal;
+ background-color: #eeeeff;
+}
+TD.tiny { font-size: 75%;
+}
+a {
+ color: #252E78;
+}
+a:visited {
+ color: #3D2185;
+}
+.dirtab { padding: 4px;
+ border-collapse: collapse;
+ border: 1px solid #b0b0b0;
+}
+TH.dirtab { background: #eeeeff;
+ font-weight: bold;
+}
+HR { height: 1px;
+ border: none;
+ border-top: 1px solid black;
+}
+
+/*
+ * LLVM Modifications.
+ * Note: Everything above here is generated with the "doxygen -w html" command.
+ * See "doxygen --help" for details. What follows are CSS overrides for
+ * LLVM-specific formatting. We want to keep the above so it can be replaced with
+ * subsequent doxygen upgrades.
+ */
+
+.footer {
+ font-size: 80%;
+ font-weight: bold;
+ text-align: center;
+ vertical-align: middle;
+}
+.title {
+ font-size: 25pt;
+ color: black;
+ font-weight: bold;
+ border-width: 1px;
+ border-style: solid none solid none;
+ text-align: center;
+ vertical-align: middle;
+ padding-left: 8pt;
+ padding-top: 1px;
+ padding-bottom: 2px
+}
+A:link {
+ cursor: pointer;
+ text-decoration: none;
+ font-weight: bolder;
+}
+A:visited {
+ cursor: pointer;
+ text-decoration: underline;
+ font-weight: bolder;
+}
+A:hover {
+ cursor: pointer;
+ text-decoration: underline;
+ font-weight: bolder;
+}
+A:active {
+ cursor: pointer;
+ text-decoration: underline;
+ font-weight: bolder;
+ font-style: italic;
+}
+H1 {
+ text-align: center;
+ font-size: 140%;
+ font-weight: bold;
+}
+H2 {
+ font-size: 120%;
+ font-style: italic;
+}
+H3 {
+ font-size: 100%;
+}
+
+H2, H3 {
+ border-bottom: 2px solid;
+ margin-top: 2em;
+}
+
+A.qindex {}
+A.qindexRef {}
+A.el { text-decoration: none; font-weight: bold }
+A.elRef { font-weight: bold }
+A.code { text-decoration: none; font-weight: normal; color: #4444ee }
+A.codeRef { font-weight: normal; color: #4444ee }
+
+div.memitem {
+ border: 1px solid #999999;
+ margin-top: 1.0em;
+ margin-bottom: 1.0em;
+ -webkit-border-radius: 0.5em;
+ -webkit-box-shadow: 3px 3px 6px #777777;
+ -moz-border-radius: 0.5em;
+ -moz-box-shadow: black 3px 3px 3px;
+}
+
+div.memproto {
+ background-color: #E3E4E5;
+ padding: 0.25em 0.5em;
+ -webkit-border-top-left-radius: 0.5em;
+ -webkit-border-top-right-radius: 0.5em;
+ -moz-border-radius-topleft: 0.5em;
+ -moz-border-radius-topright: 0.5em;
+}
+
+div.memdoc {
+ padding-left: 1em;
+ padding-right: 1em;
+}
diff --git a/docs/doxygen.footer b/docs/doxygen.footer
new file mode 100644
index 00000000000..c492e7df6cb
--- /dev/null
+++ b/docs/doxygen.footer
@@ -0,0 +1,13 @@
+<hr>
+<p class="footer">
+Generated on $datetime for <a href="http://llvm.org/">$projectname</a> by
+<a href="http://www.doxygen.org"><img src="doxygen.png" alt="Doxygen"
+align="middle" border="0"/>$doxygenversion</a><br>
+Copyright &copy; 2003-2012 University of Illinois at Urbana-Champaign.
+All Rights Reserved.</p>
+
+<hr>
+<!--#include virtual="/attrib.incl" -->
+
+</body>
+</html>
diff --git a/docs/doxygen.header b/docs/doxygen.header
new file mode 100644
index 00000000000..56fb77fafdd
--- /dev/null
+++ b/docs/doxygen.header
@@ -0,0 +1,9 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head>
+<meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1"/>
+<meta name="keywords" content="LLVM,Low Level Virtual Machine,C++,doxygen,API,documentation"/>
+<meta name="description" content="C++ source code API documentation for LLVM."/>
+<title>LLVM: $title</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css"/>
+</head><body>
+<p class="title">LLVM API Documentation</p>
diff --git a/docs/doxygen.intro b/docs/doxygen.intro
new file mode 100644
index 00000000000..699dadc27e8
--- /dev/null
+++ b/docs/doxygen.intro
@@ -0,0 +1,18 @@
+/// @mainpage LLVM
+///
+/// @section main_intro Introduction
+/// Welcome to LLVM.
+///
+/// This documentation describes the @b internal software that makes
+/// up LLVM, not the @b external use of LLVM. There are no instructions
+/// here on how to use LLVM, only the APIs that make up the software. For usage
+/// instructions, please see the programmer's guide or reference manual.
+///
+/// @section main_caveat Caveat
+/// This documentation is generated directly from the source code with doxygen.
+/// Since LLVM is constantly under active development, what you're about to
+/// read is out of date! However, it may still be useful since certain portions
+/// of LLVM are very stable.
+///
+/// @section main_changelog Change Log
+/// - Original content written 12/30/2003 by Reid Spencer
diff --git a/docs/index.rst b/docs/index.rst
new file mode 100644
index 00000000000..53d3e7c01b7
--- /dev/null
+++ b/docs/index.rst
@@ -0,0 +1,70 @@
+.. _contents:
+
+Overview
+========
+
+.. warning::
+
+ If you are using a released version of LLVM, see `the download page
+ <http://llvm.org/releases/>`_ to find your documentation.
+
+The LLVM compiler infrastructure supports a wide range of projects, from
+industrial strength compilers to specialized JIT applications to small
+research projects.
+
+Similarly, documentation is broken down into several high-level groupings
+targeted at different audiences:
+
+ * **Design & Overview**
+
+ Several introductory papers and presentations are available at
+ :ref:`design_and_overview`.
+
+ * **Publications**
+
+ The list of `publications <http://llvm.org/pubs>`_ based on LLVM.
+
+ * **User Guides**
+
+ Those new to the LLVM system should first visit the :ref:`userguides`.
+
+ NOTE: If you are a user who is only interested in using LLVM-based
+ compilers, you should look into `Clang <http://clang.llvm.org>`_ or
+ `DragonEgg <http://dragonegg.llvm.org>`_ instead. The documentation here is
+ intended for users who have a need to work with the intermediate LLVM
+ representation.
+
+ * **API Clients**
+
+ Developers of applications which use LLVM as a library should visit the
+ :ref:`programming`.
+
+ * **Subsystems**
+
+ API clients and LLVM developers may be interested in the
+ :ref:`subsystems` documentation.
+
+ * **Development Process**
+
+ Additional documentation on the LLVM project can be found at
+ :ref:`development_process`.
+
+ * **Mailing Lists**
+
+ For more information, consider consulting the LLVM :ref:`mailing_lists`.
+
+.. toctree::
+ :maxdepth: 2
+
+ design_and_overview
+ userguides
+ programming
+ subsystems
+ development_process
+ mailing_lists
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`search`
diff --git a/docs/mailing_lists.rst b/docs/mailing_lists.rst
new file mode 100644
index 00000000000..106f1da48f8
--- /dev/null
+++ b/docs/mailing_lists.rst
@@ -0,0 +1,35 @@
+.. _mailing_lists:
+
+Mailing Lists
+=============
+
+ * `LLVM Announcements List
+ <http://lists.cs.uiuc.edu/mailman/listinfo/llvm-announce>`_
+
+ This is a low volume list that provides important announcements regarding
+ LLVM. It gets email about once a month.
+
+ * `Developer's List <http://lists.cs.uiuc.edu/mailman/listinfo/llvmdev>`_
+
+ This list is for people who want to be included in technical discussions of
+ LLVM. People post to this list when they have questions about writing code
+ for or using the LLVM tools. It is relatively low volume.
+
+ * `Bugs & Patches Archive <http://lists.cs.uiuc.edu/pipermail/llvmbugs/>`_
+
+ This list gets emailed every time a bug is opened or closed, and when people
+ submit patches to be included in LLVM. It is higher volume than the LLVMdev
+ list.
+
+ * `Commits Archive <http://lists.cs.uiuc.edu/pipermail/llvm-commits/>`_
+
+ This list contains all commit messages that are made when LLVM developers
+ commit code changes to the repository. It is useful for those who want to
+ stay on the bleeding edge of LLVM development. This list is very high volume.
+
+ * `Test Results Archive
+ <http://lists.cs.uiuc.edu/pipermail/llvm-testresults/>`_
+
+ A message is automatically sent to this list by every active nightly tester
+ when it completes. As such, this list gets email several times each day,
+ making it a high volume list.
diff --git a/docs/make.bat b/docs/make.bat
new file mode 100644
index 00000000000..8dfec039fdc
--- /dev/null
+++ b/docs/make.bat
@@ -0,0 +1,190 @@
+@ECHO OFF
+
+REM Command file for Sphinx documentation
+
+if "%SPHINXBUILD%" == "" (
+ set SPHINXBUILD=sphinx-build
+)
+set BUILDDIR=_build
+set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% .
+set I18NSPHINXOPTS=%SPHINXOPTS% .
+if NOT "%PAPER%" == "" (
+ set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
+ set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS%
+)
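+
+REM Illustrative usage (assumes sphinx-build is on PATH or SPHINXBUILD is set):
+REM   make.bat html    -- build the HTML docs into %BUILDDIR%\html
+REM   make.bat clean   -- remove everything under %BUILDDIR%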
+
+if "%1" == "" goto help
+
+if "%1" == "help" (
+ :help
+ echo.Please use `make ^<target^>` where ^<target^> is one of
+ echo. html to make standalone HTML files
+ echo. dirhtml to make HTML files named index.html in directories
+ echo. singlehtml to make a single large HTML file
+ echo. pickle to make pickle files
+ echo. json to make JSON files
+ echo. htmlhelp to make HTML files and a HTML help project
+ echo. qthelp to make HTML files and a qthelp project
+ echo. devhelp to make HTML files and a Devhelp project
+ echo. epub to make an epub
+ echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter
+ echo. text to make text files
+ echo. man to make manual pages
+ echo. texinfo to make Texinfo files
+ echo. gettext to make PO message catalogs
+ echo. changes to make an overview over all changed/added/deprecated items
+ echo. linkcheck to check all external links for integrity
+ echo. doctest to run all doctests embedded in the documentation if enabled
+ goto end
+)
+
+if "%1" == "clean" (
+ for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
+ del /q /s %BUILDDIR%\*
+ goto end
+)
+
+if "%1" == "html" (
+ %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The HTML pages are in %BUILDDIR%/html.
+ goto end
+)
+
+if "%1" == "dirhtml" (
+ %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
+ goto end
+)
+
+if "%1" == "singlehtml" (
+ %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml.
+ goto end
+)
+
+if "%1" == "pickle" (
+ %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished; now you can process the pickle files.
+ goto end
+)
+
+if "%1" == "json" (
+ %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished; now you can process the JSON files.
+ goto end
+)
+
+if "%1" == "htmlhelp" (
+ %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished; now you can run HTML Help Workshop with the ^
+.hhp project file in %BUILDDIR%/htmlhelp.
+ goto end
+)
+
+if "%1" == "qthelp" (
+ %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished; now you can run "qcollectiongenerator" with the ^
+.qhcp project file in %BUILDDIR%/qthelp, like this:
+ echo.^> qcollectiongenerator %BUILDDIR%\qthelp\llvm.qhcp
+ echo.To view the help file:
+ echo.^> assistant -collectionFile %BUILDDIR%\qthelp\llvm.qhc
+ goto end
+)
+
+if "%1" == "devhelp" (
+ %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished.
+ goto end
+)
+
+if "%1" == "epub" (
+ %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The epub file is in %BUILDDIR%/epub.
+ goto end
+)
+
+if "%1" == "latex" (
+ %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
+ goto end
+)
+
+if "%1" == "text" (
+ %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The text files are in %BUILDDIR%/text.
+ goto end
+)
+
+if "%1" == "man" (
+ %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The manual pages are in %BUILDDIR%/man.
+ goto end
+)
+
+if "%1" == "texinfo" (
+ %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo.
+ goto end
+)
+
+if "%1" == "gettext" (
+ %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The message catalogs are in %BUILDDIR%/locale.
+ goto end
+)
+
+if "%1" == "changes" (
+ %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.The overview file is in %BUILDDIR%/changes.
+ goto end
+)
+
+if "%1" == "linkcheck" (
+ %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Link check complete; look for any errors in the above output ^
+or in %BUILDDIR%/linkcheck/output.txt.
+ goto end
+)
+
+if "%1" == "doctest" (
+ %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Testing of doctests in the sources finished, look at the ^
+results in %BUILDDIR%/doctest/output.txt.
+ goto end
+)
+
+:end
diff --git a/docs/programming.rst b/docs/programming.rst
new file mode 100644
index 00000000000..27e43014ee3
--- /dev/null
+++ b/docs/programming.rst
@@ -0,0 +1,40 @@
+.. _programming:
+
+Programming Documentation
+=========================
+
+.. toctree::
+ :hidden:
+
+ CodingStandards
+ CommandLine
+
+* `LLVM Language Reference Manual <LangRef.html>`_
+
+ Defines the LLVM intermediate representation and the assembly form of the
+ different nodes.
+
+* `The LLVM Programmers Manual <ProgrammersManual.html>`_
+
+ Introduction to the general layout of the LLVM source base, important classes
+ and APIs, and some tips & tricks.
+
+* :ref:`commandline`
+
+ Provides information on using the command line parsing library.
+
+* :ref:`coding_standards`
+
+ Details the LLVM coding standards and provides useful information on writing
+ efficient C++ code.
+
+* `Extending LLVM <ExtendingLLVM.html>`_
+
+ Look here to see how to add instructions and intrinsics to LLVM.
+
+* `Doxygen generated documentation <http://llvm.org/doxygen/>`_
+
+ (`classes <http://llvm.org/doxygen/inherits.html>`_)
+ (`tarball <http://llvm.org/doxygen/doxygen.tar.gz>`_)
+
+* `ViewVC Repository Browser <http://llvm.org/viewvc/>`_
diff --git a/docs/re_format.7 b/docs/re_format.7
new file mode 100644
index 00000000000..0c0928716f4
--- /dev/null
+++ b/docs/re_format.7
@@ -0,0 +1,756 @@
+.\" $OpenBSD: re_format.7,v 1.14 2007/05/31 19:19:30 jmc Exp $
+.\"
+.\" Copyright (c) 1997, Phillip F Knaack. All rights reserved.
+.\"
+.\" Copyright (c) 1992, 1993, 1994 Henry Spencer.
+.\" Copyright (c) 1992, 1993, 1994
+.\" The Regents of the University of California. All rights reserved.
+.\"
+.\" This code is derived from software contributed to Berkeley by
+.\" Henry Spencer.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\" 3. Neither the name of the University nor the names of its contributors
+.\" may be used to endorse or promote products derived from this software
+.\" without specific prior written permission.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\" @(#)re_format.7 8.3 (Berkeley) 3/20/94
+.\"
+.Dd $Mdocdate: May 31 2007 $
+.Dt RE_FORMAT 7
+.Os
+.Sh NAME
+.Nm re_format
+.Nd POSIX regular expressions
+.Sh DESCRIPTION
+Regular expressions (REs),
+as defined in
+.St -p1003.1-2004 ,
+come in two forms:
+basic regular expressions
+(BREs)
+and extended regular expressions
+(EREs).
+Both forms of regular expressions are supported
+by the interfaces described in
+.Xr regex 3 .
+Applications dealing with regular expressions
+may use one or the other form
+(or indeed both).
+For example,
+.Xr ed 1
+uses BREs,
+whilst
+.Xr egrep 1
+talks EREs.
+Consult the manual page for the specific application to find out which
+it uses.
+.Pp
+POSIX leaves some aspects of RE syntax and semantics open;
+.Sq **
+marks decisions on these aspects that
+may not be fully portable to other POSIX implementations.
+.Pp
+This manual page first describes regular expressions in general,
+specifically extended regular expressions,
+and then discusses differences between them and basic regular expressions.
+.Sh EXTENDED REGULAR EXPRESSIONS
+An ERE is one** or more non-empty**
+.Em branches ,
+separated by
+.Sq \*(Ba .
+It matches anything that matches one of the branches.
+.Pp
+A branch is one** or more
+.Em pieces ,
+concatenated.
+It matches a match for the first, followed by a match for the second, etc.
+.Pp
+A piece is an
+.Em atom
+possibly followed by a single**
+.Sq * ,
+.Sq + ,
+.Sq ?\& ,
+or
+.Em bound .
+An atom followed by
+.Sq *
+matches a sequence of 0 or more matches of the atom.
+An atom followed by
+.Sq +
+matches a sequence of 1 or more matches of the atom.
+An atom followed by
+.Sq ?\&
+matches a sequence of 0 or 1 matches of the atom.
+.Pp
+A bound is
+.Sq {
+followed by an unsigned decimal integer,
+possibly followed by
+.Sq ,\&
+possibly followed by another unsigned decimal integer,
+always followed by
+.Sq } .
+The integers must lie between 0 and
+.Dv RE_DUP_MAX
+(255**) inclusive,
+and if there are two of them, the first may not exceed the second.
+An atom followed by a bound containing one integer
+.Ar i
+and no comma matches
+a sequence of exactly
+.Ar i
+matches of the atom.
+An atom followed by a bound
+containing one integer
+.Ar i
+and a comma matches
+a sequence of
+.Ar i
+or more matches of the atom.
+An atom followed by a bound
+containing two integers
+.Ar i
+and
+.Ar j
+matches a sequence of
+.Ar i
+through
+.Ar j
+(inclusive) matches of the atom.
+.Pp
+An atom is a regular expression enclosed in
+.Sq ()
+(matching a part of the regular expression),
+an empty set of
+.Sq ()
+(matching the null string)**,
+a
+.Em bracket expression
+(see below),
+.Sq .\&
+(matching any single character),
+.Sq ^
+(matching the null string at the beginning of a line),
+.Sq $
+(matching the null string at the end of a line),
+a
+.Sq \e
+followed by one of the characters
+.Sq ^.[$()|*+?{\e
+(matching that character taken as an ordinary character),
+a
+.Sq \e
+followed by any other character**
+(matching that character taken as an ordinary character,
+as if the
+.Sq \e
+had not been present**),
+or a single character with no other significance (matching that character).
+A
+.Sq {
+followed by a character other than a digit is an ordinary character,
+not the beginning of a bound**.
+It is illegal to end an RE with
+.Sq \e .
+.Pp
+A bracket expression is a list of characters enclosed in
+.Sq [] .
+It normally matches any single character from the list (but see below).
+If the list begins with
+.Sq ^ ,
+it matches any single character
+.Em not
+from the rest of the list
+(but see below).
+If two characters in the list are separated by
+.Sq - ,
+this is shorthand for the full
+.Em range
+of characters between those two (inclusive) in the
+collating sequence, e.g.\&
+.Sq [0-9]
+in ASCII matches any decimal digit.
+It is illegal** for two ranges to share an endpoint, e.g.\&
+.Sq a-c-e .
+Ranges are very collating-sequence-dependent,
+and portable programs should avoid relying on them.
+.Pp
+To include a literal
+.Sq ]\&
+in the list, make it the first character
+(following a possible
+.Sq ^ ) .
+To include a literal
+.Sq - ,
+make it the first or last character,
+or the second endpoint of a range.
+To use a literal
+.Sq -
+as the first endpoint of a range,
+enclose it in
+.Sq [.
+and
+.Sq .]
+to make it a collating element (see below).
+With the exception of these and some combinations using
+.Sq [
+(see next paragraphs),
+all other special characters, including
+.Sq \e ,
+lose their special significance within a bracket expression.
+.Pp
+Within a bracket expression, a collating element
+(a character,
+a multi-character sequence that collates as if it were a single character,
+or a collating-sequence name for either)
+enclosed in
+.Sq [.
+and
+.Sq .]
+stands for the sequence of characters of that collating element.
+The sequence is a single element of the bracket expression's list.
+A bracket expression containing a multi-character collating element
+can thus match more than one character,
+e.g. if the collating sequence includes a
+.Sq ch
+collating element,
+then the RE
+.Sq [[.ch.]]*c
+matches the first five characters of
+.Sq chchcc .
+.Pp
+Within a bracket expression, a collating element enclosed in
+.Sq [=
+and
+.Sq =]
+is an equivalence class, standing for the sequences of characters
+of all collating elements equivalent to that one, including itself.
+(If there are no other equivalent collating elements,
+the treatment is as if the enclosing delimiters were
+.Sq [.
+and
+.Sq .] . )
+For example, if
+.Sq x
+and
+.Sq y
+are the members of an equivalence class,
+then
+.Sq [[=x=]] ,
+.Sq [[=y=]] ,
+and
+.Sq [xy]
+are all synonymous.
+An equivalence class may not** be an endpoint of a range.
+.Pp
+Within a bracket expression, the name of a
+.Em character class
+enclosed
+in
+.Sq [:
+and
+.Sq :]
+stands for the list of all characters belonging to that class.
+Standard character class names are:
+.Bd -literal -offset indent
+alnum digit punct
+alpha graph space
+blank lower upper
+cntrl print xdigit
+.Ed
+.Pp
+These stand for the character classes defined in
+.Xr ctype 3 .
+A locale may provide others.
+A character class may not be used as an endpoint of a range.
+.Pp
+There are two special cases** of bracket expressions:
+the bracket expressions
+.Sq [[:<:]]
+and
+.Sq [[:>:]]
+match the null string at the beginning and end of a word, respectively.
+A word is defined as a sequence of
+characters starting and ending with a word character
+which is neither preceded nor followed by
+word characters.
+A word character is an
+.Em alnum
+character (as defined by
+.Xr ctype 3 )
+or an underscore.
+This is an extension,
+compatible with but not specified by POSIX,
+and should be used with
+caution in software intended to be portable to other systems.
+.Pp
+In the event that an RE could match more than one substring of a given
+string,
+the RE matches the one starting earliest in the string.
+If the RE could match more than one substring starting at that point,
+it matches the longest.
+Subexpressions also match the longest possible substrings, subject to
+the constraint that the whole match be as long as possible,
+with subexpressions starting earlier in the RE taking priority over
+ones starting later.
+Note that higher-level subexpressions thus take priority over
+their lower-level component subexpressions.
+.Pp
+Match lengths are measured in characters, not collating elements.
+A null string is considered longer than no match at all.
+For example,
+.Sq bb*
+matches the three middle characters of
+.Sq abbbc ;
+.Sq (wee|week)(knights|nights)
+matches all ten characters of
+.Sq weeknights ;
+when
+.Sq (.*).*
+is matched against
+.Sq abc ,
+the parenthesized subexpression matches all three characters;
+and when
+.Sq (a*)*
+is matched against
+.Sq bc ,
+both the whole RE and the parenthesized subexpression match the null string.
+.Pp
+If case-independent matching is specified,
+the effect is much as if all case distinctions had vanished from the
+alphabet.
+When an alphabetic that exists in multiple cases appears as an
+ordinary character outside a bracket expression, it is effectively
+transformed into a bracket expression containing both cases,
+e.g.\&
+.Sq x
+becomes
+.Sq [xX] .
+When it appears inside a bracket expression,
+all case counterparts of it are added to the bracket expression,
+so that, for example,
+.Sq [x]
+becomes
+.Sq [xX]
+and
+.Sq [^x]
+becomes
+.Sq [^xX] .
+.Pp
+No particular limit is imposed on the length of REs**.
+Programs intended to be portable should not employ REs longer
+than 256 bytes,
+as an implementation can refuse to accept such REs and remain
+POSIX-compliant.
+.Pp
+The following is a list of extended regular expressions:
+.Bl -tag -width Ds
+.It Ar c
+Any character
+.Ar c
+not listed below matches itself.
+.It \e Ns Ar c
+Any backslash-escaped character
+.Ar c
+matches itself.
+.It \&.
+Matches any single character that is not a newline
+.Pq Sq \en .
+.It Bq Ar char-class
+Matches any single character in
+.Ar char-class .
+To include a
+.Ql \&]
+in
+.Ar char-class ,
+it must be the first character.
+A range of characters may be specified by separating the end characters
+of the range with a
+.Ql - ;
+e.g.\&
+.Ar a-z
+specifies the lower case characters.
+The following literal expressions can also be used in
+.Ar char-class
+to specify sets of characters:
+.Bd -unfilled -offset indent
+[:alnum:] [:cntrl:] [:lower:] [:space:]
+[:alpha:] [:digit:] [:print:] [:upper:]
+[:blank:] [:graph:] [:punct:] [:xdigit:]
+.Ed
+.Pp
+If
+.Ql -
+appears as the first or last character of
+.Ar char-class ,
+then it matches itself.
+All other characters in
+.Ar char-class
+match themselves.
+.Pp
+Patterns in
+.Ar char-class
+of the form
+.Eo [.
+.Ar col-elm
+.Ec .]\&
+or
+.Eo [=
+.Ar col-elm
+.Ec =]\& ,
+where
+.Ar col-elm
+is a collating element, are interpreted according to
+.Xr setlocale 3
+.Pq not currently supported .
+.It Bq ^ Ns Ar char-class
+Matches any single character, other than newline, not in
+.Ar char-class .
+.Ar char-class
+is defined as above.
+.It ^
+If
+.Sq ^
+is the first character of a regular expression, then it
+anchors the regular expression to the beginning of a line.
+Otherwise, it matches itself.
+.It $
+If
+.Sq $
+is the last character of a regular expression,
+it anchors the regular expression to the end of a line.
+Otherwise, it matches itself.
+.It [[:<:]]
+Anchors the single character regular expression or subexpression
+immediately following it to the beginning of a word.
+.It [[:>:]]
+Anchors the single character regular expression or subexpression
+immediately following it to the end of a word.
+.It Pq Ar re
+Defines a subexpression
+.Ar re .
+Any set of characters enclosed in parentheses
+matches whatever the set of characters without parentheses matches
+(that is a long-winded way of saying the constructs
+.Sq (re)
+and
+.Sq re
+match identically).
+.It *
+Matches the single character regular expression or subexpression
+immediately preceding it zero or more times.
+If
+.Sq *
+is the first character of a regular expression or subexpression,
+then it matches itself.
+The
+.Sq *
+operator sometimes yields unexpected results.
+For example, the regular expression
+.Ar b*
+matches the beginning of the string
+.Qq abbb
+(as opposed to the substring
+.Qq bbb ) ,
+since a null match is the only leftmost match.
+.It +
+Matches the singular character regular expression
+or subexpression immediately preceding it
+one or more times.
+.It ?
+Matches the singular character regular expression
+or subexpression immediately preceding it
+0 or 1 times.
+.Sm off
+.It Xo
+.Pf { Ar n , m No }\ \&
+.Pf { Ar n , No }\ \&
+.Pf { Ar n No }
+.Xc
+.Sm on
+Matches the single character regular expression or subexpression
+immediately preceding it at least
+.Ar n
+and at most
+.Ar m
+times.
+If
+.Ar m
+is omitted, then it matches at least
+.Ar n
+times.
+If the comma is also omitted, then it matches exactly
+.Ar n
+times.
+.It \*(Ba
+Used to separate patterns.
+For example,
+the pattern
+.Sq cat\*(Badog
+matches either
+.Sq cat
+or
+.Sq dog .
+.El
+.Sh BASIC REGULAR EXPRESSIONS
+Basic regular expressions differ in several respects:
+.Bl -bullet -offset 3n
+.It
+.Sq \*(Ba ,
+.Sq + ,
+and
+.Sq ?\&
+are ordinary characters and there is no equivalent
+for their functionality.
+.It
+The delimiters for bounds are
+.Sq \e{
+and
+.Sq \e} ,
+with
+.Sq {
+and
+.Sq }
+by themselves ordinary characters.
+.It
+The parentheses for nested subexpressions are
+.Sq \e(
+and
+.Sq \e) ,
+with
+.Sq (
+and
+.Sq )\&
+by themselves ordinary characters.
+.It
+.Sq ^
+is an ordinary character except at the beginning of the
+RE or** the beginning of a parenthesized subexpression.
+.It
+.Sq $
+is an ordinary character except at the end of the
+RE or** the end of a parenthesized subexpression.
+.It
+.Sq *
+is an ordinary character if it appears at the beginning of the
+RE or the beginning of a parenthesized subexpression
+(after a possible leading
+.Sq ^ ) .
+.It
+Finally, there is one new type of atom, a
+.Em back-reference :
+.Sq \e
+followed by a non-zero decimal digit
+.Ar d
+matches the same sequence of characters matched by the
+.Ar d Ns th
+parenthesized subexpression
+(numbering subexpressions by the positions of their opening parentheses,
+left to right),
+so that, for example,
+.Sq \e([bc]\e)\e1
+matches
+.Sq bb\&
+or
+.Sq cc
+but not
+.Sq bc .
+.El
+.Pp
+The following is a list of basic regular expressions:
+.Bl -tag -width Ds
+.It Ar c
+Any character
+.Ar c
+not listed below matches itself.
+.It \e Ns Ar c
+Any backslash-escaped character
+.Ar c ,
+except for
+.Sq { ,
+.Sq } ,
+.Sq \&( ,
+and
+.Sq \&) ,
+matches itself.
+.It \&.
+Matches any single character that is not a newline
+.Pq Sq \en .
+.It Bq Ar char-class
+Matches any single character in
+.Ar char-class .
+To include a
+.Ql \&]
+in
+.Ar char-class ,
+it must be the first character.
+A range of characters may be specified by separating the end characters
+of the range with a
+.Ql - ;
+e.g.\&
+.Ar a-z
+specifies the lower case characters.
+The following literal expressions can also be used in
+.Ar char-class
+to specify sets of characters:
+.Bd -unfilled -offset indent
+[:alnum:] [:cntrl:] [:lower:] [:space:]
+[:alpha:] [:digit:] [:print:] [:upper:]
+[:blank:] [:graph:] [:punct:] [:xdigit:]
+.Ed
+.Pp
+If
+.Ql -
+appears as the first or last character of
+.Ar char-class ,
+then it matches itself.
+All other characters in
+.Ar char-class
+match themselves.
+.Pp
+Patterns in
+.Ar char-class
+of the form
+.Eo [.
+.Ar col-elm
+.Ec .]\&
+or
+.Eo [=
+.Ar col-elm
+.Ec =]\& ,
+where
+.Ar col-elm
+is a collating element, are interpreted according to
+.Xr setlocale 3
+.Pq not currently supported .
+.It Bq ^ Ns Ar char-class
+Matches any single character, other than newline, not in
+.Ar char-class .
+.Ar char-class
+is defined as above.
+.It ^
+If
+.Sq ^
+is the first character of a regular expression, then it
+anchors the regular expression to the beginning of a line.
+Otherwise, it matches itself.
+.It $
+If
+.Sq $
+is the last character of a regular expression,
+it anchors the regular expression to the end of a line.
+Otherwise, it matches itself.
+.It [[:<:]]
+Anchors the single character regular expression or subexpression
+immediately following it to the beginning of a word.
+.It [[:>:]]
+Anchors the single character regular expression or subexpression
+immediately following it to the end of a word.
+.It \e( Ns Ar re Ns \e)
+Defines a subexpression
+.Ar re .
+Subexpressions may be nested.
+A subsequent backreference of the form
+.Pf \e Ns Ar n ,
+where
+.Ar n
+is a number in the range [1,9], expands to the text matched by the
+.Ar n Ns th
+subexpression.
+For example, the regular expression
+.Ar \e(.*\e)\e1
+matches any string consisting of identical adjacent substrings.
+Subexpressions are ordered relative to their left delimiter.
+.It *
+Matches the single character regular expression or subexpression
+immediately preceding it zero or more times.
+If
+.Sq *
+is the first character of a regular expression or subexpression,
+then it matches itself.
+The
+.Sq *
+operator sometimes yields unexpected results.
+For example, the regular expression
+.Ar b*
+matches the beginning of the string
+.Qq abbb
+(as opposed to the substring
+.Qq bbb ) ,
+since a null match is the only leftmost match.
+.Sm off
+.It Xo
+.Pf \e{ Ar n , m No \e}\ \&
+.Pf \e{ Ar n , No \e}\ \&
+.Pf \e{ Ar n No \e}
+.Xc
+.Sm on
+Matches the single character regular expression or subexpression
+immediately preceding it at least
+.Ar n
+and at most
+.Ar m
+times.
+If
+.Ar m
+is omitted, then it matches at least
+.Ar n
+times.
+If the comma is also omitted, then it matches exactly
+.Ar n
+times.
+.El
+.Sh SEE ALSO
+.Xr ctype 3 ,
+.Xr regex 3
+.Sh STANDARDS
+.St -p1003.1-2004 :
+Base Definitions, Chapter 9 (Regular Expressions).
+.Sh BUGS
+Having two kinds of REs is a botch.
+.Pp
+The current POSIX spec says that
+.Sq )\&
+is an ordinary character in the absence of an unmatched
+.Sq ( ;
+this was an unintentional result of a wording error,
+and change is likely.
+Avoid relying on it.
+.Pp
+Back-references are a dreadful botch,
+posing major problems for efficient implementations.
+They are also somewhat vaguely defined
+(does
+.Sq a\e(\e(b\e)*\e2\e)*d
+match
+.Sq abbbd ? ) .
+Avoid using them.
+.Pp
+POSIX's specification of case-independent matching is vague.
+The
+.Dq one case implies all cases
+definition given above
+is the current consensus among implementors as to the right interpretation.
+.Pp
+The syntax for word boundaries is incredibly ugly.
diff --git a/docs/subsystems.rst b/docs/subsystems.rst
new file mode 100644
index 00000000000..be33295a151
--- /dev/null
+++ b/docs/subsystems.rst
@@ -0,0 +1,91 @@
+.. _subsystems:
+
+Subsystem Documentation
+=======================
+
+.. toctree::
+ :hidden:
+
+ AliasAnalysis
+ BitCodeFormat
+ BranchWeightMetadata
+ Bugpoint
+ CodeGenerator
+ ExceptionHandling
+ LinkTimeOptimization
+ SegmentedStacks
+ TableGenFundamentals
+
+* `Writing an LLVM Pass <WritingAnLLVMPass.html>`_
+
+ Information on how to write LLVM transformations and analyses.
+
+* `Writing an LLVM Backend <WritingAnLLVMBackend.html>`_
+
+ Information on how to write LLVM backends for machine targets.
+
+* :ref:`code_generator`
+
+ The design and implementation of the LLVM code generator. Useful if you are
+ working on retargeting LLVM to a new architecture, designing a new codegen
+ pass, or enhancing existing components.
+
+* :ref:`tablegen`
+
+ Describes the TableGen tool, which is used heavily by the LLVM code
+ generator.
+
+* :ref:`alias_analysis`
+
+ Information on how to write a new alias analysis implementation or how to
+ use existing analyses.
+
+* `Accurate Garbage Collection with LLVM <GarbageCollection.html>`_
+
+ The interfaces source-language compilers should use for compiling GC'd
+ programs.
+
+* `Source Level Debugging with LLVM <SourceLevelDebugging.html>`_
+
+ This document describes the design and philosophy behind the LLVM
+ source-level debugger.
+
+* :ref:`exception_handling`
+
+ This document describes the design and implementation of exception handling
+ in LLVM.
+
+* :ref:`bugpoint`
+
+ Automatic bug finder and test-case reducer description and usage
+ information.
+
+* :ref:`bitcode_format`
+
+ This describes the file format and encoding used for LLVM "bc" files.
+
+* `System Library <SystemLibrary.html>`_
+
+ This document describes the LLVM System Library (``lib/System``) and
+ how to keep LLVM source code portable.
+
+* :ref:`lto`
+
+ This document describes the interface between the LLVM intermodular
+ optimizer and the linker, and its design.
+
+* `The LLVM gold plugin <GoldPlugin.html>`_
+
+ How to build your programs with link-time optimization on Linux.
+
+* `The GDB JIT interface <DebuggingJITedCode.html>`_
+
+ How to debug JITed code with GDB.
+
+* :ref:`branch_weight`
+
+ Provides information about branch weight metadata and how it is used for
+ branch prediction.
+
+* :ref:`segmented_stacks`
+
+ This document describes segmented stacks and how they are used in LLVM.
diff --git a/docs/tutorial/LangImpl1.html b/docs/tutorial/LangImpl1.html
new file mode 100644
index 00000000000..a65646f2866
--- /dev/null
+++ b/docs/tutorial/LangImpl1.html
@@ -0,0 +1,348 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
+ "http://www.w3.org/TR/html4/strict.dtd">
+
+<html>
+<head>
+ <title>Kaleidoscope: Tutorial Introduction and the Lexer</title>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
+ <meta name="author" content="Chris Lattner">
+ <link rel="stylesheet" href="../_static/llvm.css" type="text/css">
+</head>
+
+<body>
+
+<h1>Kaleidoscope: Tutorial Introduction and the Lexer</h1>
+
+<ul>
+<li><a href="index.html">Up to Tutorial Index</a></li>
+<li>Chapter 1
+ <ol>
+ <li><a href="#intro">Tutorial Introduction</a></li>
+ <li><a href="#language">The Basic Language</a></li>
+ <li><a href="#lexer">The Lexer</a></li>
+ </ol>
+</li>
+<li><a href="LangImpl2.html">Chapter 2</a>: Implementing a Parser and AST</li>
+</ul>
+
+<div class="doc_author">
+ <p>Written by <a href="mailto:sabre@nondot.org">Chris Lattner</a></p>
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="intro">Tutorial Introduction</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>Welcome to the "Implementing a language with LLVM" tutorial. This tutorial
+runs through the implementation of a simple language, showing how fun and
+easy it can be. It will get you up and running, and will help you build a
+framework you can extend to other languages. The code in this tutorial
+can also be used as a playground to hack on other LLVM-specific things.
+</p>
+
+<p>
+The goal of this tutorial is to progressively unveil our language, describing
+how it is built up over time. This will let us cover a fairly broad range of
+language design and LLVM-specific usage issues, showing and explaining the code
+for it all along the way, without overwhelming you with tons of details up
+front.</p>
+
+<p>It is useful to point out ahead of time that this tutorial is really about
+teaching compiler techniques and LLVM specifically, <em>not</em> about teaching
+modern and sane software engineering principles. In practice, this means that
+we'll take a number of shortcuts to simplify the exposition. For example, the
+code leaks memory, uses global variables all over the place, doesn't use nice
+design patterns like <a
+href="http://en.wikipedia.org/wiki/Visitor_pattern">visitors</a>, etc... but it
+is very simple. If you dig in and use the code as a basis for future projects,
+fixing these deficiencies shouldn't be hard.</p>
+
+<p>I've tried to put this tutorial together in a way that makes chapters easy to
+skip over if you are already familiar with or are uninterested in the various
+pieces. The structure of the tutorial is:
+</p>
+
+<ul>
+<li><b><a href="#language">Chapter #1</a>: Introduction to the Kaleidoscope
+language, and the definition of its Lexer</b> - This shows where we are going
+and the basic functionality that we want it to have. In order to make this
+tutorial maximally understandable and hackable, we choose to implement
+everything in C++ instead of using lexer and parser generators. LLVM obviously
+works just fine with such tools; feel free to use one if you prefer.</li>
+<li><b><a href="LangImpl2.html">Chapter #2</a>: Implementing a Parser and
+AST</b> - With the lexer in place, we can talk about parsing techniques and
+basic AST construction. This tutorial describes recursive descent parsing and
+operator precedence parsing. Nothing in Chapters 1 or 2 is LLVM-specific;
+the code doesn't even link in LLVM at this point. :)</li>
+<li><b><a href="LangImpl3.html">Chapter #3</a>: Code generation to LLVM IR</b> -
+With the AST ready, we can show off how easy generation of LLVM IR really
+is.</li>
+<li><b><a href="LangImpl4.html">Chapter #4</a>: Adding JIT and Optimizer
+Support</b> - Because a lot of people are interested in using LLVM as a JIT,
+we'll dive right into it and show you the 3 lines it takes to add JIT support.
+LLVM is also useful in many other ways, but this is one simple and "sexy" way
+to show off its power. :)</li>
+<li><b><a href="LangImpl5.html">Chapter #5</a>: Extending the Language: Control
+Flow</b> - With the language up and running, we show how to extend it with
+control flow operations (if/then/else and a 'for' loop). This gives us a chance
+to talk about simple SSA construction and control flow.</li>
+<li><b><a href="LangImpl6.html">Chapter #6</a>: Extending the Language:
+User-defined Operators</b> - This is a silly but fun chapter that talks about
+extending the language to let user programs define their own arbitrary
+unary and binary operators (with assignable precedence!). This lets us build a
+significant piece of the "language" as library routines.</li>
+<li><b><a href="LangImpl7.html">Chapter #7</a>: Extending the Language: Mutable
+Variables</b> - This chapter talks about adding user-defined local variables
+along with an assignment operator. The interesting part about this is how
+easy it is to construct SSA form in LLVM: no, LLVM does <em>not</em>
+require your front-end to construct SSA form!</li>
+<li><b><a href="LangImpl8.html">Chapter #8</a>: Conclusion and other useful LLVM
+tidbits</b> - This chapter wraps up the series by talking about potential
+ways to extend the language, but also includes a bunch of pointers to info about
+"special topics" like adding garbage collection support, exceptions, debugging,
+support for "spaghetti stacks", and a bunch of other tips and tricks.</li>
+
+</ul>
+
+<p>By the end of the tutorial, we'll have written a bit less than 700
+non-comment, non-blank lines of code. With this small amount of code, we'll
+have built up a very reasonable compiler for a non-trivial language including
+a hand-written lexer, parser, AST, as well as code generation support with a JIT
+compiler. While other systems may have interesting "hello world" tutorials,
+I think the breadth of this tutorial is a great testament to the strengths of
+LLVM and why you should consider it if you're interested in language or compiler
+design.</p>
+
+<p>A note about this tutorial: we expect you to extend the language and play
+with it on your own. Take the code and go crazy hacking away at it; compilers
+don't need to be scary creatures, and it can be a lot of fun to play with
+languages!</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="language">The Basic Language</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>This tutorial will be illustrated with a toy language that we'll call
+"<a href="http://en.wikipedia.org/wiki/Kaleidoscope">Kaleidoscope</a>" (derived
+from "meaning beautiful, form, and view").
+Kaleidoscope is a procedural language that allows you to define functions, use
+conditionals, math, etc. Over the course of the tutorial, we'll extend
+Kaleidoscope to support the if/then/else construct, a for loop, user defined
+operators, JIT compilation with a simple command line interface, etc.</p>
+
+<p>Because we want to keep things simple, the only datatype in Kaleidoscope is a
+64-bit floating point type (aka 'double' in C parlance). As such, all values
+are implicitly double precision and the language doesn't require type
+declarations. This gives the language a very nice and simple syntax. For
+example, the following code computes <a
+href="http://en.wikipedia.org/wiki/Fibonacci_number">Fibonacci numbers:</a></p>
+
+<div class="doc_code">
+<pre>
+# Compute the x'th fibonacci number.
+def fib(x)
+ if x &lt; 3 then
+ 1
+ else
+ fib(x-1)+fib(x-2)
+
+# This expression will compute the 40th number.
+fib(40)
+</pre>
+</div>
+
+<p>We also allow Kaleidoscope to call into standard library functions (the LLVM
+JIT makes this completely trivial). This means that you can use the 'extern'
+keyword to declare a function before you use it (this is also useful for mutually
+recursive functions). For example:</p>
+
+<div class="doc_code">
+<pre>
+extern sin(arg);
+extern cos(arg);
+extern atan2(arg1 arg2);
+
+atan2(sin(.4), cos(42))
+</pre>
+</div>
+
+<p>A more interesting example is included in Chapter 6 where we write a little
+Kaleidoscope application that <a href="LangImpl6.html#example">displays
+a Mandelbrot Set</a> at various levels of magnification.</p>
+
+<p>Let's dive into the implementation of this language!</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="lexer">The Lexer</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>When it comes to implementing a language, the first thing needed is
+the ability to process a text file and recognize what it says. The traditional
+way to do this is to use a "<a
+href="http://en.wikipedia.org/wiki/Lexical_analysis">lexer</a>" (aka 'scanner')
+to break the input up into "tokens". Each token returned by the lexer includes
+a token code and potentially some metadata (e.g. the numeric value of a number).
+First, we define the possibilities:
+</p>
+
+<div class="doc_code">
+<pre>
+// The lexer returns tokens [0-255] if it is an unknown character, otherwise one
+// of these for known things.
+enum Token {
+ tok_eof = -1,
+
+ // commands
+ tok_def = -2, tok_extern = -3,
+
+ // primary
+ tok_identifier = -4, tok_number = -5,
+};
+
+static std::string IdentifierStr; // Filled in if tok_identifier
+static double NumVal; // Filled in if tok_number
+</pre>
+</div>
+
+<p>Each token returned by our lexer will either be one of the Token enum values
+or it will be an 'unknown' character like '+', which is returned as its ASCII
+value. If the current token is an identifier, the <tt>IdentifierStr</tt>
+global variable holds the name of the identifier. If the current token is a
+numeric literal (like 1.0), <tt>NumVal</tt> holds its value. Note that we use
+global variables for simplicity; this is not the best choice for a real language
+implementation :).
+</p>
+
+<p>The actual implementation of the lexer is a single function named
+<tt>gettok</tt>. The <tt>gettok</tt> function is called to return the next token
+from standard input. Its definition starts as:</p>
+
+<div class="doc_code">
+<pre>
+/// gettok - Return the next token from standard input.
+static int gettok() {
+ static int LastChar = ' ';
+
+ // Skip any whitespace.
+ while (isspace(LastChar))
+ LastChar = getchar();
+</pre>
+</div>
+
+<p>
+<tt>gettok</tt> works by calling the C <tt>getchar()</tt> function to read
+characters one at a time from standard input. It eats them as it recognizes
+them and stores the last character read, but not processed, in LastChar. The
+first thing that it has to do is ignore whitespace between tokens. This is
+accomplished with the loop above.</p>
+
+<p>The next thing <tt>gettok</tt> needs to do is recognize identifiers and
+specific keywords like "def". Kaleidoscope does this with this simple loop:</p>
+
+<div class="doc_code">
+<pre>
+ if (isalpha(LastChar)) { // identifier: [a-zA-Z][a-zA-Z0-9]*
+ IdentifierStr = LastChar;
+ while (isalnum((LastChar = getchar())))
+ IdentifierStr += LastChar;
+
+ if (IdentifierStr == "def") return tok_def;
+ if (IdentifierStr == "extern") return tok_extern;
+ return tok_identifier;
+ }
+</pre>
+</div>
+
+<p>Note that this code sets the '<tt>IdentifierStr</tt>' global whenever it
+lexes an identifier. Also, since language keywords are matched by the same
+loop, we handle them here inline. Numeric values are similar:</p>
+
+<div class="doc_code">
+<pre>
+ if (isdigit(LastChar) || LastChar == '.') { // Number: [0-9.]+
+ std::string NumStr;
+ do {
+ NumStr += LastChar;
+ LastChar = getchar();
+ } while (isdigit(LastChar) || LastChar == '.');
+
+ NumVal = strtod(NumStr.c_str(), 0);
+ return tok_number;
+ }
+</pre>
+</div>
+
+<p>This is all pretty straightforward code for processing input. When reading
+a numeric value from input, we use the C <tt>strtod</tt> function to convert it
+to a numeric value that we store in <tt>NumVal</tt>. Note that this isn't doing
+sufficient error checking: it will incorrectly read "1.23.45.67" and handle it as
+if you typed in "1.23". Feel free to extend it :).
+</p>
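+
+<p>For instance, one possible refinement (shown here only as an illustrative
+sketch, not as part of the tutorial's code listing) is to diagnose a literal
+that contains more than one decimal point before handing it to
+<tt>strtod</tt>. This assumes <tt>&lt;cstdio&gt;</tt> is available for
+<tt>fprintf</tt>:</p>
+
+<div class="doc_code">
+<pre>
+  if (isdigit(LastChar) || LastChar == '.') { // Number: [0-9.]+
+    std::string NumStr;
+    do {
+      NumStr += LastChar;
+      LastChar = getchar();
+    } while (isdigit(LastChar) || LastChar == '.');
+
+    // Diagnose malformed literals like "1.23.45.67"; we still fall back to
+    // whatever strtod can parse from the prefix.
+    if (NumStr.find('.') != NumStr.rfind('.'))
+      fprintf(stderr, "Warning: malformed number '%s'\n", NumStr.c_str());
+
+    NumVal = strtod(NumStr.c_str(), 0);
+    return tok_number;
+  }
+</pre>
+</div>
+
+<p>Next we handle comments:</p>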
+
+<div class="doc_code">
+<pre>
+ if (LastChar == '#') {
+ // Comment until end of line.
+ do LastChar = getchar();
+ while (LastChar != EOF &amp;&amp; LastChar != '\n' &amp;&amp; LastChar != '\r');
+
+ if (LastChar != EOF)
+ return gettok();
+ }
+</pre>
+</div>
+
+<p>We handle comments by skipping to the end of the line and then returning the
+next token. Finally, if the input doesn't match one of the above cases, it is
+either an operator character like '+' or the end of the file. These are handled
+with this code:</p>
+
+<div class="doc_code">
+<pre>
+ // Check for end of file. Don't eat the EOF.
+ if (LastChar == EOF)
+ return tok_eof;
+
+ // Otherwise, just return the character as its ascii value.
+ int ThisChar = LastChar;
+ LastChar = getchar();
+ return ThisChar;
+}
+</pre>
+</div>
+
+<p>With this, we have the complete lexer for the basic Kaleidoscope language
+(the <a href="LangImpl2.html#code">full code listing</a> for the Lexer is
+available in the <a href="LangImpl2.html">next chapter</a> of the tutorial).
+Next we'll <a href="LangImpl2.html">build a simple parser that uses this to
+build an Abstract Syntax Tree</a>. When we have that, we'll include a driver
+so that you can use the lexer and parser together.
+</p>
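+
+<p>If you want to play with the lexer on its own before then, a tiny throwaway
+driver like the following (not part of the tutorial code; the real driver comes
+in Chapter 2) will print the code of each token it reads until end of file:</p>
+
+<div class="doc_code">
+<pre>
+#include &lt;cstdio&gt;
+
+int main() {
+  while (1) {
+    int Tok = gettok();
+    fprintf(stderr, "got token: %d\n", Tok);
+    if (Tok == tok_eof) break;   // stop at end of input
+  }
+  return 0;
+}
+</pre>
+</div>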
+
+<a href="LangImpl2.html">Next: Implementing a Parser and AST</a>
+</div>
+
+<!-- *********************************************************************** -->
+<hr>
+<address>
+ <a href="http://jigsaw.w3.org/css-validator/check/referer"><img
+ src="http://jigsaw.w3.org/css-validator/images/vcss" alt="Valid CSS!"></a>
+ <a href="http://validator.w3.org/check/referer"><img
+ src="http://www.w3.org/Icons/valid-html401" alt="Valid HTML 4.01!"></a>
+
+ <a href="mailto:sabre@nondot.org">Chris Lattner</a><br>
+ <a href="http://llvm.org/">The LLVM Compiler Infrastructure</a><br>
+ Last modified: $Date$
+</address>
+</body>
+</html>
diff --git a/docs/tutorial/LangImpl2.html b/docs/tutorial/LangImpl2.html
new file mode 100644
index 00000000000..292dd4e516c
--- /dev/null
+++ b/docs/tutorial/LangImpl2.html
@@ -0,0 +1,1231 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
+ "http://www.w3.org/TR/html4/strict.dtd">
+
+<html>
+<head>
+ <title>Kaleidoscope: Implementing a Parser and AST</title>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
+ <meta name="author" content="Chris Lattner">
+ <link rel="stylesheet" href="../_static/llvm.css" type="text/css">
+</head>
+
+<body>
+
+<h1>Kaleidoscope: Implementing a Parser and AST</h1>
+
+<ul>
+<li><a href="index.html">Up to Tutorial Index</a></li>
+<li>Chapter 2
+ <ol>
+ <li><a href="#intro">Chapter 2 Introduction</a></li>
+ <li><a href="#ast">The Abstract Syntax Tree (AST)</a></li>
+ <li><a href="#parserbasics">Parser Basics</a></li>
+ <li><a href="#parserprimexprs">Basic Expression Parsing</a></li>
+ <li><a href="#parserbinops">Binary Expression Parsing</a></li>
+ <li><a href="#parsertop">Parsing the Rest</a></li>
+ <li><a href="#driver">The Driver</a></li>
+ <li><a href="#conclusions">Conclusions</a></li>
+ <li><a href="#code">Full Code Listing</a></li>
+ </ol>
+</li>
+<li><a href="LangImpl3.html">Chapter 3</a>: Code generation to LLVM IR</li>
+</ul>
+
+<div class="doc_author">
+ <p>Written by <a href="mailto:sabre@nondot.org">Chris Lattner</a></p>
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="intro">Chapter 2 Introduction</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>Welcome to Chapter 2 of the "<a href="index.html">Implementing a language
+with LLVM</a>" tutorial. This chapter shows you how to use the lexer, built in
+<a href="LangImpl1.html">Chapter 1</a>, to build a full <a
+href="http://en.wikipedia.org/wiki/Parsing">parser</a> for
+our Kaleidoscope language. Once we have a parser, we'll define and build an <a
+href="http://en.wikipedia.org/wiki/Abstract_syntax_tree">Abstract Syntax
+Tree</a> (AST).</p>
+
+<p>The parser we will build uses a combination of <a
+href="http://en.wikipedia.org/wiki/Recursive_descent_parser">Recursive Descent
+Parsing</a> and <a href=
+"http://en.wikipedia.org/wiki/Operator-precedence_parser">Operator-Precedence
+Parsing</a> to parse the Kaleidoscope language (the latter for
+binary expressions and the former for everything else). Before we get to
+parsing though, lets talk about the output of the parser: the Abstract Syntax
+Tree.</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="ast">The Abstract Syntax Tree (AST)</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>The AST for a program captures its behavior in such a way that it is easy for
+later stages of the compiler (e.g. code generation) to interpret. We basically
+want one object for each construct in the language, and the AST should closely
+model the language. In Kaleidoscope, we have expressions, a prototype, and a
+function object. We'll start with expressions first:</p>
+
+<div class="doc_code">
+<pre>
+/// ExprAST - Base class for all expression nodes.
+class ExprAST {
+public:
+ virtual ~ExprAST() {}
+};
+
+/// NumberExprAST - Expression class for numeric literals like "1.0".
+class NumberExprAST : public ExprAST {
+ double Val;
+public:
+ NumberExprAST(double val) : Val(val) {}
+};
+</pre>
+</div>
+
+<p>The code above shows the definition of the base ExprAST class and one
+subclass which we use for numeric literals. The important thing to note about
+this code is that the NumberExprAST class captures the numeric value of the
+literal as an instance variable. This allows later phases of the compiler to
+know what the stored numeric value is.</p>
+
+<p>Right now we only create the AST nodes, so there are no useful accessor methods on
+them. It would be very easy to add a virtual method to pretty print the code,
+for example. Here are the other expression AST node definitions that we'll use
+in the basic form of the Kaleidoscope language:
+</p>
+
+<div class="doc_code">
+<pre>
+/// VariableExprAST - Expression class for referencing a variable, like "a".
+class VariableExprAST : public ExprAST {
+ std::string Name;
+public:
+ VariableExprAST(const std::string &amp;name) : Name(name) {}
+};
+
+/// BinaryExprAST - Expression class for a binary operator.
+class BinaryExprAST : public ExprAST {
+ char Op;
+ ExprAST *LHS, *RHS;
+public:
+ BinaryExprAST(char op, ExprAST *lhs, ExprAST *rhs)
+ : Op(op), LHS(lhs), RHS(rhs) {}
+};
+
+/// CallExprAST - Expression class for function calls.
+class CallExprAST : public ExprAST {
+ std::string Callee;
+ std::vector&lt;ExprAST*&gt; Args;
+public:
+ CallExprAST(const std::string &amp;callee, std::vector&lt;ExprAST*&gt; &amp;args)
+ : Callee(callee), Args(args) {}
+};
+</pre>
+</div>
+
+<p>This is all (intentionally) rather straight-forward: variables capture the
+variable name, binary operators capture their opcode (e.g. '+'), and calls
+capture a function name as well as a list of any argument expressions. One thing
+that is nice about our AST is that it captures the language features without
+talking about the syntax of the language. Note that there is no discussion about
+precedence of binary operators, lexical structure, etc.</p>
+
+<p>For our basic language, these are all of the expression nodes we'll define.
+Because it doesn't have conditional control flow, it isn't Turing-complete;
+we'll fix that in a later installment. The two things we need next are a way
+to talk about the interface to a function, and a way to talk about functions
+themselves:</p>
+
+<div class="doc_code">
+<pre>
+/// PrototypeAST - This class represents the "prototype" for a function,
+/// which captures its name, and its argument names (thus implicitly the number
+/// of arguments the function takes).
+class PrototypeAST {
+ std::string Name;
+ std::vector&lt;std::string&gt; Args;
+public:
+ PrototypeAST(const std::string &amp;name, const std::vector&lt;std::string&gt; &amp;args)
+ : Name(name), Args(args) {}
+};
+
+/// FunctionAST - This class represents a function definition itself.
+class FunctionAST {
+ PrototypeAST *Proto;
+ ExprAST *Body;
+public:
+ FunctionAST(PrototypeAST *proto, ExprAST *body)
+ : Proto(proto), Body(body) {}
+};
+</pre>
+</div>
+
+<p>In Kaleidoscope, functions are typed with just a count of their arguments.
+Since all values are double precision floating point, the type of each argument
+doesn't need to be stored anywhere. In a more aggressive and realistic
+language, the "ExprAST" class would probably have a type field.</p>
+
+<p>With this scaffolding, we can now talk about parsing expressions and function
+bodies in Kaleidoscope.</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="parserbasics">Parser Basics</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>Now that we have an AST to build, we need to define the parser code to build
+it. The idea here is that we want to parse something like "x+y" (which is
+returned as three tokens by the lexer) into an AST that could be generated with
+calls like this:</p>
+
+<div class="doc_code">
+<pre>
+ ExprAST *X = new VariableExprAST("x");
+ ExprAST *Y = new VariableExprAST("y");
+ ExprAST *Result = new BinaryExprAST('+', X, Y);
+</pre>
+</div>
+
+<p>In order to do this, we'll start by defining some basic helper routines:</p>
+
+<div class="doc_code">
+<pre>
+/// CurTok/getNextToken - Provide a simple token buffer. CurTok is the current
+/// token the parser is looking at. getNextToken reads another token from the
+/// lexer and updates CurTok with its results.
+static int CurTok;
+static int getNextToken() {
+ return CurTok = gettok();
+}
+</pre>
+</div>
+
+<p>
+This implements a simple token buffer around the lexer. This allows
+us to look one token ahead at what the lexer is returning. Every function in
+our parser will assume that CurTok is the current token that needs to be
+parsed.</p>
+
+<div class="doc_code">
+<pre>
+
+/// Error* - These are little helper functions for error handling.
+ExprAST *Error(const char *Str) { fprintf(stderr, "Error: %s\n", Str);return 0;}
+PrototypeAST *ErrorP(const char *Str) { Error(Str); return 0; }
+FunctionAST *ErrorF(const char *Str) { Error(Str); return 0; }
+</pre>
+</div>
+
+<p>
+The <tt>Error</tt> routines are simple helper routines that our parser will use
+to handle errors. The error recovery in our parser will not be the best and
+is not particularly user-friendly, but it will be enough for our tutorial. These
+routines make it easier to handle errors in routines that have various return
+types: they always return null.</p>
+
+<p>With these basic helper functions, we can implement the first
+piece of our grammar: numeric literals.</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="parserprimexprs">Basic Expression Parsing</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>We start with numeric literals, because they are the simplest to process.
+For each production in our grammar, we'll define a function which parses that
+production. For numeric literals, we have:
+</p>
+
+<div class="doc_code">
+<pre>
+/// numberexpr ::= number
+static ExprAST *ParseNumberExpr() {
+ ExprAST *Result = new NumberExprAST(NumVal);
+ getNextToken(); // consume the number
+ return Result;
+}
+</pre>
+</div>
+
+<p>This routine is very simple: it expects to be called when the current token
+is a <tt>tok_number</tt> token. It takes the current number value, creates
+a <tt>NumberExprAST</tt> node, advances the lexer to the next token, and finally
+returns.</p>
+
+<p>There are some interesting aspects to this. The most important one is that
+this routine eats all of the tokens that correspond to the production and
+returns the lexer buffer with the next token (which is not part of the grammar
+production) ready to go. This is a fairly standard way to go for recursive
+descent parsers. For a better example, the parenthesis operator is defined like
+this:</p>
+
+<div class="doc_code">
+<pre>
+/// parenexpr ::= '(' expression ')'
+static ExprAST *ParseParenExpr() {
+ getNextToken(); // eat (.
+ ExprAST *V = ParseExpression();
+ if (!V) return 0;
+
+ if (CurTok != ')')
+ return Error("expected ')'");
+ getNextToken(); // eat ).
+ return V;
+}
+</pre>
+</div>
+
+<p>This function illustrates a number of interesting things about the
+parser:</p>
+
+<p>
+1) It shows how we use the Error routines. When called, this function expects
+that the current token is a '(' token, but after parsing the subexpression, it
+is possible that there is no ')' waiting. For example, if the user types in
+"(4 x" instead of "(4)", the parser should emit an error. Because errors can
+occur, the parser needs a way to indicate that they happened: in our parser, we
+return null on an error.</p>
+
+<p>2) Another interesting aspect of this function is that it uses recursion by
+calling <tt>ParseExpression</tt> (we will soon see that <tt>ParseExpression</tt> can call
+<tt>ParseParenExpr</tt>). This is powerful because it allows us to handle
+recursive grammars, and keeps each production very simple. Note that
+parentheses do not cause construction of AST nodes themselves. While we could
+do it this way, the most important role of parentheses is to guide the parser
+and provide grouping. Once the parser constructs the AST, parentheses are not
+needed.</p>
+
+<p>The next simple production is for handling variable references and function
+calls:</p>
+
+<div class="doc_code">
+<pre>
+/// identifierexpr
+/// ::= identifier
+/// ::= identifier '(' expression* ')'
+static ExprAST *ParseIdentifierExpr() {
+ std::string IdName = IdentifierStr;
+
+ getNextToken(); // eat identifier.
+
+ if (CurTok != '(') // Simple variable ref.
+ return new VariableExprAST(IdName);
+
+ // Call.
+ getNextToken(); // eat (
+ std::vector&lt;ExprAST*&gt; Args;
+ if (CurTok != ')') {
+ while (1) {
+ ExprAST *Arg = ParseExpression();
+ if (!Arg) return 0;
+ Args.push_back(Arg);
+
+ if (CurTok == ')') break;
+
+ if (CurTok != ',')
+ return Error("Expected ')' or ',' in argument list");
+ getNextToken();
+ }
+ }
+
+ // Eat the ')'.
+ getNextToken();
+
+ return new CallExprAST(IdName, Args);
+}
+</pre>
+</div>
+
+<p>This routine follows the same style as the other routines. (It expects to be
+called if the current token is a <tt>tok_identifier</tt> token). It also has
+recursion and error handling. One interesting aspect of this is that it uses
+<em>look-ahead</em> to determine if the current identifier is a stand alone
+variable reference or if it is a function call expression. It handles this by
+checking to see if the token after the identifier is a '(' token, constructing
+either a <tt>VariableExprAST</tt> or <tt>CallExprAST</tt> node as appropriate.
+</p>
+
+<p>Now that we have all of our simple expression-parsing logic in place, we can
+define a helper function to wrap it together into one entry point. We call this
+class of expressions "primary" expressions, for reasons that will become more
+clear <a href="LangImpl6.html#unary">later in the tutorial</a>. In order to
+parse an arbitrary primary expression, we need to determine what sort of
+expression it is:</p>
+
+<div class="doc_code">
+<pre>
+/// primary
+/// ::= identifierexpr
+/// ::= numberexpr
+/// ::= parenexpr
+static ExprAST *ParsePrimary() {
+ switch (CurTok) {
+ default: return Error("unknown token when expecting an expression");
+ case tok_identifier: return ParseIdentifierExpr();
+ case tok_number: return ParseNumberExpr();
+ case '(': return ParseParenExpr();
+ }
+}
+</pre>
+</div>
+
+<p>Now that you see the definition of this function, it is more obvious why we
+can assume the state of CurTok in the various functions. This uses look-ahead
+to determine which sort of expression is being inspected, and then parses it
+with a function call.</p>
+
+<p>Now that basic expressions are handled, we need to handle binary expressions.
+They are a bit more complex.</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="parserbinops">Binary Expression Parsing</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>Binary expressions are significantly harder to parse because they are often
+ambiguous. For example, when given the string "x+y*z", the parser can choose
+to parse it as either "(x+y)*z" or "x+(y*z)". With common definitions from
+mathematics, we expect the latter parse, because "*" (multiplication) has
+higher <em>precedence</em> than "+" (addition).</p>
+
+<p>There are many ways to handle this, but an elegant and efficient way is to
+use <a href=
+"http://en.wikipedia.org/wiki/Operator-precedence_parser">Operator-Precedence
+Parsing</a>. This parsing technique uses the precedence of binary operators to
+guide recursion. To start with, we need a table of precedences:</p>
+
+<div class="doc_code">
+<pre>
+/// BinopPrecedence - This holds the precedence for each binary operator that is
+/// defined.
+static std::map&lt;char, int&gt; BinopPrecedence;
+
+/// GetTokPrecedence - Get the precedence of the pending binary operator token.
+static int GetTokPrecedence() {
+ if (!isascii(CurTok))
+ return -1;
+
+ // Make sure it's a declared binop.
+ int TokPrec = BinopPrecedence[CurTok];
+ if (TokPrec &lt;= 0) return -1;
+ return TokPrec;
+}
+
+int main() {
+ // Install standard binary operators.
+ // 1 is lowest precedence.
+ BinopPrecedence['&lt;'] = 10;
+ BinopPrecedence['+'] = 20;
+ BinopPrecedence['-'] = 20;
+ BinopPrecedence['*'] = 40; // highest.
+ ...
+}
+</pre>
+</div>
+
+<p>For the basic form of Kaleidoscope, we will only support 4 binary operators
+(this can obviously be extended by you, our brave and intrepid reader). The
+<tt>GetTokPrecedence</tt> function returns the precedence for the current token,
+or -1 if the token is not a binary operator. Having a map makes it easy to add
+new operators and makes it clear that the algorithm doesn't depend on the
+specific operators involved, but it would be easy enough to eliminate the map
+and do the comparisons in the <tt>GetTokPrecedence</tt> function. (Or just use
+a fixed-size array).</p>
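+
+<p>For instance, a rough sketch of that array-based variant (an illustration,
+not the code this tutorial uses) might look like this, with the table filled in
+from <tt>main</tt> just as the map is:</p>
+
+<div class="doc_code">
+<pre>
+// Hypothetical alternative: precedences in a fixed-size table indexed by the
+// operator character; 0 means "not a binary operator".
+static int BinopPrecedenceTable[256];
+
+static int GetTokPrecedence() {
+  if (CurTok &lt; 0 || CurTok &gt; 255)
+    return -1;                       // not a single-character token
+  int TokPrec = BinopPrecedenceTable[CurTok];
+  return TokPrec &lt;= 0 ? -1 : TokPrec;
+}
+</pre>
+</div>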
+
+<p>With the helper above defined, we can now start parsing binary expressions.
+The basic idea of operator precedence parsing is to break down an expression
+with potentially ambiguous binary operators into pieces. Consider, for example,
+the expression "a+b+(c+d)*e*f+g". Operator precedence parsing considers this
+as a stream of primary expressions separated by binary operators. As such,
+it will first parse the leading primary expression "a", then it will see the
+pairs [+, b] [+, (c+d)] [*, e] [*, f] and [+, g]. Note that because parentheses
+are primary expressions, the binary expression parser doesn't need to worry
+about nested subexpressions like (c+d) at all.
+</p>
+
+<p>
+To start, an expression is a primary expression potentially followed by a
+sequence of [binop,primaryexpr] pairs:</p>
+
+<div class="doc_code">
+<pre>
+/// expression
+/// ::= primary binoprhs
+///
+static ExprAST *ParseExpression() {
+ ExprAST *LHS = ParsePrimary();
+ if (!LHS) return 0;
+
+ return ParseBinOpRHS(0, LHS);
+}
+</pre>
+</div>
+
+<p><tt>ParseBinOpRHS</tt> is the function that parses the sequence of pairs for
+us. It takes a precedence and a pointer to an expression for the part that has been
+parsed so far. Note that "x" is a perfectly valid expression: as such, "binoprhs" is
+allowed to be empty, in which case it returns the expression that is passed into
+it. In our example above, the code passes the expression for "a" into
+<tt>ParseBinOpRHS</tt> and the current token is "+".</p>
+
+<p>The precedence value passed into <tt>ParseBinOpRHS</tt> indicates the <em>
+minimal operator precedence</em> that the function is allowed to eat. For
+example, if the current pair stream is [+, x] and <tt>ParseBinOpRHS</tt> is
+passed in a precedence of 40, it will not consume any tokens (because the
+precedence of '+' is only 20). With this in mind, <tt>ParseBinOpRHS</tt> starts
+with:</p>
+
+<div class="doc_code">
+<pre>
+/// binoprhs
+/// ::= ('+' primary)*
+static ExprAST *ParseBinOpRHS(int ExprPrec, ExprAST *LHS) {
+ // If this is a binop, find its precedence.
+ while (1) {
+ int TokPrec = GetTokPrecedence();
+
+ // If this is a binop that binds at least as tightly as the current binop,
+ // consume it, otherwise we are done.
+ if (TokPrec &lt; ExprPrec)
+ return LHS;
+</pre>
+</div>
+
+<p>This code gets the precedence of the current token and checks to see if it is
+too low. Because we defined invalid tokens to have a precedence of -1, this
+check implicitly knows that the pair-stream ends when the token stream runs out
+of binary operators. If this check succeeds, we know that the token is a binary
+operator and that it will be included in this expression:</p>
+
+<div class="doc_code">
+<pre>
+ // Okay, we know this is a binop.
+ int BinOp = CurTok;
+ getNextToken(); // eat binop
+
+ // Parse the primary expression after the binary operator.
+ ExprAST *RHS = ParsePrimary();
+ if (!RHS) return 0;
+</pre>
+</div>
+
+<p>As such, this code eats (and remembers) the binary operator and then parses
+the primary expression that follows. This builds up the whole pair, the first of
+which is [+, b] for the running example.</p>
+
+<p>Now that we parsed the left-hand side of an expression and one pair of the
+RHS sequence, we have to decide which way the expression associates. In
+particular, we could have "(a+b) binop unparsed" or "a + (b binop unparsed)".
+To determine this, we look ahead at "binop" to determine its precedence and
+compare it to BinOp's precedence (which is '+' in this case):</p>
+
+<div class="doc_code">
+<pre>
+ // If BinOp binds less tightly with RHS than the operator after RHS, let
+ // the pending operator take RHS as its LHS.
+ int NextPrec = GetTokPrecedence();
+ if (TokPrec &lt; NextPrec) {
+</pre>
+</div>
+
+<p>If the precedence of the binop to the right of "RHS" is lower or equal to the
+precedence of our current operator, then we know that the parentheses associate
+as "(a+b) binop ...". In our example, the current operator is "+" and the next
+operator is "+", we know that they have the same precedence. In this case we'll
+create the AST node for "a+b", and then continue parsing:</p>
+
+<div class="doc_code">
+<pre>
+ ... if body omitted ...
+ }
+
+ // Merge LHS/RHS.
+ LHS = new BinaryExprAST(BinOp, LHS, RHS);
+ } // loop around to the top of the while loop.
+}
+</pre>
+</div>
+
+<p>In our example above, this will turn "a+b+" into "(a+b)" and execute the next
+iteration of the loop, with "+" as the current token. The code above will eat,
+remember, and parse "(c+d)" as the primary expression, which makes the
+current pair equal to [+, (c+d)]. It will then evaluate the 'if' conditional above with
+"*" as the binop to the right of the primary. In this case, the precedence of "*" is
+higher than the precedence of "+" so the if condition will be entered.</p>
+
+<p>The critical question left here is "how can the if condition parse the right
+hand side in full"? In particular, to build the AST correctly for our example,
+it needs to get all of "(c+d)*e*f" as the RHS expression variable. The code to
+do this is surprisingly simple (code from the above two blocks duplicated for
+context):</p>
+
+<div class="doc_code">
+<pre>
+ // If BinOp binds less tightly with RHS than the operator after RHS, let
+ // the pending operator take RHS as its LHS.
+ int NextPrec = GetTokPrecedence();
+ if (TokPrec &lt; NextPrec) {
+ <b>RHS = ParseBinOpRHS(TokPrec+1, RHS);
+ if (RHS == 0) return 0;</b>
+ }
+ // Merge LHS/RHS.
+ LHS = new BinaryExprAST(BinOp, LHS, RHS);
+ } // loop around to the top of the while loop.
+}
+</pre>
+</div>
+
+<p>At this point, we know that the binary operator to the RHS of our primary
+has higher precedence than the binop we are currently parsing. As such, we know
+that any sequence of pairs whose operators are all higher precedence than "+"
+should be parsed together and returned as "RHS". To do this, we recursively
+invoke the <tt>ParseBinOpRHS</tt> function specifying "TokPrec+1" as the minimum
+precedence required for it to continue. In our example above, this will cause
+it to return the AST node for "(c+d)*e*f" as RHS, which is then set as the RHS
+of the '+' expression.</p>
+
+<p>Finally, on the next iteration of the while loop, the "+g" piece is parsed
+and added to the AST. With this little bit of code (14 non-trivial lines), we
+correctly handle fully general binary expression parsing in a very elegant way.
+This was a whirlwind tour of this code, and it is somewhat subtle. I recommend
+running through it with a few tough examples to see how it works.
+</p>
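+
+<p>For instance, here is one possible hand trace (an illustration, not code from
+the tutorial) of parsing "a+b*c+d" with the precedences installed in
+<tt>main</tt>:</p>
+
+<div class="doc_code">
+<pre>
+// ParseExpression("a+b*c+d"): LHS = a; call ParseBinOpRHS(0, a)
+//   '+' (prec 20) &gt;= 0: eat '+', parse primary RHS = b
+//     next operator '*' (prec 40) &gt; 20: RHS = ParseBinOpRHS(21, b)
+//       '*' (40) &gt;= 21: eat '*', RHS = c; next '+' (20) is not &gt; 40, so merge (b*c)
+//       '+' (20) &lt; 21: return (b*c)
+//   merge: LHS = (a+(b*c))
+//   '+' (20) &gt;= 0: eat '+', RHS = d; nothing follows (prec -1), so merge
+//   LHS = ((a+(b*c))+d); next precedence is -1 &lt; 0: return LHS
+</pre>
+</div>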
+
+<p>This wraps up handling of expressions. At this point, we can point the
+parser at an arbitrary token stream and build an expression from it, stopping
+at the first token that is not part of the expression. Next up we need to
+handle function definitions, etc.</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="parsertop">Parsing the Rest</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>
+The next thing missing is handling of function prototypes. In Kaleidoscope,
+these are used both for 'extern' function declarations as well as function body
+definitions. The code to do this is straight-forward and not very interesting
+(once you've survived expressions):
+</p>
+
+<div class="doc_code">
+<pre>
+/// prototype
+/// ::= id '(' id* ')'
+static PrototypeAST *ParsePrototype() {
+ if (CurTok != tok_identifier)
+ return ErrorP("Expected function name in prototype");
+
+ std::string FnName = IdentifierStr;
+ getNextToken();
+
+ if (CurTok != '(')
+ return ErrorP("Expected '(' in prototype");
+
+ // Read the list of argument names.
+ std::vector&lt;std::string&gt; ArgNames;
+ while (getNextToken() == tok_identifier)
+ ArgNames.push_back(IdentifierStr);
+ if (CurTok != ')')
+ return ErrorP("Expected ')' in prototype");
+
+ // success.
+ getNextToken(); // eat ')'.
+
+ return new PrototypeAST(FnName, ArgNames);
+}
+</pre>
+</div>
+
+<p>Given this, a function definition is very simple, just a prototype plus
+an expression to implement the body:</p>
+
+<div class="doc_code">
+<pre>
+/// definition ::= 'def' prototype expression
+static FunctionAST *ParseDefinition() {
+ getNextToken(); // eat def.
+ PrototypeAST *Proto = ParsePrototype();
+ if (Proto == 0) return 0;
+
+ if (ExprAST *E = ParseExpression())
+ return new FunctionAST(Proto, E);
+ return 0;
+}
+</pre>
+</div>
+
+<p>In addition, we support 'extern' to declare functions like 'sin' and 'cos' as
+well as to support forward declaration of user functions. These 'extern's are just
+prototypes with no body:</p>
+
+<div class="doc_code">
+<pre>
+/// external ::= 'extern' prototype
+static PrototypeAST *ParseExtern() {
+ getNextToken(); // eat extern.
+ return ParsePrototype();
+}
+</pre>
+</div>
+
+<p>Finally, we'll also let the user type in arbitrary top-level expressions and
+evaluate them on the fly. We will handle this by defining anonymous nullary
+(zero argument) functions for them:</p>
+
+<div class="doc_code">
+<pre>
+/// toplevelexpr ::= expression
+static FunctionAST *ParseTopLevelExpr() {
+ if (ExprAST *E = ParseExpression()) {
+ // Make an anonymous proto.
+ PrototypeAST *Proto = new PrototypeAST("", std::vector&lt;std::string&gt;());
+ return new FunctionAST(Proto, E);
+ }
+ return 0;
+}
+</pre>
+</div>
+
+<p>Now that we have all the pieces, let's build a little driver that will let us
+actually <em>execute</em> this code we've built!</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="driver">The Driver</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>The driver for this simply invokes all of the parsing pieces with a top-level
+dispatch loop. There isn't much interesting here, so I'll just include the
+top-level loop. See <a href="#code">below</a> for full code in the "Top-Level
+Parsing" section.</p>
+
+<div class="doc_code">
+<pre>
+/// top ::= definition | external | expression | ';'
+static void MainLoop() {
+ while (1) {
+ fprintf(stderr, "ready&gt; ");
+ switch (CurTok) {
+ case tok_eof: return;
+ case ';': getNextToken(); break; // ignore top-level semicolons.
+ case tok_def: HandleDefinition(); break;
+ case tok_extern: HandleExtern(); break;
+ default: HandleTopLevelExpression(); break;
+ }
+ }
+}
+</pre>
+</div>
+
+<p>The most interesting part of this is that we ignore top-level semicolons.
+Why is this, you ask? The basic reason is that if you type "4 + 5" at the
+command line, the parser doesn't know whether that is the end of what you will type
+or not. For example, on the next line you could type "def foo..." in which case
+4+5 is the end of a top-level expression. Alternatively you could type "* 6",
+which would continue the expression. Having top-level semicolons allows you to
+type "4+5;", and the parser will know you are done.</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="conclusions">Conclusions</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>With just under 400 lines of commented code (240 lines of non-comment,
+non-blank code), we fully defined our minimal language, including a lexer,
+parser, and AST builder. With this done, the executable will validate
+Kaleidoscope code and tell us if it is grammatically invalid. For
+example, here is a sample interaction:</p>
+
+<div class="doc_code">
+<pre>
+$ <b>./a.out</b>
+ready&gt; <b>def foo(x y) x+foo(y, 4.0);</b>
+Parsed a function definition.
+ready&gt; <b>def foo(x y) x+y y;</b>
+Parsed a function definition.
+Parsed a top-level expr
+ready&gt; <b>def foo(x y) x+y );</b>
+Parsed a function definition.
+Error: unknown token when expecting an expression
+ready&gt; <b>extern sin(a);</b>
+ready&gt; Parsed an extern
+ready&gt; <b>^D</b>
+$
+</pre>
+</div>
+
+<p>There is a lot of room for extension here. You can define new AST nodes,
+extend the language in many ways, etc. In the <a href="LangImpl3.html">next
+installment</a>, we will describe how to generate LLVM Intermediate
+Representation (IR) from the AST.</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="code">Full Code Listing</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>
+Here is the complete code listing for this and the previous chapter.
+Note that it is fully self-contained: you don't need LLVM or any external
+libraries at all for this. (Besides the C and C++ standard libraries, of
+course.) To build this, just compile with:</p>
+
+<div class="doc_code">
+<pre>
+# Compile
+clang++ -g -O3 toy.cpp
+# Run
+./a.out
+</pre>
+</div>
+
+<p>Here is the code:</p>
+
+<div class="doc_code">
+<pre>
+#include &lt;cstdio&gt;
+#include &lt;cstdlib&gt;
+#include &lt;string&gt;
+#include &lt;map&gt;
+#include &lt;vector&gt;
+
+//===----------------------------------------------------------------------===//
+// Lexer
+//===----------------------------------------------------------------------===//
+
+// The lexer returns tokens [0-255] if it is an unknown character, otherwise one
+// of these for known things.
+enum Token {
+ tok_eof = -1,
+
+ // commands
+ tok_def = -2, tok_extern = -3,
+
+ // primary
+ tok_identifier = -4, tok_number = -5
+};
+
+static std::string IdentifierStr; // Filled in if tok_identifier
+static double NumVal; // Filled in if tok_number
+
+/// gettok - Return the next token from standard input.
+static int gettok() {
+ static int LastChar = ' ';
+
+ // Skip any whitespace.
+ while (isspace(LastChar))
+ LastChar = getchar();
+
+ if (isalpha(LastChar)) { // identifier: [a-zA-Z][a-zA-Z0-9]*
+ IdentifierStr = LastChar;
+ while (isalnum((LastChar = getchar())))
+ IdentifierStr += LastChar;
+
+ if (IdentifierStr == "def") return tok_def;
+ if (IdentifierStr == "extern") return tok_extern;
+ return tok_identifier;
+ }
+
+ if (isdigit(LastChar) || LastChar == '.') { // Number: [0-9.]+
+ std::string NumStr;
+ do {
+ NumStr += LastChar;
+ LastChar = getchar();
+ } while (isdigit(LastChar) || LastChar == '.');
+
+ NumVal = strtod(NumStr.c_str(), 0);
+ return tok_number;
+ }
+
+ if (LastChar == '#') {
+ // Comment until end of line.
+ do LastChar = getchar();
+ while (LastChar != EOF &amp;&amp; LastChar != '\n' &amp;&amp; LastChar != '\r');
+
+ if (LastChar != EOF)
+ return gettok();
+ }
+
+ // Check for end of file. Don't eat the EOF.
+ if (LastChar == EOF)
+ return tok_eof;
+
+ // Otherwise, just return the character as its ascii value.
+ int ThisChar = LastChar;
+ LastChar = getchar();
+ return ThisChar;
+}
+
+//===----------------------------------------------------------------------===//
+// Abstract Syntax Tree (aka Parse Tree)
+//===----------------------------------------------------------------------===//
+
+/// ExprAST - Base class for all expression nodes.
+class ExprAST {
+public:
+ virtual ~ExprAST() {}
+};
+
+/// NumberExprAST - Expression class for numeric literals like "1.0".
+class NumberExprAST : public ExprAST {
+ double Val;
+public:
+ NumberExprAST(double val) : Val(val) {}
+};
+
+/// VariableExprAST - Expression class for referencing a variable, like "a".
+class VariableExprAST : public ExprAST {
+ std::string Name;
+public:
+ VariableExprAST(const std::string &amp;name) : Name(name) {}
+};
+
+/// BinaryExprAST - Expression class for a binary operator.
+class BinaryExprAST : public ExprAST {
+ char Op;
+ ExprAST *LHS, *RHS;
+public:
+ BinaryExprAST(char op, ExprAST *lhs, ExprAST *rhs)
+ : Op(op), LHS(lhs), RHS(rhs) {}
+};
+
+/// CallExprAST - Expression class for function calls.
+class CallExprAST : public ExprAST {
+ std::string Callee;
+ std::vector&lt;ExprAST*&gt; Args;
+public:
+ CallExprAST(const std::string &amp;callee, std::vector&lt;ExprAST*&gt; &amp;args)
+ : Callee(callee), Args(args) {}
+};
+
+/// PrototypeAST - This class represents the "prototype" for a function,
+/// which captures its name, and its argument names (thus implicitly the number
+/// of arguments the function takes).
+class PrototypeAST {
+ std::string Name;
+ std::vector&lt;std::string&gt; Args;
+public:
+ PrototypeAST(const std::string &amp;name, const std::vector&lt;std::string&gt; &amp;args)
+ : Name(name), Args(args) {}
+
+};
+
+/// FunctionAST - This class represents a function definition itself.
+class FunctionAST {
+ PrototypeAST *Proto;
+ ExprAST *Body;
+public:
+ FunctionAST(PrototypeAST *proto, ExprAST *body)
+ : Proto(proto), Body(body) {}
+
+};
+
+//===----------------------------------------------------------------------===//
+// Parser
+//===----------------------------------------------------------------------===//
+
+/// CurTok/getNextToken - Provide a simple token buffer. CurTok is the current
+/// token the parser is looking at. getNextToken reads another token from the
+/// lexer and updates CurTok with its results.
+static int CurTok;
+static int getNextToken() {
+ return CurTok = gettok();
+}
+
+/// BinopPrecedence - This holds the precedence for each binary operator that is
+/// defined.
+static std::map&lt;char, int&gt; BinopPrecedence;
+
+/// GetTokPrecedence - Get the precedence of the pending binary operator token.
+static int GetTokPrecedence() {
+ if (!isascii(CurTok))
+ return -1;
+
+ // Make sure it's a declared binop.
+ int TokPrec = BinopPrecedence[CurTok];
+ if (TokPrec &lt;= 0) return -1;
+ return TokPrec;
+}
+
+/// Error* - These are little helper functions for error handling.
+ExprAST *Error(const char *Str) { fprintf(stderr, "Error: %s\n", Str);return 0;}
+PrototypeAST *ErrorP(const char *Str) { Error(Str); return 0; }
+FunctionAST *ErrorF(const char *Str) { Error(Str); return 0; }
+
+static ExprAST *ParseExpression();
+
+/// identifierexpr
+/// ::= identifier
+/// ::= identifier '(' expression* ')'
+static ExprAST *ParseIdentifierExpr() {
+ std::string IdName = IdentifierStr;
+
+ getNextToken(); // eat identifier.
+
+ if (CurTok != '(') // Simple variable ref.
+ return new VariableExprAST(IdName);
+
+ // Call.
+ getNextToken(); // eat (
+ std::vector&lt;ExprAST*&gt; Args;
+ if (CurTok != ')') {
+ while (1) {
+ ExprAST *Arg = ParseExpression();
+ if (!Arg) return 0;
+ Args.push_back(Arg);
+
+ if (CurTok == ')') break;
+
+ if (CurTok != ',')
+ return Error("Expected ')' or ',' in argument list");
+ getNextToken();
+ }
+ }
+
+ // Eat the ')'.
+ getNextToken();
+
+ return new CallExprAST(IdName, Args);
+}
+
+/// numberexpr ::= number
+static ExprAST *ParseNumberExpr() {
+ ExprAST *Result = new NumberExprAST(NumVal);
+ getNextToken(); // consume the number
+ return Result;
+}
+
+/// parenexpr ::= '(' expression ')'
+static ExprAST *ParseParenExpr() {
+ getNextToken(); // eat (.
+ ExprAST *V = ParseExpression();
+ if (!V) return 0;
+
+ if (CurTok != ')')
+ return Error("expected ')'");
+ getNextToken(); // eat ).
+ return V;
+}
+
+/// primary
+/// ::= identifierexpr
+/// ::= numberexpr
+/// ::= parenexpr
+static ExprAST *ParsePrimary() {
+ switch (CurTok) {
+ default: return Error("unknown token when expecting an expression");
+ case tok_identifier: return ParseIdentifierExpr();
+ case tok_number: return ParseNumberExpr();
+ case '(': return ParseParenExpr();
+ }
+}
+
+/// binoprhs
+/// ::= ('+' primary)*
+static ExprAST *ParseBinOpRHS(int ExprPrec, ExprAST *LHS) {
+ // If this is a binop, find its precedence.
+ while (1) {
+ int TokPrec = GetTokPrecedence();
+
+ // If this is a binop that binds at least as tightly as the current binop,
+ // consume it, otherwise we are done.
+ if (TokPrec &lt; ExprPrec)
+ return LHS;
+
+ // Okay, we know this is a binop.
+ int BinOp = CurTok;
+ getNextToken(); // eat binop
+
+ // Parse the primary expression after the binary operator.
+ ExprAST *RHS = ParsePrimary();
+ if (!RHS) return 0;
+
+ // If BinOp binds less tightly with RHS than the operator after RHS, let
+ // the pending operator take RHS as its LHS.
+ int NextPrec = GetTokPrecedence();
+ if (TokPrec &lt; NextPrec) {
+ RHS = ParseBinOpRHS(TokPrec+1, RHS);
+ if (RHS == 0) return 0;
+ }
+
+ // Merge LHS/RHS.
+ LHS = new BinaryExprAST(BinOp, LHS, RHS);
+ }
+}
+
+/// expression
+/// ::= primary binoprhs
+///
+static ExprAST *ParseExpression() {
+ ExprAST *LHS = ParsePrimary();
+ if (!LHS) return 0;
+
+ return ParseBinOpRHS(0, LHS);
+}
+
+/// prototype
+/// ::= id '(' id* ')'
+static PrototypeAST *ParsePrototype() {
+ if (CurTok != tok_identifier)
+ return ErrorP("Expected function name in prototype");
+
+ std::string FnName = IdentifierStr;
+ getNextToken();
+
+ if (CurTok != '(')
+ return ErrorP("Expected '(' in prototype");
+
+ std::vector&lt;std::string&gt; ArgNames;
+ while (getNextToken() == tok_identifier)
+ ArgNames.push_back(IdentifierStr);
+ if (CurTok != ')')
+ return ErrorP("Expected ')' in prototype");
+
+ // success.
+ getNextToken(); // eat ')'.
+
+ return new PrototypeAST(FnName, ArgNames);
+}
+
+/// definition ::= 'def' prototype expression
+static FunctionAST *ParseDefinition() {
+ getNextToken(); // eat def.
+ PrototypeAST *Proto = ParsePrototype();
+ if (Proto == 0) return 0;
+
+ if (ExprAST *E = ParseExpression())
+ return new FunctionAST(Proto, E);
+ return 0;
+}
+
+/// toplevelexpr ::= expression
+static FunctionAST *ParseTopLevelExpr() {
+ if (ExprAST *E = ParseExpression()) {
+ // Make an anonymous proto.
+ PrototypeAST *Proto = new PrototypeAST("", std::vector&lt;std::string&gt;());
+ return new FunctionAST(Proto, E);
+ }
+ return 0;
+}
+
+/// external ::= 'extern' prototype
+static PrototypeAST *ParseExtern() {
+ getNextToken(); // eat extern.
+ return ParsePrototype();
+}
+
+//===----------------------------------------------------------------------===//
+// Top-Level parsing
+//===----------------------------------------------------------------------===//
+
+static void HandleDefinition() {
+ if (ParseDefinition()) {
+ fprintf(stderr, "Parsed a function definition.\n");
+ } else {
+ // Skip token for error recovery.
+ getNextToken();
+ }
+}
+
+static void HandleExtern() {
+ if (ParseExtern()) {
+ fprintf(stderr, "Parsed an extern\n");
+ } else {
+ // Skip token for error recovery.
+ getNextToken();
+ }
+}
+
+static void HandleTopLevelExpression() {
+ // Evaluate a top-level expression into an anonymous function.
+ if (ParseTopLevelExpr()) {
+ fprintf(stderr, "Parsed a top-level expr\n");
+ } else {
+ // Skip token for error recovery.
+ getNextToken();
+ }
+}
+
+/// top ::= definition | external | expression | ';'
+static void MainLoop() {
+ while (1) {
+ fprintf(stderr, "ready&gt; ");
+ switch (CurTok) {
+ case tok_eof: return;
+ case ';': getNextToken(); break; // ignore top-level semicolons.
+ case tok_def: HandleDefinition(); break;
+ case tok_extern: HandleExtern(); break;
+ default: HandleTopLevelExpression(); break;
+ }
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Main driver code.
+//===----------------------------------------------------------------------===//
+
+int main() {
+ // Install standard binary operators.
+ // 1 is lowest precedence.
+ BinopPrecedence['&lt;'] = 10;
+ BinopPrecedence['+'] = 20;
+ BinopPrecedence['-'] = 20;
+ BinopPrecedence['*'] = 40; // highest.
+
+ // Prime the first token.
+ fprintf(stderr, "ready&gt; ");
+ getNextToken();
+
+ // Run the main "interpreter loop" now.
+ MainLoop();
+
+ return 0;
+}
+</pre>
+</div>
+<a href="LangImpl3.html">Next: Implementing Code Generation to LLVM IR</a>
+</div>
+
+<!-- *********************************************************************** -->
+<hr>
+<address>
+ <a href="http://jigsaw.w3.org/css-validator/check/referer"><img
+ src="http://jigsaw.w3.org/css-validator/images/vcss" alt="Valid CSS!"></a>
+ <a href="http://validator.w3.org/check/referer"><img
+ src="http://www.w3.org/Icons/valid-html401" alt="Valid HTML 4.01!"></a>
+
+ <a href="mailto:sabre@nondot.org">Chris Lattner</a><br>
+ <a href="http://llvm.org/">The LLVM Compiler Infrastructure</a><br>
+ Last modified: $Date$
+</address>
+</body>
+</html>
diff --git a/docs/tutorial/LangImpl3.html b/docs/tutorial/LangImpl3.html
new file mode 100644
index 00000000000..57ff7373f69
--- /dev/null
+++ b/docs/tutorial/LangImpl3.html
@@ -0,0 +1,1268 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
+ "http://www.w3.org/TR/html4/strict.dtd">
+
+<html>
+<head>
+ <title>Kaleidoscope: Implementing code generation to LLVM IR</title>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
+ <meta name="author" content="Chris Lattner">
+ <link rel="stylesheet" href="../_static/llvm.css" type="text/css">
+</head>
+
+<body>
+
+<h1>Kaleidoscope: Code generation to LLVM IR</h1>
+
+<ul>
+<li><a href="index.html">Up to Tutorial Index</a></li>
+<li>Chapter 3
+ <ol>
+ <li><a href="#intro">Chapter 3 Introduction</a></li>
+ <li><a href="#basics">Code Generation Setup</a></li>
+ <li><a href="#exprs">Expression Code Generation</a></li>
+ <li><a href="#funcs">Function Code Generation</a></li>
+ <li><a href="#driver">Driver Changes and Closing Thoughts</a></li>
+ <li><a href="#code">Full Code Listing</a></li>
+ </ol>
+</li>
+<li><a href="LangImpl4.html">Chapter 4</a>: Adding JIT and Optimizer
+Support</li>
+</ul>
+
+<div class="doc_author">
+ <p>Written by <a href="mailto:sabre@nondot.org">Chris Lattner</a></p>
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="intro">Chapter 3 Introduction</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>Welcome to Chapter 3 of the "<a href="index.html">Implementing a language
+with LLVM</a>" tutorial. This chapter shows you how to transform the <a
+href="LangImpl2.html">Abstract Syntax Tree</a>, built in Chapter 2, into LLVM IR.
+This will teach you a little bit about how LLVM does things, as well as
+demonstrate how easy it is to use. It's much more work to build a lexer and
+parser than it is to generate LLVM IR code. :)
+</p>
+
+<p><b>Please note</b>: the code in this chapter and later requires LLVM 2.2 or
+later. LLVM 2.1 and before will not work with it. Also note that you need
+to use a version of this tutorial that matches your LLVM release: If you are
+using an official LLVM release, use the version of the documentation included
+with your release or on the <a href="http://llvm.org/releases/">llvm.org
+releases page</a>.</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="basics">Code Generation Setup</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>
+In order to generate LLVM IR, we want some simple setup to get started. First
+we define virtual code generation (codegen) methods in each AST class:</p>
+
+<div class="doc_code">
+<pre>
+/// ExprAST - Base class for all expression nodes.
+class ExprAST {
+public:
+ virtual ~ExprAST() {}
+ <b>virtual Value *Codegen() = 0;</b>
+};
+
+/// NumberExprAST - Expression class for numeric literals like "1.0".
+class NumberExprAST : public ExprAST {
+ double Val;
+public:
+ NumberExprAST(double val) : Val(val) {}
+ <b>virtual Value *Codegen();</b>
+};
+...
+</pre>
+</div>
+
+<p>The Codegen() method says to emit IR for that AST node along with all the things it
+depends on, and they all return an LLVM Value object.
+"Value" is the class used to represent a "<a
+href="http://en.wikipedia.org/wiki/Static_single_assignment_form">Static Single
+Assignment (SSA)</a> register" or "SSA value" in LLVM. The most distinct aspect
+of SSA values is that their value is computed as the related instruction
+executes, and it does not get a new value until (and if) the instruction
+re-executes. In other words, there is no way to "change" an SSA value. For
+more information, please read up on <a
+href="http://en.wikipedia.org/wiki/Static_single_assignment_form">Static Single
+Assignment</a> - the concepts are really quite natural once you grok them.</p>
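+
+<p>As a tiny illustration (hand-written here, not produced by the tutorial code),
+an SSA-form snippet for computing (x+y)*(x+y) could look like this; every name
+is defined exactly once, and "updating" a value means defining a new one:</p>
+
+<div class="doc_code">
+<pre>
+%sum  = fadd double %x, %y
+%prod = fmul double %sum, %sum   ; uses %sum twice; %sum itself never changes
+</pre>
+</div>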
+
+<p>Note that instead of adding virtual methods to the ExprAST class hierarchy,
+it could also make sense to use a <a
+href="http://en.wikipedia.org/wiki/Visitor_pattern">visitor pattern</a> or some
+other way to model this. Again, this tutorial won't dwell on good software
+engineering practices: for our purposes, adding a virtual method is
+simplest.</p>
+
+<p>The
+second thing we want is an "Error" method like we used for the parser, which will
+be used to report errors found during code generation (for example, use of an
+undeclared parameter):</p>
+
+<div class="doc_code">
+<pre>
+Value *ErrorV(const char *Str) { Error(Str); return 0; }
+
+static Module *TheModule;
+static IRBuilder&lt;&gt; Builder(getGlobalContext());
+static std::map&lt;std::string, Value*&gt; NamedValues;
+</pre>
+</div>
+
+<p>The static variables will be used during code generation. <tt>TheModule</tt>
+is the LLVM construct that contains all of the functions and global variables in
+a chunk of code. In many ways, it is the top-level structure that the LLVM IR
+uses to contain code.</p>
+
+<p>The <tt>Builder</tt> object is a helper object that makes it easy to generate
+LLVM instructions. Instances of the <a
+href="http://llvm.org/doxygen/IRBuilder_8h-source.html"><tt>IRBuilder</tt></a>
+class template keep track of the current place to insert instructions and have
+methods to create new instructions.</p>
+
+<p>The <tt>NamedValues</tt> map keeps track of which values are defined in the
+current scope and what their LLVM representation is. (In other words, it is a
+symbol table for the code). In this form of Kaleidoscope, the only things that
+can be referenced are function parameters. As such, function parameters will
+be in this map when generating code for their function body.</p>
+
+<p>
+With these basics in place, we can start talking about how to generate code for
+each expression. Note that this assumes that the <tt>Builder</tt> has been set
+up to generate code <em>into</em> something. For now, we'll assume that this
+has already been done, and we'll just use it to emit code.
+</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="exprs">Expression Code Generation</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>Generating LLVM code for expression nodes is very straightforward: less
+than 45 lines of commented code for all four of our expression nodes. First
+we'll do numeric literals:</p>
+
+<div class="doc_code">
+<pre>
+Value *NumberExprAST::Codegen() {
+ return ConstantFP::get(getGlobalContext(), APFloat(Val));
+}
+</pre>
+</div>
+
+<p>In the LLVM IR, numeric constants are represented with the
+<tt>ConstantFP</tt> class, which holds the numeric value in an <tt>APFloat</tt>
+internally (<tt>APFloat</tt> has the capability of holding floating point
+constants of <em>A</em>rbitrary <em>P</em>recision). This code basically just
+creates and returns a <tt>ConstantFP</tt>. Note that in the LLVM IR,
+constants are all uniqued together and shared. For this reason, the API
+uses the "foo::get(...)" idiom instead of "new foo(..)" or "foo::Create(..)".</p>
+
+<div class="doc_code">
+<pre>
+Value *VariableExprAST::Codegen() {
+ // Look this variable up in the function.
+ Value *V = NamedValues[Name];
+ return V ? V : ErrorV("Unknown variable name");
+}
+</pre>
+</div>
+
+<p>References to variables are also quite simple using LLVM. In the simple version
+of Kaleidoscope, we assume that the variable has already been emitted somewhere
+and its value is available. In practice, the only values that can be in the
+<tt>NamedValues</tt> map are function arguments. This
+code simply checks to see that the specified name is in the map (if not, an
+unknown variable is being referenced) and returns the value for it. In future
+chapters, we'll add support for <a href="LangImpl5.html#for">loop induction
+variables</a> in the symbol table, and for <a
+href="LangImpl7.html#localvars">local variables</a>.</p>
+
+<div class="doc_code">
+<pre>
+Value *BinaryExprAST::Codegen() {
+ Value *L = LHS-&gt;Codegen();
+ Value *R = RHS-&gt;Codegen();
+ if (L == 0 || R == 0) return 0;
+
+ switch (Op) {
+ case '+': return Builder.CreateFAdd(L, R, "addtmp");
+ case '-': return Builder.CreateFSub(L, R, "subtmp");
+ case '*': return Builder.CreateFMul(L, R, "multmp");
+ case '&lt;':
+ L = Builder.CreateFCmpULT(L, R, "cmptmp");
+ // Convert bool 0/1 to double 0.0 or 1.0
+ return Builder.CreateUIToFP(L, Type::getDoubleTy(getGlobalContext()),
+ "booltmp");
+ default: return ErrorV("invalid binary operator");
+ }
+}
+</pre>
+</div>
+
+<p>Binary operators start to get more interesting. The basic idea here is that
+we recursively emit code for the left-hand side of the expression, then the
+right-hand side, then we compute the result of the binary expression. In this
+code, we do a simple switch on the opcode to create the right LLVM instruction.
+</p>
+
+<p>In the example above, the LLVM builder class is starting to show its value.
+IRBuilder knows where to insert the newly created instruction; all you have to
+do is specify what instruction to create (e.g. with <tt>CreateFAdd</tt>), which
+operands to use (<tt>L</tt> and <tt>R</tt> here) and optionally provide a name
+for the generated instruction.</p>
+
+<p>One nice thing about LLVM is that the name is just a hint. For instance, if
+the code above emits multiple "addtmp" variables, LLVM will automatically
+provide each one with an increasing, unique numeric suffix. Local value names
+for instructions are purely optional, but they make it much easier to read the
+IR dumps.</p>
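+
+<p>For example, codegen'ing "x+y+z" with the code above would produce something
+along these lines (illustrative; the exact numbering can vary):</p>
+
+<div class="doc_code">
+<pre>
+%addtmp = fadd double %x, %y
+%addtmp1 = fadd double %addtmp, %z
+</pre>
+</div>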
+
+<p><a href="../LangRef.html#instref">LLVM instructions</a> are constrained by
+strict rules: for example, the Left and Right operands of
+an <a href="../LangRef.html#i_add">add instruction</a> must have the same
+type, and the result type of the add must match the operand types. Because
+all values in Kaleidoscope are doubles, this makes for very simple code for add,
+sub and mul.</p>
+
+<p>On the other hand, LLVM specifies that the <a
+href="../LangRef.html#i_fcmp">fcmp instruction</a> always returns an 'i1' value
+(a one-bit integer). The problem with this is that Kaleidoscope wants the result to be 0.0 or 1.0. In order to get these semantics, we combine the fcmp instruction with
+a <a href="../LangRef.html#i_uitofp">uitofp instruction</a>. This instruction
+converts its input integer into a floating point value by treating the input
+as an unsigned value. In contrast, if we used the <a
+href="../LangRef.html#i_sitofp">sitofp instruction</a>, the Kaleidoscope '&lt;'
+operator would return 0.0 and -1.0, depending on the input value.</p>
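+
+<p>Concretely, the '&lt;' case above emits IR along these lines (an illustrative
+dump, with x and y standing in for the operands):</p>
+
+<div class="doc_code">
+<pre>
+%cmptmp = fcmp ult double %x, %y                      ; i1: 0 or 1
+%booltmp = uitofp i1 %cmptmp to double                ; 0.0 or 1.0
+</pre>
+</div>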
+
+<div class="doc_code">
+<pre>
+Value *CallExprAST::Codegen() {
+ // Look up the name in the global module table.
+ Function *CalleeF = TheModule-&gt;getFunction(Callee);
+ if (CalleeF == 0)
+ return ErrorV("Unknown function referenced");
+
+ // If argument mismatch error.
+ if (CalleeF-&gt;arg_size() != Args.size())
+ return ErrorV("Incorrect # arguments passed");
+
+ std::vector&lt;Value*&gt; ArgsV;
+ for (unsigned i = 0, e = Args.size(); i != e; ++i) {
+ ArgsV.push_back(Args[i]-&gt;Codegen());
+ if (ArgsV.back() == 0) return 0;
+ }
+
+ return Builder.CreateCall(CalleeF, ArgsV, "calltmp");
+}
+</pre>
+</div>
+
+<p>Code generation for function calls is quite straightforward with LLVM. The
+code above initially does a function name lookup in the LLVM Module's symbol
+table. Recall that the LLVM Module is the container that holds all of the
+functions we are JIT'ing. By giving each function the same name as what the
+user specifies, we can use the LLVM symbol table to resolve function names for
+us.</p>
+
+<p>Once we have the function to call, we recursively codegen each argument that
+is to be passed in, and create an LLVM <a href="../LangRef.html#i_call">call
+instruction</a>. Note that LLVM uses the native C calling conventions by
+default, allowing these calls to also call into standard library functions like
+"sin" and "cos", with no additional effort.</p>
+
+<p>This wraps up our handling of the four basic expressions that we have so far
+in Kaleidoscope. Feel free to go in and add some more. For example, by
+browsing the <a href="../LangRef.html">LLVM language reference</a> you'll find
+several other interesting instructions that are really easy to plug into our
+basic framework.</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="funcs">Function Code Generation</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>Code generation for prototypes and functions must handle a number of
+details, which make their code less beautiful than expression code
+generation, but allow us to illustrate some important points. First, let's
+talk about code generation for prototypes: they are used both for function
+bodies and external function declarations. The code starts with:</p>
+
+<div class="doc_code">
+<pre>
+Function *PrototypeAST::Codegen() {
+ // Make the function type: double(double,double) etc.
+ std::vector&lt;Type*&gt; Doubles(Args.size(),
+ Type::getDoubleTy(getGlobalContext()));
+ FunctionType *FT = FunctionType::get(Type::getDoubleTy(getGlobalContext()),
+ Doubles, false);
+
+ Function *F = Function::Create(FT, Function::ExternalLinkage, Name, TheModule);
+</pre>
+</div>
+
+<p>This code packs a lot of power into a few lines. Note first that this
+function returns a "Function*" instead of a "Value*". Because a "prototype"
+really talks about the external interface for a function (not the value computed
+by an expression), it makes sense for it to return the LLVM Function it
+corresponds to when codegen'd.</p>
+
+<p>The call to <tt>FunctionType::get</tt> creates
+the <tt>FunctionType</tt> that should be used for a given Prototype. Since all
+function arguments in Kaleidoscope are of type double, the first line creates
+a vector of "N" LLVM double types. It then uses the <tt>Functiontype::get</tt>
+method to create a function type that takes "N" doubles as arguments, returns
+one double as a result, and that is not vararg (the false parameter indicates
+this). Note that Types in LLVM are uniqued just like Constants are, so you
+don't "new" a type, you "get" it.</p>
+
+<p>The final line above actually creates the function that the prototype will
+correspond to. This indicates the type, linkage and name to use, as well as which
+module to insert into. "<a href="../LangRef.html#linkage">external linkage</a>"
+means that the function may be defined outside the current module and/or that it
+is callable by functions outside the module. The Name passed in is the name the
+user specified: since "<tt>TheModule</tt>" is specified, this name is registered
+in "<tt>TheModule</tt>"s symbol table, which is used by the function call code
+above.</p>
+
+<div class="doc_code">
+<pre>
+ // If F conflicted, there was already something named 'Name'. If it has a
+ // body, don't allow redefinition or reextern.
+ if (F-&gt;getName() != Name) {
+ // Delete the one we just made and get the existing one.
+ F-&gt;eraseFromParent();
+ F = TheModule-&gt;getFunction(Name);
+</pre>
+</div>
+
+<p>The Module symbol table works just like the Function symbol table when it
+comes to name conflicts: if a new function is created with a name that was previously
+added to the symbol table, the new function will get implicitly renamed when added to the
+Module. The code above exploits this fact to determine if there was a previous
+definition of this function.</p>
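+
+<p>To make the renaming behavior concrete, here is a small hypothetical sketch
+(not part of the tutorial code) of what happens when two functions with the same
+name are created in one module:</p>
+
+<div class="doc_code">
+<pre>
+// Assuming FT and TheModule as above; the name "foo" is chosen only for illustration.
+Function *First  = Function::Create(FT, Function::ExternalLinkage, "foo", TheModule);
+Function *Second = Function::Create(FT, Function::ExternalLinkage, "foo", TheModule);
+// First-&gt;getName() is "foo", but Second was implicitly renamed (e.g. to "foo1"),
+// so Second-&gt;getName() != "foo" -- exactly the condition the code above tests.
+</pre>
+</div>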
+
+<p>In Kaleidoscope, I choose to allow redefinitions of functions in two cases:
+first, we want to allow 'extern'ing a function more than once, as long as the
+prototypes for the externs match (since all arguments have the same type, we
+just have to check that the number of arguments matches).  Second, we want to
+allow 'extern'ing a function and then defining a body for it. This is useful
+when defining mutually recursive functions.</p>
+
+<p>In order to implement this, the code above first checks to see if there is
+a collision on the name of the function.  If so, it deletes the function we just
+created (by calling <tt>eraseFromParent</tt>) and then calls
+<tt>getFunction</tt> to get the existing function with the specified name.  Note
+that many APIs in LLVM have "erase" forms and "remove" forms. The "remove" form
+unlinks the object from its parent (e.g. a Function from a Module) and returns
+it. The "erase" form unlinks the object and then deletes it.</p>
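+
+<p>A minimal sketch of the difference (hypothetical code, not part of the
+tutorial), assuming <tt>F</tt> is a <tt>Function*</tt> currently contained in
+<tt>TheModule</tt>:</p>
+
+<div class="doc_code">
+<pre>
+F-&gt;removeFromParent();                      // unlink F from TheModule; the object stays alive
+TheModule-&gt;getFunctionList().push_back(F);  // ...so it could be re-inserted later
+F-&gt;eraseFromParent();                       // unlink F from TheModule *and* delete it
+</pre>
+</div>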
+
+<div class="doc_code">
+<pre>
+ // If F already has a body, reject this.
+ if (!F-&gt;empty()) {
+ ErrorF("redefinition of function");
+ return 0;
+ }
+
+ // If F took a different number of args, reject.
+ if (F-&gt;arg_size() != Args.size()) {
+ ErrorF("redefinition of function with different # args");
+ return 0;
+ }
+ }
+</pre>
+</div>
+
+<p>In order to verify the logic above, we first check to see if the pre-existing
+function is "empty". In this case, empty means that it has no basic blocks in
+it, which means it has no body. If it has no body, it is a forward
+declaration. Since we don't allow anything after a full definition of the
+function, the code rejects this case. If the previous reference to a function
+was an 'extern', we simply verify that the number of arguments for that
+definition and this one match up. If not, we emit an error.</p>
+
+<div class="doc_code">
+<pre>
+ // Set names for all arguments.
+ unsigned Idx = 0;
+ for (Function::arg_iterator AI = F-&gt;arg_begin(); Idx != Args.size();
+ ++AI, ++Idx) {
+ AI-&gt;setName(Args[Idx]);
+
+ // Add arguments to variable symbol table.
+ NamedValues[Args[Idx]] = AI;
+ }
+ return F;
+}
+</pre>
+</div>
+
+<p>The last bit of code for prototypes loops over all of the arguments in the
+function, setting the name of the LLVM Argument objects to match, and registering
+the arguments in the <tt>NamedValues</tt> map for future use by the
+<tt>VariableExprAST</tt> AST node. Once this is set up, it returns the Function
+object to the caller. Note that we don't check for conflicting
+argument names here (e.g. "extern foo(a b a)"). Doing so would be very
+straight-forward with the mechanics we have already used above.</p>
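+
+<p>If you wanted to add that check, one possible sketch (not part of the tutorial
+code, and it would also require including <tt>&lt;set&gt;</tt>) is to look for
+duplicate argument names before setting them:</p>
+
+<div class="doc_code">
+<pre>
+  // Reject prototypes like "extern foo(a b a)" that repeat an argument name.
+  std::set&lt;std::string&gt; SeenNames;
+  for (unsigned i = 0, e = Args.size(); i != e; ++i)
+    if (!SeenNames.insert(Args[i]).second) {
+      ErrorF("duplicate argument name in prototype");
+      return 0;
+    }
+</pre>
+</div>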
+
+<div class="doc_code">
+<pre>
+Function *FunctionAST::Codegen() {
+ NamedValues.clear();
+
+ Function *TheFunction = Proto-&gt;Codegen();
+ if (TheFunction == 0)
+ return 0;
+</pre>
+</div>
+
+<p>Code generation for function definitions starts out simply enough: we first
+clear out the <tt>NamedValues</tt> map to make sure that there isn't anything in
+it from the last function we compiled, then codegen the prototype (Proto) and
+verify that it is ok.  Code generation of the prototype ensures that there
+is an LLVM Function object that is ready to go for us.</p>
+
+<div class="doc_code">
+<pre>
+ // Create a new basic block to start insertion into.
+ BasicBlock *BB = BasicBlock::Create(getGlobalContext(), "entry", TheFunction);
+ Builder.SetInsertPoint(BB);
+
+ if (Value *RetVal = Body-&gt;Codegen()) {
+</pre>
+</div>
+
+<p>Now we get to the point where the <tt>Builder</tt> is set up. The first
+line creates a new <a href="http://en.wikipedia.org/wiki/Basic_block">basic
+block</a> (named "entry"), which is inserted into <tt>TheFunction</tt>. The
+second line then tells the builder that new instructions should be inserted into
+the end of the new basic block. Basic blocks in LLVM are an important part
+of functions that define the <a
+href="http://en.wikipedia.org/wiki/Control_flow_graph">Control Flow Graph</a>.
+Since we don't have any control flow, our functions will only contain one
+block at this point. We'll fix this in <a href="LangImpl5.html">Chapter 5</a> :).</p>
+
+<div class="doc_code">
+<pre>
+ if (Value *RetVal = Body-&gt;Codegen()) {
+ // Finish off the function.
+ Builder.CreateRet(RetVal);
+
+ // Validate the generated code, checking for consistency.
+ verifyFunction(*TheFunction);
+
+ return TheFunction;
+ }
+</pre>
+</div>
+
+<p>Once the insertion point is set up, we call the <tt>Codegen()</tt> method for
+the root expression of the function. If no error happens, this emits code to
+compute the expression into the entry block and returns the value that was
+computed. Assuming no error, we then create an LLVM <a
+href="../LangRef.html#i_ret">ret instruction</a>, which completes the function.
+Once the function is built, we call <tt>verifyFunction</tt>, which
+is provided by LLVM. This function does a variety of consistency checks on the
+generated code, to determine if our compiler is doing everything right. Using
+this is important: it can catch a lot of bugs. Once the function is finished
+and validated, we return it.</p>
+
+<div class="doc_code">
+<pre>
+ // Error reading body, remove function.
+ TheFunction-&gt;eraseFromParent();
+ return 0;
+}
+</pre>
+</div>
+
+<p>The only piece left here is handling of the error case. For simplicity, we
+handle this by merely deleting the function we produced with the
+<tt>eraseFromParent</tt> method. This allows the user to redefine a function
+that they incorrectly typed in before: if we didn't delete it, it would live in
+the symbol table, with a body, preventing future redefinition.</p>
+
+<p>This code does have a bug, though.  Since <tt>PrototypeAST::Codegen</tt>
+can return a previously defined forward declaration, our code can actually delete
+a forward declaration. There are a number of ways to fix this bug, see what you
+can come up with! Here is a testcase:</p>
+
+<div class="doc_code">
+<pre>
+extern foo(a b);     # ok, declares foo.
+def foo(a b) c; # error, 'c' is invalid.
+def bar() foo(1, 2); # error, unknown function "foo"
+</pre>
+</div>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="driver">Driver Changes and Closing Thoughts</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>
+For now, code generation to LLVM doesn't really get us much, except that we can
+look at the pretty IR that it produces.  The sample code inserts calls to Codegen
+into the "<tt>HandleDefinition</tt>", "<tt>HandleExtern</tt>" etc. functions, and
+then dumps out the LLVM IR.  This gives a nice way to look at the LLVM IR for
+simple functions.  For example:
+</p>
+
+<div class="doc_code">
+<pre>
+ready&gt; <b>4+5;</b>
+Read top-level expression:
+define double @0() {
+entry:
+ ret double 9.000000e+00
+}
+</pre>
+</div>
+
+<p>Note how the parser turns each top-level expression into an anonymous function
+for us.  This will be handy when we add <a href="LangImpl4.html#jit">JIT
+support</a> in the next chapter.  Also note that the code is transcribed very
+literally; no optimizations are being performed except the simple constant
+folding done by IRBuilder.  We will
+<a href="LangImpl4.html#trivialconstfold">add optimizations</a> explicitly in
+the next chapter.</p>
+
+<div class="doc_code">
+<pre>
+ready&gt; <b>def foo(a b) a*a + 2*a*b + b*b;</b>
+Read function definition:
+define double @foo(double %a, double %b) {
+entry:
+ %multmp = fmul double %a, %a
+ %multmp1 = fmul double 2.000000e+00, %a
+ %multmp2 = fmul double %multmp1, %b
+ %addtmp = fadd double %multmp, %multmp2
+ %multmp3 = fmul double %b, %b
+ %addtmp4 = fadd double %addtmp, %multmp3
+ ret double %addtmp4
+}
+</pre>
+</div>
+
+<p>This shows some simple arithmetic. Notice the striking similarity to the
+LLVM builder calls that we use to create the instructions.</p>
+
+<div class="doc_code">
+<pre>
+ready&gt; <b>def bar(a) foo(a, 4.0) + bar(31337);</b>
+Read function definition:
+define double @bar(double %a) {
+entry:
+ %calltmp = call double @foo(double %a, double 4.000000e+00)
+ %calltmp1 = call double @bar(double 3.133700e+04)
+ %addtmp = fadd double %calltmp, %calltmp1
+ ret double %addtmp
+}
+</pre>
+</div>
+
+<p>This shows some function calls. Note that this function will take a long
+time to execute if you call it. In the future we'll add conditional control
+flow to actually make recursion useful :).</p>
+
+<div class="doc_code">
+<pre>
+ready&gt; <b>extern cos(x);</b>
+Read extern:
+declare double @cos(double)
+
+ready&gt; <b>cos(1.234);</b>
+Read top-level expression:
+define double @1() {
+entry:
+ %calltmp = call double @cos(double 1.234000e+00)
+ ret double %calltmp
+}
+</pre>
+</div>
+
+<p>This shows an extern for the libm "cos" function, and a call to it.</p>
+
+
+<div class="doc_code">
+<pre>
+ready&gt; <b>^D</b>
+; ModuleID = 'my cool jit'
+
+define double @0() {
+entry:
+ %addtmp = fadd double 4.000000e+00, 5.000000e+00
+ ret double %addtmp
+}
+
+define double @foo(double %a, double %b) {
+entry:
+ %multmp = fmul double %a, %a
+ %multmp1 = fmul double 2.000000e+00, %a
+ %multmp2 = fmul double %multmp1, %b
+ %addtmp = fadd double %multmp, %multmp2
+ %multmp3 = fmul double %b, %b
+ %addtmp4 = fadd double %addtmp, %multmp3
+ ret double %addtmp4
+}
+
+define double @bar(double %a) {
+entry:
+ %calltmp = call double @foo(double %a, double 4.000000e+00)
+ %calltmp1 = call double @bar(double 3.133700e+04)
+ %addtmp = fadd double %calltmp, %calltmp1
+ ret double %addtmp
+}
+
+declare double @cos(double)
+
+define double @1() {
+entry:
+ %calltmp = call double @cos(double 1.234000e+00)
+ ret double %calltmp
+}
+</pre>
+</div>
+
+<p>When you quit the current demo, it dumps out the IR for the entire module
+generated. Here you can see the big picture with all the functions referencing
+each other.</p>
+
+<p>This wraps up the third chapter of the Kaleidoscope tutorial. Up next, we'll
+describe how to <a href="LangImpl4.html">add JIT codegen and optimizer
+support</a> to this so we can actually start running code!</p>
+
+</div>
+
+
+<!-- *********************************************************************** -->
+<h2><a name="code">Full Code Listing</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>
+Here is the complete code listing for our running example, enhanced with the
+LLVM code generator. Because this uses the LLVM libraries, we need to link
+them in. To do this, we use the <a
+href="http://llvm.org/cmds/llvm-config.html">llvm-config</a> tool to inform
+our makefile/command line about which options to use:</p>
+
+<div class="doc_code">
+<pre>
+# Compile
+clang++ -g -O3 toy.cpp `llvm-config --cppflags --ldflags --libs core` -o toy
+# Run
+./toy
+</pre>
+</div>
+
+<p>Here is the code:</p>
+
+<div class="doc_code">
+<pre>
+// To build this:
+// See the example compile command above.
+
+#include "llvm/DerivedTypes.h"
+#include "llvm/IRBuilder.h"
+#include "llvm/LLVMContext.h"
+#include "llvm/Module.h"
+#include "llvm/Analysis/Verifier.h"
+#include &lt;cstdio&gt;
+#include &lt;string&gt;
+#include &lt;map&gt;
+#include &lt;vector&gt;
+using namespace llvm;
+
+//===----------------------------------------------------------------------===//
+// Lexer
+//===----------------------------------------------------------------------===//
+
+// The lexer returns tokens [0-255] if it is an unknown character, otherwise one
+// of these for known things.
+enum Token {
+ tok_eof = -1,
+
+ // commands
+ tok_def = -2, tok_extern = -3,
+
+ // primary
+ tok_identifier = -4, tok_number = -5
+};
+
+static std::string IdentifierStr; // Filled in if tok_identifier
+static double NumVal; // Filled in if tok_number
+
+/// gettok - Return the next token from standard input.
+static int gettok() {
+ static int LastChar = ' ';
+
+ // Skip any whitespace.
+ while (isspace(LastChar))
+ LastChar = getchar();
+
+ if (isalpha(LastChar)) { // identifier: [a-zA-Z][a-zA-Z0-9]*
+ IdentifierStr = LastChar;
+ while (isalnum((LastChar = getchar())))
+ IdentifierStr += LastChar;
+
+ if (IdentifierStr == "def") return tok_def;
+ if (IdentifierStr == "extern") return tok_extern;
+ return tok_identifier;
+ }
+
+ if (isdigit(LastChar) || LastChar == '.') { // Number: [0-9.]+
+ std::string NumStr;
+ do {
+ NumStr += LastChar;
+ LastChar = getchar();
+ } while (isdigit(LastChar) || LastChar == '.');
+
+ NumVal = strtod(NumStr.c_str(), 0);
+ return tok_number;
+ }
+
+ if (LastChar == '#') {
+ // Comment until end of line.
+ do LastChar = getchar();
+ while (LastChar != EOF &amp;&amp; LastChar != '\n' &amp;&amp; LastChar != '\r');
+
+ if (LastChar != EOF)
+ return gettok();
+ }
+
+ // Check for end of file. Don't eat the EOF.
+ if (LastChar == EOF)
+ return tok_eof;
+
+ // Otherwise, just return the character as its ascii value.
+ int ThisChar = LastChar;
+ LastChar = getchar();
+ return ThisChar;
+}
+
+//===----------------------------------------------------------------------===//
+// Abstract Syntax Tree (aka Parse Tree)
+//===----------------------------------------------------------------------===//
+
+/// ExprAST - Base class for all expression nodes.
+class ExprAST {
+public:
+ virtual ~ExprAST() {}
+ virtual Value *Codegen() = 0;
+};
+
+/// NumberExprAST - Expression class for numeric literals like "1.0".
+class NumberExprAST : public ExprAST {
+ double Val;
+public:
+ NumberExprAST(double val) : Val(val) {}
+ virtual Value *Codegen();
+};
+
+/// VariableExprAST - Expression class for referencing a variable, like "a".
+class VariableExprAST : public ExprAST {
+ std::string Name;
+public:
+ VariableExprAST(const std::string &amp;name) : Name(name) {}
+ virtual Value *Codegen();
+};
+
+/// BinaryExprAST - Expression class for a binary operator.
+class BinaryExprAST : public ExprAST {
+ char Op;
+ ExprAST *LHS, *RHS;
+public:
+ BinaryExprAST(char op, ExprAST *lhs, ExprAST *rhs)
+ : Op(op), LHS(lhs), RHS(rhs) {}
+ virtual Value *Codegen();
+};
+
+/// CallExprAST - Expression class for function calls.
+class CallExprAST : public ExprAST {
+ std::string Callee;
+ std::vector&lt;ExprAST*&gt; Args;
+public:
+ CallExprAST(const std::string &amp;callee, std::vector&lt;ExprAST*&gt; &amp;args)
+ : Callee(callee), Args(args) {}
+ virtual Value *Codegen();
+};
+
+/// PrototypeAST - This class represents the "prototype" for a function,
+/// which captures its name, and its argument names (thus implicitly the number
+/// of arguments the function takes).
+class PrototypeAST {
+ std::string Name;
+ std::vector&lt;std::string&gt; Args;
+public:
+ PrototypeAST(const std::string &amp;name, const std::vector&lt;std::string&gt; &amp;args)
+ : Name(name), Args(args) {}
+
+ Function *Codegen();
+};
+
+/// FunctionAST - This class represents a function definition itself.
+class FunctionAST {
+ PrototypeAST *Proto;
+ ExprAST *Body;
+public:
+ FunctionAST(PrototypeAST *proto, ExprAST *body)
+ : Proto(proto), Body(body) {}
+
+ Function *Codegen();
+};
+
+//===----------------------------------------------------------------------===//
+// Parser
+//===----------------------------------------------------------------------===//
+
+/// CurTok/getNextToken - Provide a simple token buffer. CurTok is the current
+/// token the parser is looking at. getNextToken reads another token from the
+/// lexer and updates CurTok with its results.
+static int CurTok;
+static int getNextToken() {
+ return CurTok = gettok();
+}
+
+/// BinopPrecedence - This holds the precedence for each binary operator that is
+/// defined.
+static std::map&lt;char, int&gt; BinopPrecedence;
+
+/// GetTokPrecedence - Get the precedence of the pending binary operator token.
+static int GetTokPrecedence() {
+ if (!isascii(CurTok))
+ return -1;
+
+ // Make sure it's a declared binop.
+ int TokPrec = BinopPrecedence[CurTok];
+ if (TokPrec &lt;= 0) return -1;
+ return TokPrec;
+}
+
+/// Error* - These are little helper functions for error handling.
+ExprAST *Error(const char *Str) { fprintf(stderr, "Error: %s\n", Str);return 0;}
+PrototypeAST *ErrorP(const char *Str) { Error(Str); return 0; }
+FunctionAST *ErrorF(const char *Str) { Error(Str); return 0; }
+
+static ExprAST *ParseExpression();
+
+/// identifierexpr
+/// ::= identifier
+/// ::= identifier '(' expression* ')'
+static ExprAST *ParseIdentifierExpr() {
+ std::string IdName = IdentifierStr;
+
+ getNextToken(); // eat identifier.
+
+ if (CurTok != '(') // Simple variable ref.
+ return new VariableExprAST(IdName);
+
+ // Call.
+ getNextToken(); // eat (
+ std::vector&lt;ExprAST*&gt; Args;
+ if (CurTok != ')') {
+ while (1) {
+ ExprAST *Arg = ParseExpression();
+ if (!Arg) return 0;
+ Args.push_back(Arg);
+
+ if (CurTok == ')') break;
+
+ if (CurTok != ',')
+ return Error("Expected ')' or ',' in argument list");
+ getNextToken();
+ }
+ }
+
+ // Eat the ')'.
+ getNextToken();
+
+ return new CallExprAST(IdName, Args);
+}
+
+/// numberexpr ::= number
+static ExprAST *ParseNumberExpr() {
+ ExprAST *Result = new NumberExprAST(NumVal);
+ getNextToken(); // consume the number
+ return Result;
+}
+
+/// parenexpr ::= '(' expression ')'
+static ExprAST *ParseParenExpr() {
+ getNextToken(); // eat (.
+ ExprAST *V = ParseExpression();
+ if (!V) return 0;
+
+ if (CurTok != ')')
+ return Error("expected ')'");
+ getNextToken(); // eat ).
+ return V;
+}
+
+/// primary
+/// ::= identifierexpr
+/// ::= numberexpr
+/// ::= parenexpr
+static ExprAST *ParsePrimary() {
+ switch (CurTok) {
+ default: return Error("unknown token when expecting an expression");
+ case tok_identifier: return ParseIdentifierExpr();
+ case tok_number: return ParseNumberExpr();
+ case '(': return ParseParenExpr();
+ }
+}
+
+/// binoprhs
+/// ::= ('+' primary)*
+static ExprAST *ParseBinOpRHS(int ExprPrec, ExprAST *LHS) {
+ // If this is a binop, find its precedence.
+ while (1) {
+ int TokPrec = GetTokPrecedence();
+
+ // If this is a binop that binds at least as tightly as the current binop,
+ // consume it, otherwise we are done.
+ if (TokPrec &lt; ExprPrec)
+ return LHS;
+
+ // Okay, we know this is a binop.
+ int BinOp = CurTok;
+ getNextToken(); // eat binop
+
+ // Parse the primary expression after the binary operator.
+ ExprAST *RHS = ParsePrimary();
+ if (!RHS) return 0;
+
+ // If BinOp binds less tightly with RHS than the operator after RHS, let
+ // the pending operator take RHS as its LHS.
+ int NextPrec = GetTokPrecedence();
+ if (TokPrec &lt; NextPrec) {
+ RHS = ParseBinOpRHS(TokPrec+1, RHS);
+ if (RHS == 0) return 0;
+ }
+
+ // Merge LHS/RHS.
+ LHS = new BinaryExprAST(BinOp, LHS, RHS);
+ }
+}
+
+/// expression
+/// ::= primary binoprhs
+///
+static ExprAST *ParseExpression() {
+ ExprAST *LHS = ParsePrimary();
+ if (!LHS) return 0;
+
+ return ParseBinOpRHS(0, LHS);
+}
+
+/// prototype
+/// ::= id '(' id* ')'
+static PrototypeAST *ParsePrototype() {
+ if (CurTok != tok_identifier)
+ return ErrorP("Expected function name in prototype");
+
+ std::string FnName = IdentifierStr;
+ getNextToken();
+
+ if (CurTok != '(')
+ return ErrorP("Expected '(' in prototype");
+
+ std::vector&lt;std::string&gt; ArgNames;
+ while (getNextToken() == tok_identifier)
+ ArgNames.push_back(IdentifierStr);
+ if (CurTok != ')')
+ return ErrorP("Expected ')' in prototype");
+
+ // success.
+ getNextToken(); // eat ')'.
+
+ return new PrototypeAST(FnName, ArgNames);
+}
+
+/// definition ::= 'def' prototype expression
+static FunctionAST *ParseDefinition() {
+ getNextToken(); // eat def.
+ PrototypeAST *Proto = ParsePrototype();
+ if (Proto == 0) return 0;
+
+ if (ExprAST *E = ParseExpression())
+ return new FunctionAST(Proto, E);
+ return 0;
+}
+
+/// toplevelexpr ::= expression
+static FunctionAST *ParseTopLevelExpr() {
+ if (ExprAST *E = ParseExpression()) {
+ // Make an anonymous proto.
+ PrototypeAST *Proto = new PrototypeAST("", std::vector&lt;std::string&gt;());
+ return new FunctionAST(Proto, E);
+ }
+ return 0;
+}
+
+/// external ::= 'extern' prototype
+static PrototypeAST *ParseExtern() {
+ getNextToken(); // eat extern.
+ return ParsePrototype();
+}
+
+//===----------------------------------------------------------------------===//
+// Code Generation
+//===----------------------------------------------------------------------===//
+
+static Module *TheModule;
+static IRBuilder&lt;&gt; Builder(getGlobalContext());
+static std::map&lt;std::string, Value*&gt; NamedValues;
+
+Value *ErrorV(const char *Str) { Error(Str); return 0; }
+
+Value *NumberExprAST::Codegen() {
+ return ConstantFP::get(getGlobalContext(), APFloat(Val));
+}
+
+Value *VariableExprAST::Codegen() {
+ // Look this variable up in the function.
+ Value *V = NamedValues[Name];
+ return V ? V : ErrorV("Unknown variable name");
+}
+
+Value *BinaryExprAST::Codegen() {
+ Value *L = LHS-&gt;Codegen();
+ Value *R = RHS-&gt;Codegen();
+ if (L == 0 || R == 0) return 0;
+
+ switch (Op) {
+ case '+': return Builder.CreateFAdd(L, R, "addtmp");
+ case '-': return Builder.CreateFSub(L, R, "subtmp");
+ case '*': return Builder.CreateFMul(L, R, "multmp");
+ case '&lt;':
+ L = Builder.CreateFCmpULT(L, R, "cmptmp");
+ // Convert bool 0/1 to double 0.0 or 1.0
+ return Builder.CreateUIToFP(L, Type::getDoubleTy(getGlobalContext()),
+ "booltmp");
+ default: return ErrorV("invalid binary operator");
+ }
+}
+
+Value *CallExprAST::Codegen() {
+ // Look up the name in the global module table.
+ Function *CalleeF = TheModule-&gt;getFunction(Callee);
+ if (CalleeF == 0)
+ return ErrorV("Unknown function referenced");
+
+ // If argument mismatch error.
+ if (CalleeF-&gt;arg_size() != Args.size())
+ return ErrorV("Incorrect # arguments passed");
+
+ std::vector&lt;Value*&gt; ArgsV;
+ for (unsigned i = 0, e = Args.size(); i != e; ++i) {
+ ArgsV.push_back(Args[i]-&gt;Codegen());
+ if (ArgsV.back() == 0) return 0;
+ }
+
+ return Builder.CreateCall(CalleeF, ArgsV, "calltmp");
+}
+
+Function *PrototypeAST::Codegen() {
+ // Make the function type: double(double,double) etc.
+ std::vector&lt;Type*&gt; Doubles(Args.size(),
+ Type::getDoubleTy(getGlobalContext()));
+ FunctionType *FT = FunctionType::get(Type::getDoubleTy(getGlobalContext()),
+ Doubles, false);
+
+ Function *F = Function::Create(FT, Function::ExternalLinkage, Name, TheModule);
+
+ // If F conflicted, there was already something named 'Name'. If it has a
+ // body, don't allow redefinition or reextern.
+ if (F-&gt;getName() != Name) {
+ // Delete the one we just made and get the existing one.
+ F-&gt;eraseFromParent();
+ F = TheModule-&gt;getFunction(Name);
+
+ // If F already has a body, reject this.
+ if (!F-&gt;empty()) {
+ ErrorF("redefinition of function");
+ return 0;
+ }
+
+ // If F took a different number of args, reject.
+ if (F-&gt;arg_size() != Args.size()) {
+ ErrorF("redefinition of function with different # args");
+ return 0;
+ }
+ }
+
+ // Set names for all arguments.
+ unsigned Idx = 0;
+ for (Function::arg_iterator AI = F-&gt;arg_begin(); Idx != Args.size();
+ ++AI, ++Idx) {
+ AI-&gt;setName(Args[Idx]);
+
+ // Add arguments to variable symbol table.
+ NamedValues[Args[Idx]] = AI;
+ }
+
+ return F;
+}
+
+Function *FunctionAST::Codegen() {
+ NamedValues.clear();
+
+ Function *TheFunction = Proto-&gt;Codegen();
+ if (TheFunction == 0)
+ return 0;
+
+ // Create a new basic block to start insertion into.
+ BasicBlock *BB = BasicBlock::Create(getGlobalContext(), "entry", TheFunction);
+ Builder.SetInsertPoint(BB);
+
+ if (Value *RetVal = Body-&gt;Codegen()) {
+ // Finish off the function.
+ Builder.CreateRet(RetVal);
+
+ // Validate the generated code, checking for consistency.
+ verifyFunction(*TheFunction);
+
+ return TheFunction;
+ }
+
+ // Error reading body, remove function.
+ TheFunction-&gt;eraseFromParent();
+ return 0;
+}
+
+//===----------------------------------------------------------------------===//
+// Top-Level parsing and JIT Driver
+//===----------------------------------------------------------------------===//
+
+static void HandleDefinition() {
+ if (FunctionAST *F = ParseDefinition()) {
+ if (Function *LF = F-&gt;Codegen()) {
+ fprintf(stderr, "Read function definition:");
+ LF-&gt;dump();
+ }
+ } else {
+ // Skip token for error recovery.
+ getNextToken();
+ }
+}
+
+static void HandleExtern() {
+ if (PrototypeAST *P = ParseExtern()) {
+ if (Function *F = P-&gt;Codegen()) {
+ fprintf(stderr, "Read extern: ");
+ F-&gt;dump();
+ }
+ } else {
+ // Skip token for error recovery.
+ getNextToken();
+ }
+}
+
+static void HandleTopLevelExpression() {
+ // Evaluate a top-level expression into an anonymous function.
+ if (FunctionAST *F = ParseTopLevelExpr()) {
+ if (Function *LF = F-&gt;Codegen()) {
+ fprintf(stderr, "Read top-level expression:");
+ LF-&gt;dump();
+ }
+ } else {
+ // Skip token for error recovery.
+ getNextToken();
+ }
+}
+
+/// top ::= definition | external | expression | ';'
+static void MainLoop() {
+ while (1) {
+ fprintf(stderr, "ready&gt; ");
+ switch (CurTok) {
+ case tok_eof: return;
+ case ';': getNextToken(); break; // ignore top-level semicolons.
+ case tok_def: HandleDefinition(); break;
+ case tok_extern: HandleExtern(); break;
+ default: HandleTopLevelExpression(); break;
+ }
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// "Library" functions that can be "extern'd" from user code.
+//===----------------------------------------------------------------------===//
+
+/// putchard - putchar that takes a double and returns 0.
+extern "C"
+double putchard(double X) {
+ putchar((char)X);
+ return 0;
+}
+
+//===----------------------------------------------------------------------===//
+// Main driver code.
+//===----------------------------------------------------------------------===//
+
+int main() {
+ LLVMContext &amp;Context = getGlobalContext();
+
+ // Install standard binary operators.
+ // 1 is lowest precedence.
+ BinopPrecedence['&lt;'] = 10;
+ BinopPrecedence['+'] = 20;
+ BinopPrecedence['-'] = 20;
+ BinopPrecedence['*'] = 40; // highest.
+
+ // Prime the first token.
+ fprintf(stderr, "ready&gt; ");
+ getNextToken();
+
+ // Make the module, which holds all the code.
+ TheModule = new Module("my cool jit", Context);
+
+ // Run the main "interpreter loop" now.
+ MainLoop();
+
+ // Print out all of the generated code.
+ TheModule-&gt;dump();
+
+ return 0;
+}
+</pre>
+</div>
+<a href="LangImpl4.html">Next: Adding JIT and Optimizer Support</a>
+</div>
+
+<!-- *********************************************************************** -->
+<hr>
+<address>
+ <a href="http://jigsaw.w3.org/css-validator/check/referer"><img
+ src="http://jigsaw.w3.org/css-validator/images/vcss" alt="Valid CSS!"></a>
+ <a href="http://validator.w3.org/check/referer"><img
+ src="http://www.w3.org/Icons/valid-html401" alt="Valid HTML 4.01!"></a>
+
+ <a href="mailto:sabre@nondot.org">Chris Lattner</a><br>
+ <a href="http://llvm.org/">The LLVM Compiler Infrastructure</a><br>
+ Last modified: $Date$
+</address>
+</body>
+</html>
diff --git a/docs/tutorial/LangImpl4.html b/docs/tutorial/LangImpl4.html
new file mode 100644
index 00000000000..453e43a02e5
--- /dev/null
+++ b/docs/tutorial/LangImpl4.html
@@ -0,0 +1,1152 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
+ "http://www.w3.org/TR/html4/strict.dtd">
+
+<html>
+<head>
+ <title>Kaleidoscope: Adding JIT and Optimizer Support</title>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
+ <meta name="author" content="Chris Lattner">
+ <link rel="stylesheet" href="../_static/llvm.css" type="text/css">
+</head>
+
+<body>
+
+<h1>Kaleidoscope: Adding JIT and Optimizer Support</h1>
+
+<ul>
+<li><a href="index.html">Up to Tutorial Index</a></li>
+<li>Chapter 4
+ <ol>
+ <li><a href="#intro">Chapter 4 Introduction</a></li>
+ <li><a href="#trivialconstfold">Trivial Constant Folding</a></li>
+ <li><a href="#optimizerpasses">LLVM Optimization Passes</a></li>
+ <li><a href="#jit">Adding a JIT Compiler</a></li>
+ <li><a href="#code">Full Code Listing</a></li>
+ </ol>
+</li>
+<li><a href="LangImpl5.html">Chapter 5</a>: Extending the Language: Control
+Flow</li>
+</ul>
+
+<div class="doc_author">
+ <p>Written by <a href="mailto:sabre@nondot.org">Chris Lattner</a></p>
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="intro">Chapter 4 Introduction</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>Welcome to Chapter 4 of the "<a href="index.html">Implementing a language
+with LLVM</a>" tutorial. Chapters 1-3 described the implementation of a simple
+language and added support for generating LLVM IR. This chapter describes
+two new techniques: adding optimizer support to your language, and adding JIT
+compiler support. These additions will demonstrate how to get nice, efficient code
+for the Kaleidoscope language.</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="trivialconstfold">Trivial Constant Folding</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>
+Our demonstration for Chapter 3 is elegant and easy to extend. Unfortunately,
+it does not produce wonderful code. The IRBuilder, however, does give us
+obvious optimizations when compiling simple code:</p>
+
+<div class="doc_code">
+<pre>
+ready&gt; <b>def test(x) 1+2+x;</b>
+Read function definition:
+define double @test(double %x) {
+entry:
+ %addtmp = fadd double 3.000000e+00, %x
+ ret double %addtmp
+}
+</pre>
+</div>
+
+<p>This code is not a literal transcription of the AST built by parsing the
+input.  That would be:</p>
+
+<div class="doc_code">
+<pre>
+ready&gt; <b>def test(x) 1+2+x;</b>
+Read function definition:
+define double @test(double %x) {
+entry:
+ %addtmp = fadd double 2.000000e+00, 1.000000e+00
+ %addtmp1 = fadd double %addtmp, %x
+ ret double %addtmp1
+}
+</pre>
+</div>
+
+<p>Constant folding, as seen above, is a very common and very important
+optimization: so much so that many language implementors implement constant
+folding support in their AST representation.</p>
+
+<p>With LLVM, you don't need this support in the AST.  Since all calls to build
+LLVM IR go through the LLVM IR builder, the builder itself checks to see if
+there is a constant folding opportunity when you call it.  If so, it just does
+the constant fold and returns the constant instead of creating an instruction.</p>
+
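+<p>To see what this means in code, here is a tiny hypothetical illustration (not
+part of the tutorial): with both operands constant, the builder folds the add
+instead of emitting an <tt>fadd</tt> instruction.</p>
+
+<div class="doc_code">
+<pre>
+Value *L = ConstantFP::get(getGlobalContext(), APFloat(1.0));
+Value *R = ConstantFP::get(getGlobalContext(), APFloat(2.0));
+Value *Sum = Builder.CreateFAdd(L, R, "addtmp");
+// 'Sum' is a ConstantFP holding 3.0; no fadd instruction is created or inserted.
+</pre>
+</div>
+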
+<p>Well, that was easy :). In practice, we recommend always using
+<tt>IRBuilder</tt> when generating code like this. It has no
+"syntactic overhead" for its use (you don't have to uglify your compiler with
+constant checks everywhere) and it can dramatically reduce the amount of
+LLVM IR that is generated in some cases (particularly for languages with a macro
+preprocessor or that use a lot of constants).</p>
+
+<p>On the other hand, the <tt>IRBuilder</tt> is limited by the fact
+that it does all of its analysis inline with the code as it is built. If you
+take a slightly more complex example:</p>
+
+<div class="doc_code">
+<pre>
+ready&gt; <b>def test(x) (1+2+x)*(x+(1+2));</b>
+ready&gt; Read function definition:
+define double @test(double %x) {
+entry:
+ %addtmp = fadd double 3.000000e+00, %x
+ %addtmp1 = fadd double %x, 3.000000e+00
+ %multmp = fmul double %addtmp, %addtmp1
+ ret double %multmp
+}
+</pre>
+</div>
+
+<p>In this case, the LHS and RHS of the multiplication are the same value. We'd
+really like to see this generate "<tt>tmp = x+3; result = tmp*tmp;</tt>" instead
+of computing "<tt>x+3</tt>" twice.</p>
+
+<p>Unfortunately, no amount of local analysis will be able to detect and correct
+this.  This requires two transformations: reassociation of expressions (to
+make the adds lexically identical) and Common Subexpression Elimination (CSE)
+to delete the redundant add instruction. Fortunately, LLVM provides a broad
+range of optimizations that you can use, in the form of "passes".</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="optimizerpasses">LLVM Optimization Passes</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>LLVM provides many optimization passes, which do many different sorts of
+things and have different tradeoffs. Unlike other systems, LLVM doesn't hold
+to the mistaken notion that one set of optimizations is right for all languages
+and for all situations. LLVM allows a compiler implementor to make complete
+decisions about what optimizations to use, in which order, and in what
+situation.</p>
+
+<p>As a concrete example, LLVM supports both "whole module" passes, which look
+across as large a body of code as they can (often a whole file, but if run
+at link time, this can be a substantial portion of the whole program), and
+"per-function" passes, which just operate on a single function at a time,
+without looking at other functions.  For more information
+on passes and how they are run, see the <a href="../WritingAnLLVMPass.html">How
+to Write a Pass</a> document and the <a href="../Passes.html">List of LLVM
+Passes</a>.</p>
+
+<p>For Kaleidoscope, we are currently generating functions on the fly, one at
+a time, as the user types them in. We aren't shooting for the ultimate
+optimization experience in this setting, but we also want to catch the easy and
+quick stuff where possible. As such, we will choose to run a few per-function
+optimizations as the user types the function in. If we wanted to make a "static
+Kaleidoscope compiler", we would use exactly the code we have now, except that
+we would defer running the optimizer until the entire file has been parsed.</p>
+
+<p>In order to get per-function optimizations going, we need to set up a
+<a href="../WritingAnLLVMPass.html#passmanager">FunctionPassManager</a> to hold and
+organize the LLVM optimizations that we want to run. Once we have that, we can
+add a set of optimizations to run. The code looks like this:</p>
+
+<div class="doc_code">
+<pre>
+ FunctionPassManager OurFPM(TheModule);
+
+ // Set up the optimizer pipeline. Start with registering info about how the
+ // target lays out data structures.
+ OurFPM.add(new TargetData(*TheExecutionEngine->getTargetData()));
+ // Provide basic AliasAnalysis support for GVN.
+ OurFPM.add(createBasicAliasAnalysisPass());
+ // Do simple "peephole" optimizations and bit-twiddling optzns.
+ OurFPM.add(createInstructionCombiningPass());
+ // Reassociate expressions.
+ OurFPM.add(createReassociatePass());
+ // Eliminate Common SubExpressions.
+ OurFPM.add(createGVNPass());
+ // Simplify the control flow graph (deleting unreachable blocks, etc).
+ OurFPM.add(createCFGSimplificationPass());
+
+ OurFPM.doInitialization();
+
+ // Set the global so the code gen can use this.
+ TheFPM = &amp;OurFPM;
+
+ // Run the main "interpreter loop" now.
+ MainLoop();
+</pre>
+</div>
+
+<p>This code defines a <tt>FunctionPassManager</tt>, "<tt>OurFPM</tt>". It
+requires a pointer to the <tt>Module</tt> to construct itself. Once it is set
+up, we use a series of "add" calls to add a bunch of LLVM passes.  The first
+one is basically boilerplate: it adds target data information so that later
+optimizations know how the data structures in the program are laid out.  The
+"<tt>TheExecutionEngine</tt>" variable is related to the JIT, which we will get
+to in the next section.</p>
+
+<p>In this case, we choose to add 4 optimization passes. The passes we chose
+here are a pretty standard set of "cleanup" optimizations that are useful for
+a wide variety of code. I won't delve into what they do but, believe me,
+they are a good starting place :).</p>
+
+<p>Once the PassManager is set up, we need to make use of it. We do this by
+running it after our newly created function is constructed (in
+<tt>FunctionAST::Codegen</tt>), but before it is returned to the client:</p>
+
+<div class="doc_code">
+<pre>
+ if (Value *RetVal = Body->Codegen()) {
+ // Finish off the function.
+ Builder.CreateRet(RetVal);
+
+ // Validate the generated code, checking for consistency.
+ verifyFunction(*TheFunction);
+
+ <b>// Optimize the function.
+ TheFPM-&gt;run(*TheFunction);</b>
+
+ return TheFunction;
+ }
+</pre>
+</div>
+
+<p>As you can see, this is pretty straightforward. The
+<tt>FunctionPassManager</tt> optimizes and updates the LLVM Function* in place,
+improving (hopefully) its body. With this in place, we can try our test above
+again:</p>
+
+<div class="doc_code">
+<pre>
+ready&gt; <b>def test(x) (1+2+x)*(x+(1+2));</b>
+ready&gt; Read function definition:
+define double @test(double %x) {
+entry:
+ %addtmp = fadd double %x, 3.000000e+00
+ %multmp = fmul double %addtmp, %addtmp
+ ret double %multmp
+}
+</pre>
+</div>
+
+<p>As expected, we now get our nicely optimized code, saving a floating point
+add instruction from every execution of this function.</p>
+
+<p>LLVM provides a wide variety of optimizations that can be used in certain
+circumstances. Some <a href="../Passes.html">documentation about the various
+passes</a> is available, but it isn't very complete. Another good source of
+ideas can come from looking at the passes that <tt>Clang</tt> runs to get
+started. The "<tt>opt</tt>" tool allows you to experiment with passes from the
+command line, so you can see if they do anything.</p>
+
+<p>Now that we have reasonable code coming out of our front-end, let's talk about
+executing it!</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="jit">Adding a JIT Compiler</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>Code that is available in LLVM IR can have a wide variety of tools
+applied to it. For example, you can run optimizations on it (as we did above),
+you can dump it out in textual or binary forms, you can compile the code to an
+assembly file (.s) for some target, or you can JIT compile it. The nice thing
+about the LLVM IR representation is that it is the "common currency" between
+many different parts of the compiler.
+</p>
+
+<p>In this section, we'll add JIT compiler support to our interpreter. The
+basic idea that we want for Kaleidoscope is to have the user enter function
+bodies as they do now, but immediately evaluate the top-level expressions they
+type in. For example, if they type in "1 + 2;", we should evaluate and print
+out 3. If they define a function, they should be able to call it from the
+command line.</p>
+
+<p>In order to do this, we first declare and initialize the JIT. This is done
+by adding a global variable and a call in <tt>main</tt>:</p>
+
+<div class="doc_code">
+<pre>
+<b>static ExecutionEngine *TheExecutionEngine;</b>
+...
+int main() {
+ ..
+ <b>// Create the JIT. This takes ownership of the module.
+ TheExecutionEngine = EngineBuilder(TheModule).create();</b>
+ ..
+}
+</pre>
+</div>
+
+<p>This creates an abstract "Execution Engine" which can be either a JIT
+compiler or the LLVM interpreter. LLVM will automatically pick a JIT compiler
+for you if one is available for your platform, otherwise it will fall back to
+the interpreter.</p>
+
+<p>Once the <tt>ExecutionEngine</tt> is created, the JIT is ready to be used.
+There are a variety of APIs that are useful, but the simplest one is the
+"<tt>getPointerToFunction(F)</tt>" method. This method JIT compiles the
+specified LLVM Function and returns a function pointer to the generated machine
+code. In our case, this means that we can change the code that parses a
+top-level expression to look like this:</p>
+
+<div class="doc_code">
+<pre>
+static void HandleTopLevelExpression() {
+ // Evaluate a top-level expression into an anonymous function.
+ if (FunctionAST *F = ParseTopLevelExpr()) {
+ if (Function *LF = F-&gt;Codegen()) {
+ LF->dump(); // Dump the function for exposition purposes.
+
+ <b>// JIT the function, returning a function pointer.
+ void *FPtr = TheExecutionEngine-&gt;getPointerToFunction(LF);
+
+ // Cast it to the right type (takes no arguments, returns a double) so we
+ // can call it as a native function.
+ double (*FP)() = (double (*)())(intptr_t)FPtr;
+ fprintf(stderr, "Evaluated to %f\n", FP());</b>
+ }
+</pre>
+</div>
+
+<p>Recall that we compile top-level expressions into a self-contained LLVM
+function that takes no arguments and returns the computed double. Because the
+LLVM JIT compiler matches the native platform ABI, this means that you can just
+cast the result pointer to a function pointer of that type and call it directly.
+This means that there is no difference between JIT compiled code and native
+machine code that is statically linked into your application.</p>
+
+<p>With just these two changes, let's see how Kaleidoscope works now!</p>
+
+<div class="doc_code">
+<pre>
+ready&gt; <b>4+5;</b>
+Read top-level expression:
+define double @0() {
+entry:
+ ret double 9.000000e+00
+}
+
+<em>Evaluated to 9.000000</em>
+</pre>
+</div>
+
+<p>Well, this looks like it is basically working.  The dump of the function
+shows the "no argument function that always returns double" that we synthesize
+for each top-level expression that is typed in. This demonstrates very basic
+functionality, but can we do more?</p>
+
+<div class="doc_code">
+<pre>
+ready&gt; <b>def testfunc(x y) x + y*2; </b>
+Read function definition:
+define double @testfunc(double %x, double %y) {
+entry:
+ %multmp = fmul double %y, 2.000000e+00
+ %addtmp = fadd double %multmp, %x
+ ret double %addtmp
+}
+
+ready&gt; <b>testfunc(4, 10);</b>
+Read top-level expression:
+define double @1() {
+entry:
+ %calltmp = call double @testfunc(double 4.000000e+00, double 1.000000e+01)
+ ret double %calltmp
+}
+
+<em>Evaluated to 24.000000</em>
+</pre>
+</div>
+
+<p>This illustrates that we can now call user code, but there is something a bit
+subtle going on here. Note that we only invoke the JIT on the anonymous
+functions that <em>call testfunc</em>, but we never invoke it
+on <em>testfunc</em> itself. What actually happened here is that the JIT
+scanned for all non-JIT'd functions transitively called from the anonymous
+function and compiled all of them before returning
+from <tt>getPointerToFunction()</tt>.</p>
+
+<p>The JIT provides a number of other more advanced interfaces for things like
+freeing allocated machine code, rejit'ing functions to update them, etc.
+However, even with this simple code, we get some surprisingly powerful
+capabilities - check this out (I removed the dump of the anonymous functions,
+you should get the idea by now :) :</p>
+
+<div class="doc_code">
+<pre>
+ready&gt; <b>extern sin(x);</b>
+Read extern:
+declare double @sin(double)
+
+ready&gt; <b>extern cos(x);</b>
+Read extern:
+declare double @cos(double)
+
+ready&gt; <b>sin(1.0);</b>
+Read top-level expression:
+define double @2() {
+entry:
+ ret double 0x3FEAED548F090CEE
+}
+
+<em>Evaluated to 0.841471</em>
+
+ready&gt; <b>def foo(x) sin(x)*sin(x) + cos(x)*cos(x);</b>
+Read function definition:
+define double @foo(double %x) {
+entry:
+ %calltmp = call double @sin(double %x)
+ %multmp = fmul double %calltmp, %calltmp
+ %calltmp2 = call double @cos(double %x)
+ %multmp4 = fmul double %calltmp2, %calltmp2
+ %addtmp = fadd double %multmp, %multmp4
+ ret double %addtmp
+}
+
+ready&gt; <b>foo(4.0);</b>
+Read top-level expression:
+define double @3() {
+entry:
+ %calltmp = call double @foo(double 4.000000e+00)
+ ret double %calltmp
+}
+
+<em>Evaluated to 1.000000</em>
+</pre>
+</div>
+
+<p>Whoa, how does the JIT know about sin and cos? The answer is surprisingly
+simple: in this
+example, the JIT started execution of a function and got to a function call. It
+realized that the function was not yet JIT compiled and invoked the standard set
+of routines to resolve the function. In this case, there is no body defined
+for the function, so the JIT ended up calling "<tt>dlsym("sin")</tt>" on the
+Kaleidoscope process itself.
+Since "<tt>sin</tt>" is defined within the JIT's address space, it simply
+patches up calls in the module to call the libm version of <tt>sin</tt>
+directly.</p>
+
+<p>The LLVM JIT provides a number of interfaces (look in the
+<tt>ExecutionEngine.h</tt> file) for controlling how unknown functions get
+resolved. It allows you to establish explicit mappings between IR objects and
+addresses (useful for LLVM global variables that you want to map to static
+tables, for example), allows you to dynamically decide on the fly based on the
+function name, and even allows you to have the JIT compile functions lazily the
+first time they're called.</p>
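+
+<p>As one hypothetical example of the first of those interfaces (a sketch, not
+code from the tutorial; "<tt>mysin</tt>" is an invented name), you could tell the
+JIT exactly which address to use for a declaration instead of letting it search
+for the symbol:</p>
+
+<div class="doc_code">
+<pre>
+// A native function we control (an invented name, defined elsewhere):
+extern "C" double mysin(double X);
+
+// After TheExecutionEngine has been created (e.g. in main()):
+if (Function *SinDecl = TheModule-&gt;getFunction("sin"))
+  TheExecutionEngine-&gt;addGlobalMapping(SinDecl, (void*)(intptr_t)&amp;mysin);
+</pre>
+</div>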
+
+<p>One interesting application of this is that we can now extend the language
+by writing arbitrary C++ code to implement operations. For example, if we add:
+</p>
+
+<div class="doc_code">
+<pre>
+/// putchard - putchar that takes a double and returns 0.
+extern "C"
+double putchard(double X) {
+ putchar((char)X);
+ return 0;
+}
+</pre>
+</div>
+
+<p>Now we can produce simple output to the console by using things like:
+"<tt>extern putchard(x); putchard(120);</tt>", which prints a lowercase 'x' on
+the console (120 is the ASCII code for 'x'). Similar code could be used to
+implement file I/O, console input, and many other capabilities in
+Kaleidoscope.</p>
+
+<p>This completes the JIT and optimizer chapter of the Kaleidoscope tutorial. At
+this point, we can compile a non-Turing-complete programming language, optimize
+and JIT compile it in a user-driven way. Next up we'll look into <a
+href="LangImpl5.html">extending the language with control flow constructs</a>,
+tackling some interesting LLVM IR issues along the way.</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="code">Full Code Listing</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>
+Here is the complete code listing for our running example, enhanced with the
+LLVM JIT and optimizer. To build this example, use:
+</p>
+
+<div class="doc_code">
+<pre>
+# Compile
+clang++ -g toy.cpp `llvm-config --cppflags --ldflags --libs core jit native` -O3 -o toy
+# Run
+./toy
+</pre>
+</div>
+
+<p>
+If you are compiling this on Linux, make sure to add the "-rdynamic" option
+as well. This makes sure that the external functions are resolved properly
+at runtime.</p>
+
+<p>Here is the code:</p>
+
+<div class="doc_code">
+<pre>
+#include "llvm/DerivedTypes.h"
+#include "llvm/ExecutionEngine/ExecutionEngine.h"
+#include "llvm/ExecutionEngine/JIT.h"
+#include "llvm/IRBuilder.h"
+#include "llvm/LLVMContext.h"
+#include "llvm/Module.h"
+#include "llvm/PassManager.h"
+#include "llvm/Analysis/Verifier.h"
+#include "llvm/Analysis/Passes.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Transforms/Scalar.h"
+#include "llvm/Support/TargetSelect.h"
+#include &lt;cstdio&gt;
+#include &lt;string&gt;
+#include &lt;map&gt;
+#include &lt;vector&gt;
+using namespace llvm;
+
+//===----------------------------------------------------------------------===//
+// Lexer
+//===----------------------------------------------------------------------===//
+
+// The lexer returns tokens [0-255] if it is an unknown character, otherwise one
+// of these for known things.
+enum Token {
+ tok_eof = -1,
+
+ // commands
+ tok_def = -2, tok_extern = -3,
+
+ // primary
+ tok_identifier = -4, tok_number = -5
+};
+
+static std::string IdentifierStr; // Filled in if tok_identifier
+static double NumVal; // Filled in if tok_number
+
+/// gettok - Return the next token from standard input.
+static int gettok() {
+ static int LastChar = ' ';
+
+ // Skip any whitespace.
+ while (isspace(LastChar))
+ LastChar = getchar();
+
+ if (isalpha(LastChar)) { // identifier: [a-zA-Z][a-zA-Z0-9]*
+ IdentifierStr = LastChar;
+ while (isalnum((LastChar = getchar())))
+ IdentifierStr += LastChar;
+
+ if (IdentifierStr == "def") return tok_def;
+ if (IdentifierStr == "extern") return tok_extern;
+ return tok_identifier;
+ }
+
+ if (isdigit(LastChar) || LastChar == '.') { // Number: [0-9.]+
+ std::string NumStr;
+ do {
+ NumStr += LastChar;
+ LastChar = getchar();
+ } while (isdigit(LastChar) || LastChar == '.');
+
+ NumVal = strtod(NumStr.c_str(), 0);
+ return tok_number;
+ }
+
+ if (LastChar == '#') {
+ // Comment until end of line.
+ do LastChar = getchar();
+ while (LastChar != EOF &amp;&amp; LastChar != '\n' &amp;&amp; LastChar != '\r');
+
+ if (LastChar != EOF)
+ return gettok();
+ }
+
+ // Check for end of file. Don't eat the EOF.
+ if (LastChar == EOF)
+ return tok_eof;
+
+ // Otherwise, just return the character as its ascii value.
+ int ThisChar = LastChar;
+ LastChar = getchar();
+ return ThisChar;
+}
+
+//===----------------------------------------------------------------------===//
+// Abstract Syntax Tree (aka Parse Tree)
+//===----------------------------------------------------------------------===//
+
+/// ExprAST - Base class for all expression nodes.
+class ExprAST {
+public:
+ virtual ~ExprAST() {}
+ virtual Value *Codegen() = 0;
+};
+
+/// NumberExprAST - Expression class for numeric literals like "1.0".
+class NumberExprAST : public ExprAST {
+ double Val;
+public:
+ NumberExprAST(double val) : Val(val) {}
+ virtual Value *Codegen();
+};
+
+/// VariableExprAST - Expression class for referencing a variable, like "a".
+class VariableExprAST : public ExprAST {
+ std::string Name;
+public:
+ VariableExprAST(const std::string &amp;name) : Name(name) {}
+ virtual Value *Codegen();
+};
+
+/// BinaryExprAST - Expression class for a binary operator.
+class BinaryExprAST : public ExprAST {
+ char Op;
+ ExprAST *LHS, *RHS;
+public:
+ BinaryExprAST(char op, ExprAST *lhs, ExprAST *rhs)
+ : Op(op), LHS(lhs), RHS(rhs) {}
+ virtual Value *Codegen();
+};
+
+/// CallExprAST - Expression class for function calls.
+class CallExprAST : public ExprAST {
+ std::string Callee;
+ std::vector&lt;ExprAST*&gt; Args;
+public:
+ CallExprAST(const std::string &amp;callee, std::vector&lt;ExprAST*&gt; &amp;args)
+ : Callee(callee), Args(args) {}
+ virtual Value *Codegen();
+};
+
+/// PrototypeAST - This class represents the "prototype" for a function,
+/// which captures its name, and its argument names (thus implicitly the number
+/// of arguments the function takes).
+class PrototypeAST {
+ std::string Name;
+ std::vector&lt;std::string&gt; Args;
+public:
+ PrototypeAST(const std::string &amp;name, const std::vector&lt;std::string&gt; &amp;args)
+ : Name(name), Args(args) {}
+
+ Function *Codegen();
+};
+
+/// FunctionAST - This class represents a function definition itself.
+class FunctionAST {
+ PrototypeAST *Proto;
+ ExprAST *Body;
+public:
+ FunctionAST(PrototypeAST *proto, ExprAST *body)
+ : Proto(proto), Body(body) {}
+
+ Function *Codegen();
+};
+
+//===----------------------------------------------------------------------===//
+// Parser
+//===----------------------------------------------------------------------===//
+
+/// CurTok/getNextToken - Provide a simple token buffer. CurTok is the current
+/// token the parser is looking at. getNextToken reads another token from the
+/// lexer and updates CurTok with its results.
+static int CurTok;
+static int getNextToken() {
+ return CurTok = gettok();
+}
+
+/// BinopPrecedence - This holds the precedence for each binary operator that is
+/// defined.
+static std::map&lt;char, int&gt; BinopPrecedence;
+
+/// GetTokPrecedence - Get the precedence of the pending binary operator token.
+static int GetTokPrecedence() {
+ if (!isascii(CurTok))
+ return -1;
+
+ // Make sure it's a declared binop.
+ int TokPrec = BinopPrecedence[CurTok];
+ if (TokPrec &lt;= 0) return -1;
+ return TokPrec;
+}
+
+/// Error* - These are little helper functions for error handling.
+ExprAST *Error(const char *Str) { fprintf(stderr, "Error: %s\n", Str);return 0;}
+PrototypeAST *ErrorP(const char *Str) { Error(Str); return 0; }
+FunctionAST *ErrorF(const char *Str) { Error(Str); return 0; }
+
+static ExprAST *ParseExpression();
+
+/// identifierexpr
+/// ::= identifier
+/// ::= identifier '(' expression* ')'
+static ExprAST *ParseIdentifierExpr() {
+ std::string IdName = IdentifierStr;
+
+ getNextToken(); // eat identifier.
+
+ if (CurTok != '(') // Simple variable ref.
+ return new VariableExprAST(IdName);
+
+ // Call.
+ getNextToken(); // eat (
+ std::vector&lt;ExprAST*&gt; Args;
+ if (CurTok != ')') {
+ while (1) {
+ ExprAST *Arg = ParseExpression();
+ if (!Arg) return 0;
+ Args.push_back(Arg);
+
+ if (CurTok == ')') break;
+
+ if (CurTok != ',')
+ return Error("Expected ')' or ',' in argument list");
+ getNextToken();
+ }
+ }
+
+ // Eat the ')'.
+ getNextToken();
+
+ return new CallExprAST(IdName, Args);
+}
+
+/// numberexpr ::= number
+static ExprAST *ParseNumberExpr() {
+ ExprAST *Result = new NumberExprAST(NumVal);
+ getNextToken(); // consume the number
+ return Result;
+}
+
+/// parenexpr ::= '(' expression ')'
+static ExprAST *ParseParenExpr() {
+ getNextToken(); // eat (.
+ ExprAST *V = ParseExpression();
+ if (!V) return 0;
+
+ if (CurTok != ')')
+ return Error("expected ')'");
+ getNextToken(); // eat ).
+ return V;
+}
+
+/// primary
+/// ::= identifierexpr
+/// ::= numberexpr
+/// ::= parenexpr
+static ExprAST *ParsePrimary() {
+ switch (CurTok) {
+ default: return Error("unknown token when expecting an expression");
+ case tok_identifier: return ParseIdentifierExpr();
+ case tok_number: return ParseNumberExpr();
+ case '(': return ParseParenExpr();
+ }
+}
+
+/// binoprhs
+/// ::= ('+' primary)*
+static ExprAST *ParseBinOpRHS(int ExprPrec, ExprAST *LHS) {
+ // If this is a binop, find its precedence.
+ while (1) {
+ int TokPrec = GetTokPrecedence();
+
+ // If this is a binop that binds at least as tightly as the current binop,
+ // consume it, otherwise we are done.
+ if (TokPrec &lt; ExprPrec)
+ return LHS;
+
+ // Okay, we know this is a binop.
+ int BinOp = CurTok;
+ getNextToken(); // eat binop
+
+ // Parse the primary expression after the binary operator.
+ ExprAST *RHS = ParsePrimary();
+ if (!RHS) return 0;
+
+ // If BinOp binds less tightly with RHS than the operator after RHS, let
+ // the pending operator take RHS as its LHS.
+ int NextPrec = GetTokPrecedence();
+ if (TokPrec &lt; NextPrec) {
+ RHS = ParseBinOpRHS(TokPrec+1, RHS);
+ if (RHS == 0) return 0;
+ }
+
+ // Merge LHS/RHS.
+ LHS = new BinaryExprAST(BinOp, LHS, RHS);
+ }
+}
+
+/// expression
+/// ::= primary binoprhs
+///
+static ExprAST *ParseExpression() {
+ ExprAST *LHS = ParsePrimary();
+ if (!LHS) return 0;
+
+ return ParseBinOpRHS(0, LHS);
+}
+
+/// prototype
+/// ::= id '(' id* ')'
+static PrototypeAST *ParsePrototype() {
+ if (CurTok != tok_identifier)
+ return ErrorP("Expected function name in prototype");
+
+ std::string FnName = IdentifierStr;
+ getNextToken();
+
+ if (CurTok != '(')
+ return ErrorP("Expected '(' in prototype");
+
+ std::vector&lt;std::string&gt; ArgNames;
+ while (getNextToken() == tok_identifier)
+ ArgNames.push_back(IdentifierStr);
+ if (CurTok != ')')
+ return ErrorP("Expected ')' in prototype");
+
+ // success.
+ getNextToken(); // eat ')'.
+
+ return new PrototypeAST(FnName, ArgNames);
+}
+
+/// definition ::= 'def' prototype expression
+static FunctionAST *ParseDefinition() {
+ getNextToken(); // eat def.
+ PrototypeAST *Proto = ParsePrototype();
+ if (Proto == 0) return 0;
+
+ if (ExprAST *E = ParseExpression())
+ return new FunctionAST(Proto, E);
+ return 0;
+}
+
+/// toplevelexpr ::= expression
+static FunctionAST *ParseTopLevelExpr() {
+ if (ExprAST *E = ParseExpression()) {
+ // Make an anonymous proto.
+ PrototypeAST *Proto = new PrototypeAST("", std::vector&lt;std::string&gt;());
+ return new FunctionAST(Proto, E);
+ }
+ return 0;
+}
+
+/// external ::= 'extern' prototype
+static PrototypeAST *ParseExtern() {
+ getNextToken(); // eat extern.
+ return ParsePrototype();
+}
+
+//===----------------------------------------------------------------------===//
+// Code Generation
+//===----------------------------------------------------------------------===//
+
+static Module *TheModule;
+static IRBuilder&lt;&gt; Builder(getGlobalContext());
+static std::map&lt;std::string, Value*&gt; NamedValues;
+static FunctionPassManager *TheFPM;
+
+Value *ErrorV(const char *Str) { Error(Str); return 0; }
+
+Value *NumberExprAST::Codegen() {
+ return ConstantFP::get(getGlobalContext(), APFloat(Val));
+}
+
+Value *VariableExprAST::Codegen() {
+ // Look this variable up in the function.
+ Value *V = NamedValues[Name];
+ return V ? V : ErrorV("Unknown variable name");
+}
+
+Value *BinaryExprAST::Codegen() {
+ Value *L = LHS-&gt;Codegen();
+ Value *R = RHS-&gt;Codegen();
+ if (L == 0 || R == 0) return 0;
+
+ switch (Op) {
+ case '+': return Builder.CreateFAdd(L, R, "addtmp");
+ case '-': return Builder.CreateFSub(L, R, "subtmp");
+ case '*': return Builder.CreateFMul(L, R, "multmp");
+ case '&lt;':
+ L = Builder.CreateFCmpULT(L, R, "cmptmp");
+ // Convert bool 0/1 to double 0.0 or 1.0
+ return Builder.CreateUIToFP(L, Type::getDoubleTy(getGlobalContext()),
+ "booltmp");
+ default: return ErrorV("invalid binary operator");
+ }
+}
+
+Value *CallExprAST::Codegen() {
+ // Look up the name in the global module table.
+ Function *CalleeF = TheModule-&gt;getFunction(Callee);
+ if (CalleeF == 0)
+ return ErrorV("Unknown function referenced");
+
+ // If argument mismatch error.
+ if (CalleeF-&gt;arg_size() != Args.size())
+ return ErrorV("Incorrect # arguments passed");
+
+ std::vector&lt;Value*&gt; ArgsV;
+ for (unsigned i = 0, e = Args.size(); i != e; ++i) {
+ ArgsV.push_back(Args[i]-&gt;Codegen());
+ if (ArgsV.back() == 0) return 0;
+ }
+
+ return Builder.CreateCall(CalleeF, ArgsV, "calltmp");
+}
+
+Function *PrototypeAST::Codegen() {
+ // Make the function type: double(double,double) etc.
+ std::vector&lt;Type*&gt; Doubles(Args.size(),
+ Type::getDoubleTy(getGlobalContext()));
+ FunctionType *FT = FunctionType::get(Type::getDoubleTy(getGlobalContext()),
+ Doubles, false);
+
+ Function *F = Function::Create(FT, Function::ExternalLinkage, Name, TheModule);
+
+ // If F conflicted, there was already something named 'Name'. If it has a
+ // body, don't allow redefinition or reextern.
+ if (F-&gt;getName() != Name) {
+ // Delete the one we just made and get the existing one.
+ F-&gt;eraseFromParent();
+ F = TheModule-&gt;getFunction(Name);
+
+ // If F already has a body, reject this.
+ if (!F-&gt;empty()) {
+ ErrorF("redefinition of function");
+ return 0;
+ }
+
+ // If F took a different number of args, reject.
+ if (F-&gt;arg_size() != Args.size()) {
+ ErrorF("redefinition of function with different # args");
+ return 0;
+ }
+ }
+
+ // Set names for all arguments.
+ unsigned Idx = 0;
+ for (Function::arg_iterator AI = F-&gt;arg_begin(); Idx != Args.size();
+ ++AI, ++Idx) {
+ AI-&gt;setName(Args[Idx]);
+
+ // Add arguments to variable symbol table.
+ NamedValues[Args[Idx]] = AI;
+ }
+
+ return F;
+}
+
+Function *FunctionAST::Codegen() {
+ NamedValues.clear();
+
+ Function *TheFunction = Proto-&gt;Codegen();
+ if (TheFunction == 0)
+ return 0;
+
+ // Create a new basic block to start insertion into.
+ BasicBlock *BB = BasicBlock::Create(getGlobalContext(), "entry", TheFunction);
+ Builder.SetInsertPoint(BB);
+
+ if (Value *RetVal = Body-&gt;Codegen()) {
+ // Finish off the function.
+ Builder.CreateRet(RetVal);
+
+ // Validate the generated code, checking for consistency.
+ verifyFunction(*TheFunction);
+
+ // Optimize the function.
+ TheFPM-&gt;run(*TheFunction);
+
+ return TheFunction;
+ }
+
+ // Error reading body, remove function.
+ TheFunction-&gt;eraseFromParent();
+ return 0;
+}
+
+//===----------------------------------------------------------------------===//
+// Top-Level parsing and JIT Driver
+//===----------------------------------------------------------------------===//
+
+static ExecutionEngine *TheExecutionEngine;
+
+static void HandleDefinition() {
+ if (FunctionAST *F = ParseDefinition()) {
+ if (Function *LF = F-&gt;Codegen()) {
+ fprintf(stderr, "Read function definition:");
+ LF-&gt;dump();
+ }
+ } else {
+ // Skip token for error recovery.
+ getNextToken();
+ }
+}
+
+static void HandleExtern() {
+ if (PrototypeAST *P = ParseExtern()) {
+ if (Function *F = P-&gt;Codegen()) {
+ fprintf(stderr, "Read extern: ");
+ F-&gt;dump();
+ }
+ } else {
+ // Skip token for error recovery.
+ getNextToken();
+ }
+}
+
+static void HandleTopLevelExpression() {
+ // Evaluate a top-level expression into an anonymous function.
+ if (FunctionAST *F = ParseTopLevelExpr()) {
+ if (Function *LF = F-&gt;Codegen()) {
+ fprintf(stderr, "Read top-level expression:");
+ LF->dump();
+
+ // JIT the function, returning a function pointer.
+ void *FPtr = TheExecutionEngine-&gt;getPointerToFunction(LF);
+
+ // Cast it to the right type (takes no arguments, returns a double) so we
+ // can call it as a native function.
+ double (*FP)() = (double (*)())(intptr_t)FPtr;
+ fprintf(stderr, "Evaluated to %f\n", FP());
+ }
+ } else {
+ // Skip token for error recovery.
+ getNextToken();
+ }
+}
+
+/// top ::= definition | external | expression | ';'
+static void MainLoop() {
+ while (1) {
+ fprintf(stderr, "ready&gt; ");
+ switch (CurTok) {
+ case tok_eof: return;
+ case ';': getNextToken(); break; // ignore top-level semicolons.
+ case tok_def: HandleDefinition(); break;
+ case tok_extern: HandleExtern(); break;
+ default: HandleTopLevelExpression(); break;
+ }
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// "Library" functions that can be "extern'd" from user code.
+//===----------------------------------------------------------------------===//
+
+/// putchard - putchar that takes a double and returns 0.
+extern "C"
+double putchard(double X) {
+ putchar((char)X);
+ return 0;
+}
+
+//===----------------------------------------------------------------------===//
+// Main driver code.
+//===----------------------------------------------------------------------===//
+
+int main() {
+ InitializeNativeTarget();
+ LLVMContext &amp;Context = getGlobalContext();
+
+ // Install standard binary operators.
+ // 1 is lowest precedence.
+ BinopPrecedence['&lt;'] = 10;
+ BinopPrecedence['+'] = 20;
+ BinopPrecedence['-'] = 20;
+ BinopPrecedence['*'] = 40; // highest.
+
+ // Prime the first token.
+ fprintf(stderr, "ready&gt; ");
+ getNextToken();
+
+ // Make the module, which holds all the code.
+ TheModule = new Module("my cool jit", Context);
+
+ // Create the JIT. This takes ownership of the module.
+ std::string ErrStr;
+ TheExecutionEngine = EngineBuilder(TheModule).setErrorStr(&amp;ErrStr).create();
+ if (!TheExecutionEngine) {
+ fprintf(stderr, "Could not create ExecutionEngine: %s\n", ErrStr.c_str());
+ exit(1);
+ }
+
+ FunctionPassManager OurFPM(TheModule);
+
+ // Set up the optimizer pipeline. Start with registering info about how the
+ // target lays out data structures.
+ OurFPM.add(new TargetData(*TheExecutionEngine-&gt;getTargetData()));
+ // Provide basic AliasAnalysis support for GVN.
+ OurFPM.add(createBasicAliasAnalysisPass());
+ // Do simple "peephole" optimizations and bit-twiddling optzns.
+ OurFPM.add(createInstructionCombiningPass());
+ // Reassociate expressions.
+ OurFPM.add(createReassociatePass());
+ // Eliminate Common SubExpressions.
+ OurFPM.add(createGVNPass());
+ // Simplify the control flow graph (deleting unreachable blocks, etc).
+ OurFPM.add(createCFGSimplificationPass());
+
+ OurFPM.doInitialization();
+
+ // Set the global so the code gen can use this.
+ TheFPM = &amp;OurFPM;
+
+ // Run the main "interpreter loop" now.
+ MainLoop();
+
+ TheFPM = 0;
+
+ // Print out all of the generated code.
+ TheModule-&gt;dump();
+
+ return 0;
+}
+</pre>
+</div>
+
+<a href="LangImpl5.html">Next: Extending the language: control flow</a>
+</div>
+
+<!-- *********************************************************************** -->
+<hr>
+<address>
+ <a href="http://jigsaw.w3.org/css-validator/check/referer"><img
+ src="http://jigsaw.w3.org/css-validator/images/vcss" alt="Valid CSS!"></a>
+ <a href="http://validator.w3.org/check/referer"><img
+ src="http://www.w3.org/Icons/valid-html401" alt="Valid HTML 4.01!"></a>
+
+ <a href="mailto:sabre@nondot.org">Chris Lattner</a><br>
+ <a href="http://llvm.org/">The LLVM Compiler Infrastructure</a><br>
+ Last modified: $Date$
+</address>
+</body>
+</html>
diff --git a/docs/tutorial/LangImpl5-cfg.png b/docs/tutorial/LangImpl5-cfg.png
new file mode 100644
index 00000000000..cdba92ff6c5
--- /dev/null
+++ b/docs/tutorial/LangImpl5-cfg.png
Binary files differ
diff --git a/docs/tutorial/LangImpl5.html b/docs/tutorial/LangImpl5.html
new file mode 100644
index 00000000000..2d406df3aaf
--- /dev/null
+++ b/docs/tutorial/LangImpl5.html
@@ -0,0 +1,1772 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
+ "http://www.w3.org/TR/html4/strict.dtd">
+
+<html>
+<head>
+ <title>Kaleidoscope: Extending the Language: Control Flow</title>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
+ <meta name="author" content="Chris Lattner">
+ <link rel="stylesheet" href="../_static/llvm.css" type="text/css">
+</head>
+
+<body>
+
+<h1>Kaleidoscope: Extending the Language: Control Flow</h1>
+
+<ul>
+<li><a href="index.html">Up to Tutorial Index</a></li>
+<li>Chapter 5
+ <ol>
+ <li><a href="#intro">Chapter 5 Introduction</a></li>
+ <li><a href="#ifthen">If/Then/Else</a>
+ <ol>
+ <li><a href="#iflexer">Lexer Extensions</a></li>
+ <li><a href="#ifast">AST Extensions</a></li>
+ <li><a href="#ifparser">Parser Extensions</a></li>
+ <li><a href="#ifir">LLVM IR</a></li>
+ <li><a href="#ifcodegen">Code Generation</a></li>
+ </ol>
+ </li>
+ <li><a href="#for">'for' Loop Expression</a>
+ <ol>
+ <li><a href="#forlexer">Lexer Extensions</a></li>
+ <li><a href="#forast">AST Extensions</a></li>
+ <li><a href="#forparser">Parser Extensions</a></li>
+ <li><a href="#forir">LLVM IR</a></li>
+ <li><a href="#forcodegen">Code Generation</a></li>
+ </ol>
+ </li>
+ <li><a href="#code">Full Code Listing</a></li>
+ </ol>
+</li>
+<li><a href="LangImpl6.html">Chapter 6</a>: Extending the Language:
+User-defined Operators</li>
+</ul>
+
+<div class="doc_author">
+ <p>Written by <a href="mailto:sabre@nondot.org">Chris Lattner</a></p>
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="intro">Chapter 5 Introduction</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>Welcome to Chapter 5 of the "<a href="index.html">Implementing a language
+with LLVM</a>" tutorial. Parts 1-4 described the implementation of the simple
+Kaleidoscope language and included support for generating LLVM IR, followed by
+optimizations and a JIT compiler. Unfortunately, as presented, Kaleidoscope is
+mostly useless: it has no control flow other than call and return. This means
+that you can't have conditional branches in the code, significantly limiting its
+power. In this episode of "build that compiler", we'll extend Kaleidoscope to
+have an if/then/else expression plus a simple 'for' loop.</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="ifthen">If/Then/Else</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>
+Extending Kaleidoscope to support if/then/else is quite straightforward. It
+basically requires adding support for this "new" concept to the lexer,
+parser, AST, and LLVM code emitter. This example is nice, because it shows how
+easy it is to "grow" a language over time, incrementally extending it as new
+ideas are discovered.</p>
+
+<p>Before we get going on "how" we add this extension, let's talk about "what" we
+want. The basic idea is that we want to be able to write this sort of thing:
+</p>
+
+<div class="doc_code">
+<pre>
+def fib(x)
+ if x &lt; 3 then
+ 1
+ else
+ fib(x-1)+fib(x-2);
+</pre>
+</div>
+
+<p>In Kaleidoscope, every construct is an expression: there are no statements.
+As such, the if/then/else expression needs to return a value like any other.
+Since we're using a mostly functional form, we'll have it evaluate its
+conditional, then return the 'then' or 'else' value based on how the condition
+was resolved. This is very similar to the C "?:" expression.</p>
+
+<p>The semantics of the if/then/else expression are that it evaluates the
+condition to a boolean truth value: 0.0 is considered to be false and
+everything else is considered to be true.
+If the condition is true, the first subexpression is evaluated and returned;
+if the condition is false, the second subexpression is evaluated and returned.
+Since Kaleidoscope allows side-effects, this behavior is important to nail down.
+</p>
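+
+<p>As a quick illustration of these semantics (a hypothetical example, not part
+of the tutorial code), consider a tiny function that tests its argument:</p>
+
+<div class="doc_code">
+<pre>
+# Returns 1 if 'x' is "true" (nonzero), 0 otherwise.
+def istrue(x) if x then 1 else 0;
+
+istrue(0);    # 0.0 is false, so this evaluates to 0
+istrue(2.5);  # any nonzero value is true, so this evaluates to 1
+</pre>
+</div>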
+
+<p>Now that we know what we "want", let's break this down into its constituent
+pieces.</p>
+
+<!-- ======================================================================= -->
+<h4><a name="iflexer">Lexer Extensions for If/Then/Else</a></h4>
+<!-- ======================================================================= -->
+
+
+<div>
+
+<p>The lexer extensions are straightforward. First we add new enum values
+for the relevant tokens:</p>
+
+<div class="doc_code">
+<pre>
+ // control
+ tok_if = -6, tok_then = -7, tok_else = -8,
+</pre>
+</div>
+
+<p>Once we have that, we recognize the new keywords in the lexer. This is pretty simple
+stuff:</p>
+
+<div class="doc_code">
+<pre>
+ ...
+ if (IdentifierStr == "def") return tok_def;
+ if (IdentifierStr == "extern") return tok_extern;
+ <b>if (IdentifierStr == "if") return tok_if;
+ if (IdentifierStr == "then") return tok_then;
+ if (IdentifierStr == "else") return tok_else;</b>
+ return tok_identifier;
+</pre>
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h4><a name="ifast">AST Extensions for If/Then/Else</a></h4>
+<!-- ======================================================================= -->
+
+<div>
+
+<p>To represent the new expression we add a new AST node for it:</p>
+
+<div class="doc_code">
+<pre>
+/// IfExprAST - Expression class for if/then/else.
+class IfExprAST : public ExprAST {
+ ExprAST *Cond, *Then, *Else;
+public:
+ IfExprAST(ExprAST *cond, ExprAST *then, ExprAST *_else)
+ : Cond(cond), Then(then), Else(_else) {}
+ virtual Value *Codegen();
+};
+</pre>
+</div>
+
+<p>The AST node just has pointers to the various subexpressions.</p>
+
+</div>
+
+<!-- ======================================================================= -->
+<h4><a name="ifparser">Parser Extensions for If/Then/Else</a></h4>
+<!-- ======================================================================= -->
+
+<div>
+
+<p>Now that we have the relevant tokens coming from the lexer and we have the
+AST node to build, our parsing logic is relatively straightforward. First we
+define a new parsing function:</p>
+
+<div class="doc_code">
+<pre>
+/// ifexpr ::= 'if' expression 'then' expression 'else' expression
+static ExprAST *ParseIfExpr() {
+ getNextToken(); // eat the if.
+
+ // condition.
+ ExprAST *Cond = ParseExpression();
+ if (!Cond) return 0;
+
+ if (CurTok != tok_then)
+ return Error("expected then");
+ getNextToken(); // eat the then
+
+ ExprAST *Then = ParseExpression();
+ if (Then == 0) return 0;
+
+ if (CurTok != tok_else)
+ return Error("expected else");
+
+ getNextToken();
+
+ ExprAST *Else = ParseExpression();
+ if (!Else) return 0;
+
+ return new IfExprAST(Cond, Then, Else);
+}
+</pre>
+</div>
+
+<p>Next we hook it up as a primary expression:</p>
+
+<div class="doc_code">
+<pre>
+static ExprAST *ParsePrimary() {
+ switch (CurTok) {
+ default: return Error("unknown token when expecting an expression");
+ case tok_identifier: return ParseIdentifierExpr();
+ case tok_number: return ParseNumberExpr();
+ case '(': return ParseParenExpr();
+ <b>case tok_if: return ParseIfExpr();</b>
+ }
+}
+</pre>
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h4><a name="ifir">LLVM IR for If/Then/Else</a></h4>
+<!-- ======================================================================= -->
+
+<div>
+
+<p>Now that we have it parsing and building the AST, the final piece is adding
+LLVM code generation support. This is the most interesting part of the
+if/then/else example, because this is where it starts to introduce new concepts.
+All of the code above has been thoroughly described in previous chapters.
+</p>
+
+<p>To motivate the code we want to produce, let's take a look at a simple
+example. Consider:</p>
+
+<div class="doc_code">
+<pre>
+extern foo();
+extern bar();
+def baz(x) if x then foo() else bar();
+</pre>
+</div>
+
+<p>If you disable optimizations, the code you'll (soon) get from Kaleidoscope
+looks like this:</p>
+
+<div class="doc_code">
+<pre>
+declare double @foo()
+
+declare double @bar()
+
+define double @baz(double %x) {
+entry:
+ %ifcond = fcmp one double %x, 0.000000e+00
+ br i1 %ifcond, label %then, label %else
+
+then: ; preds = %entry
+ %calltmp = call double @foo()
+ br label %ifcont
+
+else: ; preds = %entry
+ %calltmp1 = call double @bar()
+ br label %ifcont
+
+ifcont: ; preds = %else, %then
+ %iftmp = phi double [ %calltmp, %then ], [ %calltmp1, %else ]
+ ret double %iftmp
+}
+</pre>
+</div>
+
+<p>To visualize the control flow graph, you can use a nifty feature of the LLVM
+'<a href="http://llvm.org/cmds/opt.html">opt</a>' tool. If you put this LLVM IR
+into "t.ll" and run "<tt>llvm-as &lt; t.ll | opt -analyze -view-cfg</tt>", <a
+href="../ProgrammersManual.html#ViewGraph">a window will pop up</a> and you'll
+see this graph:</p>
+
+<div style="text-align: center"><img src="LangImpl5-cfg.png" alt="Example CFG" width="423"
+height="315"></div>
+
+<p>Another way to get this is to call "<tt>F-&gt;viewCFG()</tt>" or
+"<tt>F-&gt;viewCFGOnly()</tt>" (where F is a "<tt>Function*</tt>") either by
+inserting actual calls into the code and recompiling or by calling these in the
+debugger. LLVM has many nice features for visualizing various graphs.</p>
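+
+<p>For example (purely as a temporary debugging aid, not something the tutorial
+code keeps), you could drop such a call into <tt>FunctionAST::Codegen</tt> right
+after the function is verified, rebuild, and then every function you define
+would pop up its CFG:</p>
+
+<div class="doc_code">
+<pre>
+  if (Value *RetVal = Body-&gt;Codegen()) {
+    // Finish off the function.
+    Builder.CreateRet(RetVal);
+
+    // Validate the generated code, checking for consistency.
+    verifyFunction(*TheFunction);
+
+    // Temporary debugging aid: pop up a window showing this function's CFG
+    // before it is optimized.
+    TheFunction-&gt;viewCFG();
+
+    // Optimize the function.
+    TheFPM-&gt;run(*TheFunction);
+
+    return TheFunction;
+  }
+</pre>
+</div>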
+
+<p>Getting back to the generated code, it is fairly simple: the entry block
+evaluates the conditional expression ("x" in our case here) and compares the
+result to 0.0 with the "<tt><a href="../LangRef.html#i_fcmp">fcmp</a> one</tt>"
+instruction ('one' is "Ordered and Not Equal"). Based on the result of this
+expression, the code jumps to either the "then" or "else" blocks, which contain
+the expressions for the true/false cases.</p>
+
+<p>Once the then/else blocks are finished executing, they both branch to the
+'ifcont' block to execute the code that happens after the if/then/else. In this
+case the only thing left to do is to return to the caller of the function. The
+question then becomes: how does the code know which expression to return?</p>
+
+<p>The answer to this question involves an important SSA operation: the
+<a href="http://en.wikipedia.org/wiki/Static_single_assignment_form">Phi
+operation</a>. If you're not familiar with SSA, <a
+href="http://en.wikipedia.org/wiki/Static_single_assignment_form">the wikipedia
+article</a> is a good introduction and there are various other introductions to
+it available on your favorite search engine. The short version is that
+"execution" of the Phi operation requires "remembering" which block control came
+from. The Phi operation takes on the value corresponding to the input control
+block. In this case, if control comes in from the "then" block, it gets the
+value of "calltmp". If control comes from the "else" block, it gets the value
+of "calltmp1".</p>
+
+<p>At this point, you are probably starting to think "Oh no! This means my
+simple and elegant front-end will have to start generating SSA form in order to
+use LLVM!". Fortunately, this is not the case, and we strongly advise
+<em>not</em> implementing an SSA construction algorithm in your front-end
+unless there is an amazingly good reason to do so. In practice, there are two
+sorts of values that float around in code written for your average imperative
+programming language that might need Phi nodes:</p>
+
+<ol>
+<li>Code that involves user variables: <tt>x = 1; x = x + 1; </tt></li>
+<li>Values that are implicit in the structure of your AST, such as the Phi node
+in this case.</li>
+</ol>
+
+<p>In <a href="LangImpl7.html">Chapter 7</a> of this tutorial ("mutable
+variables"), we'll talk about #1
+in depth. For now, just believe me that you don't need SSA construction to
+handle this case. For #2, you have the choice of using the techniques that we will
+describe for #1, or you can insert Phi nodes directly, if convenient. In this
+case, it is really easy to generate the Phi node, so we choose to do it
+directly.</p>
+
+<p>Okay, enough of the motivation and overview; let's generate code!</p>
+
+</div>
+
+<!-- ======================================================================= -->
+<h4><a name="ifcodegen">Code Generation for If/Then/Else</a></h4>
+<!-- ======================================================================= -->
+
+<div>
+
+<p>In order to generate code for this, we implement the <tt>Codegen</tt> method
+for <tt>IfExprAST</tt>:</p>
+
+<div class="doc_code">
+<pre>
+Value *IfExprAST::Codegen() {
+ Value *CondV = Cond-&gt;Codegen();
+ if (CondV == 0) return 0;
+
+ // Convert condition to a bool by comparing equal to 0.0.
+ CondV = Builder.CreateFCmpONE(CondV,
+ ConstantFP::get(getGlobalContext(), APFloat(0.0)),
+ "ifcond");
+</pre>
+</div>
+
+<p>This code is straightforward and similar to what we saw before. We emit the
+expression for the condition, then compare that value to zero to get a truth
+value as a 1-bit (bool) value.</p>
+
+<div class="doc_code">
+<pre>
+ Function *TheFunction = Builder.GetInsertBlock()-&gt;getParent();
+
+ // Create blocks for the then and else cases. Insert the 'then' block at the
+ // end of the function.
+ BasicBlock *ThenBB = BasicBlock::Create(getGlobalContext(), "then", TheFunction);
+ BasicBlock *ElseBB = BasicBlock::Create(getGlobalContext(), "else");
+ BasicBlock *MergeBB = BasicBlock::Create(getGlobalContext(), "ifcont");
+
+ Builder.CreateCondBr(CondV, ThenBB, ElseBB);
+</pre>
+</div>
+
+<p>This code creates the basic blocks that are related to the if/then/else
+expression, and correspond directly to the blocks in the example above. The
+first line gets the current Function object that is being built. It
+gets this by asking the builder for the current BasicBlock, and asking that
+block for its "parent" (the function it is currently embedded into).</p>
+
+<p>Once it has that, it creates three blocks. Note that it passes "TheFunction"
+into the constructor for the "then" block. This causes the constructor to
+automatically insert the new block into the end of the specified function. The
+other two blocks are created, but aren't yet inserted into the function.</p>
+
+<p>Once the blocks are created, we can emit the conditional branch that chooses
+between them. Note that creating new blocks does not implicitly affect the
+IRBuilder, so it is still inserting into the block that the condition
+went into. Also note that it is creating a branch to the "then" block and the
+"else" block, even though the "else" block isn't inserted into the function yet.
+This is all ok: it is the standard way that LLVM supports forward
+references.</p>
+
+<div class="doc_code">
+<pre>
+ // Emit then value.
+ Builder.SetInsertPoint(ThenBB);
+
+ Value *ThenV = Then-&gt;Codegen();
+ if (ThenV == 0) return 0;
+
+ Builder.CreateBr(MergeBB);
+ // Codegen of 'Then' can change the current block, update ThenBB for the PHI.
+ ThenBB = Builder.GetInsertBlock();
+</pre>
+</div>
+
+<p>After the conditional branch is inserted, we move the builder to start
+inserting into the "then" block. Strictly speaking, this call moves the
+insertion point to be at the end of the specified block. However, since the
+"then" block is empty, it also starts out by inserting at the beginning of the
+block. :)</p>
+
+<p>Once the insertion point is set, we recursively codegen the "then" expression
+from the AST. To finish off the "then" block, we create an unconditional branch
+to the merge block. One interesting (and very important) aspect of the LLVM IR
+is that it <a href="../LangRef.html#functionstructure">requires all basic blocks
+to be "terminated"</a> with a <a href="../LangRef.html#terminators">control flow
+instruction</a> such as return or branch. This means that all control flow,
+<em>including fall throughs</em> must be made explicit in the LLVM IR. If you
+violate this rule, the verifier will emit an error.</p>
+
+<p>The final line here is quite subtle, but is very important. The basic issue
+is that when we create the Phi node in the merge block, we need to set up the
+block/value pairs that indicate how the Phi will work. Importantly, the Phi
+node expects to have an entry for each predecessor of the block in the CFG. Why,
+then, are we getting the current block when we just set it to ThenBB 5 lines
+above? The problem is that the "Then" expression may actually itself change the
+block that the Builder is emitting into if, for example, it contains a nested
+"if/then/else" expression. Because calling Codegen recursively could
+arbitrarily change the notion of the current block, we are required to get an
+up-to-date value for code that will set up the Phi node.</p>
+
+<div class="doc_code">
+<pre>
+ // Emit else block.
+ TheFunction-&gt;getBasicBlockList().push_back(ElseBB);
+ Builder.SetInsertPoint(ElseBB);
+
+ Value *ElseV = Else-&gt;Codegen();
+ if (ElseV == 0) return 0;
+
+ Builder.CreateBr(MergeBB);
+ // Codegen of 'Else' can change the current block, update ElseBB for the PHI.
+ ElseBB = Builder.GetInsertBlock();
+</pre>
+</div>
+
+<p>Code generation for the 'else' block is basically identical to codegen for
+the 'then' block. The only significant difference is the first line, which adds
+the 'else' block to the function. Recall previously that the 'else' block was
+created, but not added to the function. Now that the 'then' and 'else' blocks
+are emitted, we can finish up with the merge code:</p>
+
+<div class="doc_code">
+<pre>
+ // Emit merge block.
+ TheFunction->getBasicBlockList().push_back(MergeBB);
+ Builder.SetInsertPoint(MergeBB);
+ PHINode *PN = Builder.CreatePHI(Type::getDoubleTy(getGlobalContext()), 2,
+ "iftmp");
+
+ PN->addIncoming(ThenV, ThenBB);
+ PN->addIncoming(ElseV, ElseBB);
+ return PN;
+}
+</pre>
+</div>
+
+<p>The first two lines here are now familiar: the first adds the "merge" block
+to the Function object (it was previously floating, like the else block above).
+The second changes the insertion point so that newly created code will go
+into the "merge" block. Once that is done, we need to create the PHI node and
+set up the block/value pairs for the PHI.</p>
+
+<p>Finally, the Codegen function returns the phi node as the value computed by
+the if/then/else expression. In our example above, this returned value will
+feed into the code for the top-level function, which will create the return
+instruction.</p>
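+
+<p>For a quick sanity check, here is roughly what an interactive session looks
+like once this chapter's code is built (the IR dumps are elided here, and the
+exact output may differ):</p>
+
+<div class="doc_code">
+<pre>
+ready&gt; def fib(x) if x &lt; 3 then 1 else fib(x-1)+fib(x-2);
+Read function definition: ...
+ready&gt; fib(10);
+Evaluated to 55.000000
+</pre>
+</div>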
+
+<p>Overall, we now have the ability to execute conditional code in
+Kaleidoscope. With this extension, Kaleidoscope is a fairly complete language
+that can calculate a wide variety of numeric functions. Next up we'll add
+another useful expression that is familiar from non-functional languages...</p>
+
+</div>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="for">'for' Loop Expression</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>Now that we know how to add basic control flow constructs to the language,
+we have the tools to add more powerful things. Let's add something more
+aggressive: a 'for' expression:</p>
+
+<div class="doc_code">
+<pre>
+ extern putchard(char)
+ def printstar(n)
+ for i = 1, i &lt; n, 1.0 in
+ putchard(42); # ascii 42 = '*'
+
+ # print 100 '*' characters
+ printstar(100);
+</pre>
+</div>
+
+<p>This expression defines a new variable ("i" in this case) which iterates from
+a starting value, while the condition ("i &lt; n" in this case) is true,
+incrementing by an optional step value ("1.0" in this case). If the step value
+is omitted, it defaults to 1.0. While the loop condition is true, it executes
+its body expression. Because we don't have anything better to return, we'll
+just define the loop as always returning 0.0. In the future, when we have
+mutable variables, the loop will become more useful.</p>
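+
+<p>For instance (a hypothetical example, not part of the tutorial text), the
+step value can simply be left off:</p>
+
+<div class="doc_code">
+<pre>
+# No step value given, so the loop counts up by 1.0 each iteration.
+def printdots(n)
+  for i = 1, i &lt; n in
+    putchard(46);  # ascii 46 = '.'
+</pre>
+</div>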
+
+<p>As before, let's talk about the changes that we need to make to Kaleidoscope
+to support this.</p>
+
+<!-- ======================================================================= -->
+<h4><a name="forlexer">Lexer Extensions for the 'for' Loop</a></h4>
+<!-- ======================================================================= -->
+
+<div>
+
+<p>The lexer extensions are the same sort of thing as for if/then/else:</p>
+
+<div class="doc_code">
+<pre>
+ ... in enum Token ...
+ // control
+ tok_if = -6, tok_then = -7, tok_else = -8,
+<b> tok_for = -9, tok_in = -10</b>
+
+ ... in gettok ...
+ if (IdentifierStr == "def") return tok_def;
+ if (IdentifierStr == "extern") return tok_extern;
+ if (IdentifierStr == "if") return tok_if;
+ if (IdentifierStr == "then") return tok_then;
+ if (IdentifierStr == "else") return tok_else;
+ <b>if (IdentifierStr == "for") return tok_for;
+ if (IdentifierStr == "in") return tok_in;</b>
+ return tok_identifier;
+</pre>
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h4><a name="forast">AST Extensions for the 'for' Loop</a></h4>
+<!-- ======================================================================= -->
+
+<div>
+
+<p>The AST node is just as simple. It basically boils down to capturing
+the variable name and the constituent expressions in the node.</p>
+
+<div class="doc_code">
+<pre>
+/// ForExprAST - Expression class for for/in.
+class ForExprAST : public ExprAST {
+ std::string VarName;
+ ExprAST *Start, *End, *Step, *Body;
+public:
+ ForExprAST(const std::string &amp;varname, ExprAST *start, ExprAST *end,
+ ExprAST *step, ExprAST *body)
+ : VarName(varname), Start(start), End(end), Step(step), Body(body) {}
+ virtual Value *Codegen();
+};
+</pre>
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h4><a name="forparser">Parser Extensions for the 'for' Loop</a></h4>
+<!-- ======================================================================= -->
+
+<div>
+
+<p>The parser code is also fairly standard. The only interesting thing here is
+handling of the optional step value. The parser code handles it by checking to
+see if the second comma is present. If not, it sets the step value to null in
+the AST node:</p>
+
+<div class="doc_code">
+<pre>
+/// forexpr ::= 'for' identifier '=' expr ',' expr (',' expr)? 'in' expression
+static ExprAST *ParseForExpr() {
+ getNextToken(); // eat the for.
+
+ if (CurTok != tok_identifier)
+ return Error("expected identifier after for");
+
+ std::string IdName = IdentifierStr;
+ getNextToken(); // eat identifier.
+
+ if (CurTok != '=')
+ return Error("expected '=' after for");
+ getNextToken(); // eat '='.
+
+
+ ExprAST *Start = ParseExpression();
+ if (Start == 0) return 0;
+ if (CurTok != ',')
+ return Error("expected ',' after for start value");
+ getNextToken();
+
+ ExprAST *End = ParseExpression();
+ if (End == 0) return 0;
+
+ // The step value is optional.
+ ExprAST *Step = 0;
+ if (CurTok == ',') {
+ getNextToken();
+ Step = ParseExpression();
+ if (Step == 0) return 0;
+ }
+
+ if (CurTok != tok_in)
+ return Error("expected 'in' after for");
+ getNextToken(); // eat 'in'.
+
+ ExprAST *Body = ParseExpression();
+ if (Body == 0) return 0;
+
+ return new ForExprAST(IdName, Start, End, Step, Body);
+}
+</pre>
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h4><a name="forir">LLVM IR for the 'for' Loop</a></h4>
+<!-- ======================================================================= -->
+
+<div>
+
+<p>Now we get to the good part: the LLVM IR we want to generate for this thing.
+With the simple example above, we get this LLVM IR (note that this dump is
+generated with optimizations disabled for clarity):
+</p>
+
+<div class="doc_code">
+<pre>
+declare double @putchard(double)
+
+define double @printstar(double %n) {
+entry:
+ ; initial value = 1.0 (inlined into phi)
+ br label %loop
+
+loop: ; preds = %loop, %entry
+ %i = phi double [ 1.000000e+00, %entry ], [ %nextvar, %loop ]
+ ; body
+ %calltmp = call double @putchard(double 4.200000e+01)
+ ; increment
+ %nextvar = fadd double %i, 1.000000e+00
+
+ ; termination test
+ %cmptmp = fcmp ult double %i, %n
+ %booltmp = uitofp i1 %cmptmp to double
+ %loopcond = fcmp one double %booltmp, 0.000000e+00
+ br i1 %loopcond, label %loop, label %afterloop
+
+afterloop: ; preds = %loop
+ ; loop always returns 0.0
+ ret double 0.000000e+00
+}
+</pre>
+</div>
+
+<p>This loop contains all the same constructs we saw before: a phi node, several
+expressions, and some basic blocks. Let's see how this fits together.</p>
+
+</div>
+
+<!-- ======================================================================= -->
+<h4><a name="forcodegen">Code Generation for the 'for' Loop</a></h4>
+<!-- ======================================================================= -->
+
+<div>
+
+<p>The first part of Codegen is very simple: we just output the start expression
+for the loop value:</p>
+
+<div class="doc_code">
+<pre>
+Value *ForExprAST::Codegen() {
+ // Emit the start code first, without 'variable' in scope.
+ Value *StartVal = Start-&gt;Codegen();
+ if (StartVal == 0) return 0;
+</pre>
+</div>
+
+<p>With this out of the way, the next step is to set up the LLVM basic block
+for the start of the loop body. In the case above, the whole loop body is one
+block, but remember that the body code itself could consist of multiple blocks
+(e.g. if it contains an if/then/else or a for/in expression).</p>
+
+<div class="doc_code">
+<pre>
+ // Make the new basic block for the loop header, inserting after current
+ // block.
+ Function *TheFunction = Builder.GetInsertBlock()-&gt;getParent();
+ BasicBlock *PreheaderBB = Builder.GetInsertBlock();
+ BasicBlock *LoopBB = BasicBlock::Create(getGlobalContext(), "loop", TheFunction);
+
+ // Insert an explicit fall through from the current block to the LoopBB.
+ Builder.CreateBr(LoopBB);
+</pre>
+</div>
+
+<p>This code is similar to what we saw for if/then/else. Because we will need
+it to create the Phi node, we remember the block that falls through into the
+loop. Once we have that, we create the actual block that starts the loop and
+create an unconditional branch for the fall-through between the two blocks.</p>
+
+<div class="doc_code">
+<pre>
+ // Start insertion in LoopBB.
+ Builder.SetInsertPoint(LoopBB);
+
+ // Start the PHI node with an entry for Start.
+ PHINode *Variable = Builder.CreatePHI(Type::getDoubleTy(getGlobalContext()), 2, VarName.c_str());
+ Variable-&gt;addIncoming(StartVal, PreheaderBB);
+</pre>
+</div>
+
+<p>Now that the "preheader" for the loop is set up, we switch to emitting code
+for the loop body. To begin with, we move the insertion point and create the
+PHI node for the loop induction variable. Since we already know the incoming
+value for the starting value, we add it to the Phi node. Note that the Phi will
+eventually get a second value for the backedge, but we can't set it up yet
+(because it doesn't exist!).</p>
+
+<div class="doc_code">
+<pre>
+ // Within the loop, the variable is defined equal to the PHI node. If it
+ // shadows an existing variable, we have to restore it, so save it now.
+ Value *OldVal = NamedValues[VarName];
+ NamedValues[VarName] = Variable;
+
+ // Emit the body of the loop. This, like any other expr, can change the
+ // current BB. Note that we ignore the value computed by the body, but don't
+ // allow an error.
+ if (Body-&gt;Codegen() == 0)
+ return 0;
+</pre>
+</div>
+
+<p>Now the code starts to get more interesting. Our 'for' loop introduces a new
+variable to the symbol table. This means that our symbol table can now contain
+either function arguments or loop variables. To handle this, before we codegen
+the body of the loop, we add the loop variable as the current value for its
+name. Note that it is possible that there is a variable of the same name in the
+outer scope. It would be easy to make this an error (emit an error and return
+null if there is already an entry for VarName) but we choose to allow shadowing
+of variables. In order to handle this correctly, we remember the Value that
+we are potentially shadowing in <tt>OldVal</tt> (which will be null if there is
+no shadowed variable).</p>
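+
+<p>For example (again, just an illustration), a loop variable can shadow a
+function argument of the same name; inside the loop body, "x" refers to the
+induction variable, and the argument is restored in the symbol table once the
+loop ends:</p>
+
+<div class="doc_code">
+<pre>
+def shadow(x)
+  # The loop variable 'x' shadows the argument 'x' inside the body.
+  for x = 1, x &lt; 3 in
+    putchard(x+64);  # prints 'A' then 'B'
+</pre>
+</div>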
+
+<p>Once the loop variable is set into the symbol table, the code recursively
+codegens the body. This allows the body to use the loop variable: any
+references to it will naturally find it in the symbol table.</p>
+
+<div class="doc_code">
+<pre>
+ // Emit the step value.
+ Value *StepVal;
+ if (Step) {
+ StepVal = Step-&gt;Codegen();
+ if (StepVal == 0) return 0;
+ } else {
+ // If not specified, use 1.0.
+ StepVal = ConstantFP::get(getGlobalContext(), APFloat(1.0));
+ }
+
+ Value *NextVar = Builder.CreateFAdd(Variable, StepVal, "nextvar");
+</pre>
+</div>
+
+<p>Now that the body is emitted, we compute the next value of the iteration
+variable by adding the step value, or 1.0 if it isn't present. '<tt>NextVar</tt>'
+will be the value of the loop variable on the next iteration of the loop.</p>
+
+<div class="doc_code">
+<pre>
+ // Compute the end condition.
+ Value *EndCond = End-&gt;Codegen();
+ if (EndCond == 0) return EndCond;
+
+ // Convert condition to a bool by comparing equal to 0.0.
+ EndCond = Builder.CreateFCmpONE(EndCond,
+ ConstantFP::get(getGlobalContext(), APFloat(0.0)),
+ "loopcond");
+</pre>
+</div>
+
+<p>Finally, we evaluate the loop's exit condition, to determine whether the
+loop should exit. This mirrors the condition evaluation for the if/then/else
+expression.</p>
+
+<div class="doc_code">
+<pre>
+ // Create the "after loop" block and insert it.
+ BasicBlock *LoopEndBB = Builder.GetInsertBlock();
+ BasicBlock *AfterBB = BasicBlock::Create(getGlobalContext(), "afterloop", TheFunction);
+
+ // Insert the conditional branch into the end of LoopEndBB.
+ Builder.CreateCondBr(EndCond, LoopBB, AfterBB);
+
+ // Any new code will be inserted in AfterBB.
+ Builder.SetInsertPoint(AfterBB);
+</pre>
+</div>
+
+<p>With the code for the body of the loop complete, we just need to finish up
+the control flow for it. This code remembers the end block (for the phi node),
+then creates the block for the loop exit ("afterloop"). Based on the value of
+the exit condition, it creates a conditional branch that chooses between
+executing the loop again and exiting the loop. Any future code is emitted in
+the "afterloop" block, so it sets the insertion position to it.</p>
+
+<div class="doc_code">
+<pre>
+ // Add a new entry to the PHI node for the backedge.
+ Variable-&gt;addIncoming(NextVar, LoopEndBB);
+
+ // Restore the unshadowed variable.
+ if (OldVal)
+ NamedValues[VarName] = OldVal;
+ else
+ NamedValues.erase(VarName);
+
+ // for expr always returns 0.0.
+ return Constant::getNullValue(Type::getDoubleTy(getGlobalContext()));
+}
+</pre>
+</div>
+
+<p>The final code handles various cleanups: now that we have the "NextVar"
+value, we can add the incoming value to the loop PHI node. After that, we
+either restore the variable that the loop variable shadowed or remove the loop
+variable from the symbol table, so that it isn't in scope after the for loop.
+Finally, code generation of the for loop always returns 0.0, so that is what
+we return from <tt>ForExprAST::Codegen</tt>.</p>
+
+<p>With this, we conclude the "adding control flow to Kaleidoscope" chapter of
+the tutorial. In this chapter we added two control flow constructs, and used
+them to motivate a couple of aspects of the LLVM IR that are important for
+front-end implementors
+to know. In the next chapter of our saga, we will get a bit crazier and add
+<a href="LangImpl6.html">user-defined operators</a> to our poor innocent
+language.</p>
+
+</div>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="code">Full Code Listing</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>
+Here is the complete code listing for our running example, enhanced with the
+if/then/else and for expressions. To build this example, use:
+</p>
+
+<div class="doc_code">
+<pre>
+# Compile
+clang++ -g toy.cpp `llvm-config --cppflags --ldflags --libs core jit native` -O3 -o toy
+# Run
+./toy
+</pre>
+</div>
+
+<p>Here is the code:</p>
+
+<div class="doc_code">
+<pre>
+#include "llvm/DerivedTypes.h"
+#include "llvm/ExecutionEngine/ExecutionEngine.h"
+#include "llvm/ExecutionEngine/JIT.h"
+#include "llvm/IRBuilder.h"
+#include "llvm/LLVMContext.h"
+#include "llvm/Module.h"
+#include "llvm/PassManager.h"
+#include "llvm/Analysis/Verifier.h"
+#include "llvm/Analysis/Passes.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Transforms/Scalar.h"
+#include "llvm/Support/TargetSelect.h"
+#include &lt;cstdio&gt;
+#include &lt;string&gt;
+#include &lt;map&gt;
+#include &lt;vector&gt;
+using namespace llvm;
+
+//===----------------------------------------------------------------------===//
+// Lexer
+//===----------------------------------------------------------------------===//
+
+// The lexer returns tokens [0-255] if it is an unknown character, otherwise one
+// of these for known things.
+enum Token {
+ tok_eof = -1,
+
+ // commands
+ tok_def = -2, tok_extern = -3,
+
+ // primary
+ tok_identifier = -4, tok_number = -5,
+
+ // control
+ tok_if = -6, tok_then = -7, tok_else = -8,
+ tok_for = -9, tok_in = -10
+};
+
+static std::string IdentifierStr; // Filled in if tok_identifier
+static double NumVal; // Filled in if tok_number
+
+/// gettok - Return the next token from standard input.
+static int gettok() {
+ static int LastChar = ' ';
+
+ // Skip any whitespace.
+ while (isspace(LastChar))
+ LastChar = getchar();
+
+ if (isalpha(LastChar)) { // identifier: [a-zA-Z][a-zA-Z0-9]*
+ IdentifierStr = LastChar;
+ while (isalnum((LastChar = getchar())))
+ IdentifierStr += LastChar;
+
+ if (IdentifierStr == "def") return tok_def;
+ if (IdentifierStr == "extern") return tok_extern;
+ if (IdentifierStr == "if") return tok_if;
+ if (IdentifierStr == "then") return tok_then;
+ if (IdentifierStr == "else") return tok_else;
+ if (IdentifierStr == "for") return tok_for;
+ if (IdentifierStr == "in") return tok_in;
+ return tok_identifier;
+ }
+
+ if (isdigit(LastChar) || LastChar == '.') { // Number: [0-9.]+
+ std::string NumStr;
+ do {
+ NumStr += LastChar;
+ LastChar = getchar();
+ } while (isdigit(LastChar) || LastChar == '.');
+
+ NumVal = strtod(NumStr.c_str(), 0);
+ return tok_number;
+ }
+
+ if (LastChar == '#') {
+ // Comment until end of line.
+ do LastChar = getchar();
+ while (LastChar != EOF &amp;&amp; LastChar != '\n' &amp;&amp; LastChar != '\r');
+
+ if (LastChar != EOF)
+ return gettok();
+ }
+
+ // Check for end of file. Don't eat the EOF.
+ if (LastChar == EOF)
+ return tok_eof;
+
+ // Otherwise, just return the character as its ascii value.
+ int ThisChar = LastChar;
+ LastChar = getchar();
+ return ThisChar;
+}
+
+//===----------------------------------------------------------------------===//
+// Abstract Syntax Tree (aka Parse Tree)
+//===----------------------------------------------------------------------===//
+
+/// ExprAST - Base class for all expression nodes.
+class ExprAST {
+public:
+ virtual ~ExprAST() {}
+ virtual Value *Codegen() = 0;
+};
+
+/// NumberExprAST - Expression class for numeric literals like "1.0".
+class NumberExprAST : public ExprAST {
+ double Val;
+public:
+ NumberExprAST(double val) : Val(val) {}
+ virtual Value *Codegen();
+};
+
+/// VariableExprAST - Expression class for referencing a variable, like "a".
+class VariableExprAST : public ExprAST {
+ std::string Name;
+public:
+ VariableExprAST(const std::string &amp;name) : Name(name) {}
+ virtual Value *Codegen();
+};
+
+/// BinaryExprAST - Expression class for a binary operator.
+class BinaryExprAST : public ExprAST {
+ char Op;
+ ExprAST *LHS, *RHS;
+public:
+ BinaryExprAST(char op, ExprAST *lhs, ExprAST *rhs)
+ : Op(op), LHS(lhs), RHS(rhs) {}
+ virtual Value *Codegen();
+};
+
+/// CallExprAST - Expression class for function calls.
+class CallExprAST : public ExprAST {
+ std::string Callee;
+ std::vector&lt;ExprAST*&gt; Args;
+public:
+ CallExprAST(const std::string &amp;callee, std::vector&lt;ExprAST*&gt; &amp;args)
+ : Callee(callee), Args(args) {}
+ virtual Value *Codegen();
+};
+
+/// IfExprAST - Expression class for if/then/else.
+class IfExprAST : public ExprAST {
+ ExprAST *Cond, *Then, *Else;
+public:
+ IfExprAST(ExprAST *cond, ExprAST *then, ExprAST *_else)
+ : Cond(cond), Then(then), Else(_else) {}
+ virtual Value *Codegen();
+};
+
+/// ForExprAST - Expression class for for/in.
+class ForExprAST : public ExprAST {
+ std::string VarName;
+ ExprAST *Start, *End, *Step, *Body;
+public:
+ ForExprAST(const std::string &amp;varname, ExprAST *start, ExprAST *end,
+ ExprAST *step, ExprAST *body)
+ : VarName(varname), Start(start), End(end), Step(step), Body(body) {}
+ virtual Value *Codegen();
+};
+
+/// PrototypeAST - This class represents the "prototype" for a function,
+/// which captures its name, and its argument names (thus implicitly the number
+/// of arguments the function takes).
+class PrototypeAST {
+ std::string Name;
+ std::vector&lt;std::string&gt; Args;
+public:
+ PrototypeAST(const std::string &amp;name, const std::vector&lt;std::string&gt; &amp;args)
+ : Name(name), Args(args) {}
+
+ Function *Codegen();
+};
+
+/// FunctionAST - This class represents a function definition itself.
+class FunctionAST {
+ PrototypeAST *Proto;
+ ExprAST *Body;
+public:
+ FunctionAST(PrototypeAST *proto, ExprAST *body)
+ : Proto(proto), Body(body) {}
+
+ Function *Codegen();
+};
+
+//===----------------------------------------------------------------------===//
+// Parser
+//===----------------------------------------------------------------------===//
+
+/// CurTok/getNextToken - Provide a simple token buffer. CurTok is the current
+/// token the parser is looking at. getNextToken reads another token from the
+/// lexer and updates CurTok with its results.
+static int CurTok;
+static int getNextToken() {
+ return CurTok = gettok();
+}
+
+/// BinopPrecedence - This holds the precedence for each binary operator that is
+/// defined.
+static std::map&lt;char, int&gt; BinopPrecedence;
+
+/// GetTokPrecedence - Get the precedence of the pending binary operator token.
+static int GetTokPrecedence() {
+ if (!isascii(CurTok))
+ return -1;
+
+ // Make sure it's a declared binop.
+ int TokPrec = BinopPrecedence[CurTok];
+ if (TokPrec &lt;= 0) return -1;
+ return TokPrec;
+}
+
+/// Error* - These are little helper functions for error handling.
+ExprAST *Error(const char *Str) { fprintf(stderr, "Error: %s\n", Str);return 0;}
+PrototypeAST *ErrorP(const char *Str) { Error(Str); return 0; }
+FunctionAST *ErrorF(const char *Str) { Error(Str); return 0; }
+
+static ExprAST *ParseExpression();
+
+/// identifierexpr
+/// ::= identifier
+/// ::= identifier '(' expression* ')'
+static ExprAST *ParseIdentifierExpr() {
+ std::string IdName = IdentifierStr;
+
+ getNextToken(); // eat identifier.
+
+ if (CurTok != '(') // Simple variable ref.
+ return new VariableExprAST(IdName);
+
+ // Call.
+ getNextToken(); // eat (
+ std::vector&lt;ExprAST*&gt; Args;
+ if (CurTok != ')') {
+ while (1) {
+ ExprAST *Arg = ParseExpression();
+ if (!Arg) return 0;
+ Args.push_back(Arg);
+
+ if (CurTok == ')') break;
+
+ if (CurTok != ',')
+ return Error("Expected ')' or ',' in argument list");
+ getNextToken();
+ }
+ }
+
+ // Eat the ')'.
+ getNextToken();
+
+ return new CallExprAST(IdName, Args);
+}
+
+/// numberexpr ::= number
+static ExprAST *ParseNumberExpr() {
+ ExprAST *Result = new NumberExprAST(NumVal);
+ getNextToken(); // consume the number
+ return Result;
+}
+
+/// parenexpr ::= '(' expression ')'
+static ExprAST *ParseParenExpr() {
+ getNextToken(); // eat (.
+ ExprAST *V = ParseExpression();
+ if (!V) return 0;
+
+ if (CurTok != ')')
+ return Error("expected ')'");
+ getNextToken(); // eat ).
+ return V;
+}
+
+/// ifexpr ::= 'if' expression 'then' expression 'else' expression
+static ExprAST *ParseIfExpr() {
+ getNextToken(); // eat the if.
+
+ // condition.
+ ExprAST *Cond = ParseExpression();
+ if (!Cond) return 0;
+
+ if (CurTok != tok_then)
+ return Error("expected then");
+ getNextToken(); // eat the then
+
+ ExprAST *Then = ParseExpression();
+ if (Then == 0) return 0;
+
+ if (CurTok != tok_else)
+ return Error("expected else");
+
+ getNextToken();
+
+ ExprAST *Else = ParseExpression();
+ if (!Else) return 0;
+
+ return new IfExprAST(Cond, Then, Else);
+}
+
+/// forexpr ::= 'for' identifier '=' expr ',' expr (',' expr)? 'in' expression
+static ExprAST *ParseForExpr() {
+ getNextToken(); // eat the for.
+
+ if (CurTok != tok_identifier)
+ return Error("expected identifier after for");
+
+ std::string IdName = IdentifierStr;
+ getNextToken(); // eat identifier.
+
+ if (CurTok != '=')
+ return Error("expected '=' after for");
+ getNextToken(); // eat '='.
+
+
+ ExprAST *Start = ParseExpression();
+ if (Start == 0) return 0;
+ if (CurTok != ',')
+ return Error("expected ',' after for start value");
+ getNextToken();
+
+ ExprAST *End = ParseExpression();
+ if (End == 0) return 0;
+
+ // The step value is optional.
+ ExprAST *Step = 0;
+ if (CurTok == ',') {
+ getNextToken();
+ Step = ParseExpression();
+ if (Step == 0) return 0;
+ }
+
+ if (CurTok != tok_in)
+ return Error("expected 'in' after for");
+ getNextToken(); // eat 'in'.
+
+ ExprAST *Body = ParseExpression();
+ if (Body == 0) return 0;
+
+ return new ForExprAST(IdName, Start, End, Step, Body);
+}
+
+/// primary
+/// ::= identifierexpr
+/// ::= numberexpr
+/// ::= parenexpr
+/// ::= ifexpr
+/// ::= forexpr
+static ExprAST *ParsePrimary() {
+ switch (CurTok) {
+ default: return Error("unknown token when expecting an expression");
+ case tok_identifier: return ParseIdentifierExpr();
+ case tok_number: return ParseNumberExpr();
+ case '(': return ParseParenExpr();
+ case tok_if: return ParseIfExpr();
+ case tok_for: return ParseForExpr();
+ }
+}
+
+/// binoprhs
+/// ::= ('+' primary)*
+static ExprAST *ParseBinOpRHS(int ExprPrec, ExprAST *LHS) {
+ // If this is a binop, find its precedence.
+ while (1) {
+ int TokPrec = GetTokPrecedence();
+
+ // If this is a binop that binds at least as tightly as the current binop,
+ // consume it, otherwise we are done.
+ if (TokPrec &lt; ExprPrec)
+ return LHS;
+
+ // Okay, we know this is a binop.
+ int BinOp = CurTok;
+ getNextToken(); // eat binop
+
+ // Parse the primary expression after the binary operator.
+ ExprAST *RHS = ParsePrimary();
+ if (!RHS) return 0;
+
+ // If BinOp binds less tightly with RHS than the operator after RHS, let
+ // the pending operator take RHS as its LHS.
+ int NextPrec = GetTokPrecedence();
+ if (TokPrec &lt; NextPrec) {
+ RHS = ParseBinOpRHS(TokPrec+1, RHS);
+ if (RHS == 0) return 0;
+ }
+
+ // Merge LHS/RHS.
+ LHS = new BinaryExprAST(BinOp, LHS, RHS);
+ }
+}
+
+/// expression
+/// ::= primary binoprhs
+///
+static ExprAST *ParseExpression() {
+ ExprAST *LHS = ParsePrimary();
+ if (!LHS) return 0;
+
+ return ParseBinOpRHS(0, LHS);
+}
+
+/// prototype
+/// ::= id '(' id* ')'
+static PrototypeAST *ParsePrototype() {
+ if (CurTok != tok_identifier)
+ return ErrorP("Expected function name in prototype");
+
+ std::string FnName = IdentifierStr;
+ getNextToken();
+
+ if (CurTok != '(')
+ return ErrorP("Expected '(' in prototype");
+
+ std::vector&lt;std::string&gt; ArgNames;
+ while (getNextToken() == tok_identifier)
+ ArgNames.push_back(IdentifierStr);
+ if (CurTok != ')')
+ return ErrorP("Expected ')' in prototype");
+
+ // success.
+ getNextToken(); // eat ')'.
+
+ return new PrototypeAST(FnName, ArgNames);
+}
+
+/// definition ::= 'def' prototype expression
+static FunctionAST *ParseDefinition() {
+ getNextToken(); // eat def.
+ PrototypeAST *Proto = ParsePrototype();
+ if (Proto == 0) return 0;
+
+ if (ExprAST *E = ParseExpression())
+ return new FunctionAST(Proto, E);
+ return 0;
+}
+
+/// toplevelexpr ::= expression
+static FunctionAST *ParseTopLevelExpr() {
+ if (ExprAST *E = ParseExpression()) {
+ // Make an anonymous proto.
+ PrototypeAST *Proto = new PrototypeAST("", std::vector&lt;std::string&gt;());
+ return new FunctionAST(Proto, E);
+ }
+ return 0;
+}
+
+/// external ::= 'extern' prototype
+static PrototypeAST *ParseExtern() {
+ getNextToken(); // eat extern.
+ return ParsePrototype();
+}
+
+//===----------------------------------------------------------------------===//
+// Code Generation
+//===----------------------------------------------------------------------===//
+
+static Module *TheModule;
+static IRBuilder&lt;&gt; Builder(getGlobalContext());
+static std::map&lt;std::string, Value*&gt; NamedValues;
+static FunctionPassManager *TheFPM;
+
+Value *ErrorV(const char *Str) { Error(Str); return 0; }
+
+Value *NumberExprAST::Codegen() {
+ return ConstantFP::get(getGlobalContext(), APFloat(Val));
+}
+
+Value *VariableExprAST::Codegen() {
+ // Look this variable up in the function.
+ Value *V = NamedValues[Name];
+ return V ? V : ErrorV("Unknown variable name");
+}
+
+Value *BinaryExprAST::Codegen() {
+ Value *L = LHS-&gt;Codegen();
+ Value *R = RHS-&gt;Codegen();
+ if (L == 0 || R == 0) return 0;
+
+ switch (Op) {
+ case '+': return Builder.CreateFAdd(L, R, "addtmp");
+ case '-': return Builder.CreateFSub(L, R, "subtmp");
+ case '*': return Builder.CreateFMul(L, R, "multmp");
+ case '&lt;':
+ L = Builder.CreateFCmpULT(L, R, "cmptmp");
+ // Convert bool 0/1 to double 0.0 or 1.0
+ return Builder.CreateUIToFP(L, Type::getDoubleTy(getGlobalContext()),
+ "booltmp");
+ default: return ErrorV("invalid binary operator");
+ }
+}
+
+Value *CallExprAST::Codegen() {
+ // Look up the name in the global module table.
+ Function *CalleeF = TheModule-&gt;getFunction(Callee);
+ if (CalleeF == 0)
+ return ErrorV("Unknown function referenced");
+
+ // If argument mismatch error.
+ if (CalleeF-&gt;arg_size() != Args.size())
+ return ErrorV("Incorrect # arguments passed");
+
+ std::vector&lt;Value*&gt; ArgsV;
+ for (unsigned i = 0, e = Args.size(); i != e; ++i) {
+ ArgsV.push_back(Args[i]-&gt;Codegen());
+ if (ArgsV.back() == 0) return 0;
+ }
+
+ return Builder.CreateCall(CalleeF, ArgsV, "calltmp");
+}
+
+Value *IfExprAST::Codegen() {
+ Value *CondV = Cond-&gt;Codegen();
+ if (CondV == 0) return 0;
+
+ // Convert condition to a bool by comparing equal to 0.0.
+ CondV = Builder.CreateFCmpONE(CondV,
+ ConstantFP::get(getGlobalContext(), APFloat(0.0)),
+ "ifcond");
+
+ Function *TheFunction = Builder.GetInsertBlock()-&gt;getParent();
+
+ // Create blocks for the then and else cases. Insert the 'then' block at the
+ // end of the function.
+ BasicBlock *ThenBB = BasicBlock::Create(getGlobalContext(), "then", TheFunction);
+ BasicBlock *ElseBB = BasicBlock::Create(getGlobalContext(), "else");
+ BasicBlock *MergeBB = BasicBlock::Create(getGlobalContext(), "ifcont");
+
+ Builder.CreateCondBr(CondV, ThenBB, ElseBB);
+
+ // Emit then value.
+ Builder.SetInsertPoint(ThenBB);
+
+ Value *ThenV = Then-&gt;Codegen();
+ if (ThenV == 0) return 0;
+
+ Builder.CreateBr(MergeBB);
+ // Codegen of 'Then' can change the current block, update ThenBB for the PHI.
+ ThenBB = Builder.GetInsertBlock();
+
+ // Emit else block.
+ TheFunction-&gt;getBasicBlockList().push_back(ElseBB);
+ Builder.SetInsertPoint(ElseBB);
+
+ Value *ElseV = Else-&gt;Codegen();
+ if (ElseV == 0) return 0;
+
+ Builder.CreateBr(MergeBB);
+ // Codegen of 'Else' can change the current block, update ElseBB for the PHI.
+ ElseBB = Builder.GetInsertBlock();
+
+ // Emit merge block.
+ TheFunction-&gt;getBasicBlockList().push_back(MergeBB);
+ Builder.SetInsertPoint(MergeBB);
+ PHINode *PN = Builder.CreatePHI(Type::getDoubleTy(getGlobalContext()), 2,
+ "iftmp");
+
+ PN-&gt;addIncoming(ThenV, ThenBB);
+ PN-&gt;addIncoming(ElseV, ElseBB);
+ return PN;
+}
+
+Value *ForExprAST::Codegen() {
+ // Output this as:
+ // ...
+ // start = startexpr
+ // goto loop
+ // loop:
+ // variable = phi [start, loopheader], [nextvariable, loopend]
+ // ...
+ // bodyexpr
+ // ...
+ // loopend:
+ // step = stepexpr
+ // nextvariable = variable + step
+ // endcond = endexpr
+ // br endcond, loop, endloop
+ // outloop:
+
+ // Emit the start code first, without 'variable' in scope.
+ Value *StartVal = Start-&gt;Codegen();
+ if (StartVal == 0) return 0;
+
+ // Make the new basic block for the loop header, inserting after current
+ // block.
+ Function *TheFunction = Builder.GetInsertBlock()-&gt;getParent();
+ BasicBlock *PreheaderBB = Builder.GetInsertBlock();
+ BasicBlock *LoopBB = BasicBlock::Create(getGlobalContext(), "loop", TheFunction);
+
+ // Insert an explicit fall through from the current block to the LoopBB.
+ Builder.CreateBr(LoopBB);
+
+ // Start insertion in LoopBB.
+ Builder.SetInsertPoint(LoopBB);
+
+ // Start the PHI node with an entry for Start.
+ PHINode *Variable = Builder.CreatePHI(Type::getDoubleTy(getGlobalContext()), 2, VarName.c_str());
+ Variable-&gt;addIncoming(StartVal, PreheaderBB);
+
+ // Within the loop, the variable is defined equal to the PHI node. If it
+ // shadows an existing variable, we have to restore it, so save it now.
+ Value *OldVal = NamedValues[VarName];
+ NamedValues[VarName] = Variable;
+
+ // Emit the body of the loop. This, like any other expr, can change the
+ // current BB. Note that we ignore the value computed by the body, but don't
+ // allow an error.
+ if (Body-&gt;Codegen() == 0)
+ return 0;
+
+ // Emit the step value.
+ Value *StepVal;
+ if (Step) {
+ StepVal = Step-&gt;Codegen();
+ if (StepVal == 0) return 0;
+ } else {
+ // If not specified, use 1.0.
+ StepVal = ConstantFP::get(getGlobalContext(), APFloat(1.0));
+ }
+
+ Value *NextVar = Builder.CreateFAdd(Variable, StepVal, "nextvar");
+
+ // Compute the end condition.
+ Value *EndCond = End-&gt;Codegen();
+ if (EndCond == 0) return EndCond;
+
+ // Convert condition to a bool by comparing equal to 0.0.
+ EndCond = Builder.CreateFCmpONE(EndCond,
+ ConstantFP::get(getGlobalContext(), APFloat(0.0)),
+ "loopcond");
+
+ // Create the "after loop" block and insert it.
+ BasicBlock *LoopEndBB = Builder.GetInsertBlock();
+ BasicBlock *AfterBB = BasicBlock::Create(getGlobalContext(), "afterloop", TheFunction);
+
+ // Insert the conditional branch into the end of LoopEndBB.
+ Builder.CreateCondBr(EndCond, LoopBB, AfterBB);
+
+ // Any new code will be inserted in AfterBB.
+ Builder.SetInsertPoint(AfterBB);
+
+ // Add a new entry to the PHI node for the backedge.
+ Variable-&gt;addIncoming(NextVar, LoopEndBB);
+
+ // Restore the unshadowed variable.
+ if (OldVal)
+ NamedValues[VarName] = OldVal;
+ else
+ NamedValues.erase(VarName);
+
+
+ // for expr always returns 0.0.
+ return Constant::getNullValue(Type::getDoubleTy(getGlobalContext()));
+}
+
+Function *PrototypeAST::Codegen() {
+ // Make the function type: double(double,double) etc.
+ std::vector&lt;Type*&gt; Doubles(Args.size(),
+ Type::getDoubleTy(getGlobalContext()));
+ FunctionType *FT = FunctionType::get(Type::getDoubleTy(getGlobalContext()),
+ Doubles, false);
+
+ Function *F = Function::Create(FT, Function::ExternalLinkage, Name, TheModule);
+
+ // If F conflicted, there was already something named 'Name'. If it has a
+ // body, don't allow redefinition or reextern.
+ if (F-&gt;getName() != Name) {
+ // Delete the one we just made and get the existing one.
+ F-&gt;eraseFromParent();
+ F = TheModule-&gt;getFunction(Name);
+
+ // If F already has a body, reject this.
+ if (!F-&gt;empty()) {
+ ErrorF("redefinition of function");
+ return 0;
+ }
+
+ // If F took a different number of args, reject.
+ if (F-&gt;arg_size() != Args.size()) {
+ ErrorF("redefinition of function with different # args");
+ return 0;
+ }
+ }
+
+ // Set names for all arguments.
+ unsigned Idx = 0;
+ for (Function::arg_iterator AI = F-&gt;arg_begin(); Idx != Args.size();
+ ++AI, ++Idx) {
+ AI-&gt;setName(Args[Idx]);
+
+ // Add arguments to variable symbol table.
+ NamedValues[Args[Idx]] = AI;
+ }
+
+ return F;
+}
+
+Function *FunctionAST::Codegen() {
+ NamedValues.clear();
+
+ Function *TheFunction = Proto-&gt;Codegen();
+ if (TheFunction == 0)
+ return 0;
+
+ // Create a new basic block to start insertion into.
+ BasicBlock *BB = BasicBlock::Create(getGlobalContext(), "entry", TheFunction);
+ Builder.SetInsertPoint(BB);
+
+ if (Value *RetVal = Body-&gt;Codegen()) {
+ // Finish off the function.
+ Builder.CreateRet(RetVal);
+
+ // Validate the generated code, checking for consistency.
+ verifyFunction(*TheFunction);
+
+ // Optimize the function.
+ TheFPM-&gt;run(*TheFunction);
+
+ return TheFunction;
+ }
+
+ // Error reading body, remove function.
+ TheFunction-&gt;eraseFromParent();
+ return 0;
+}
+
+//===----------------------------------------------------------------------===//
+// Top-Level parsing and JIT Driver
+//===----------------------------------------------------------------------===//
+
+static ExecutionEngine *TheExecutionEngine;
+
+static void HandleDefinition() {
+ if (FunctionAST *F = ParseDefinition()) {
+ if (Function *LF = F-&gt;Codegen()) {
+ fprintf(stderr, "Read function definition:");
+ LF-&gt;dump();
+ }
+ } else {
+ // Skip token for error recovery.
+ getNextToken();
+ }
+}
+
+static void HandleExtern() {
+ if (PrototypeAST *P = ParseExtern()) {
+ if (Function *F = P-&gt;Codegen()) {
+ fprintf(stderr, "Read extern: ");
+ F-&gt;dump();
+ }
+ } else {
+ // Skip token for error recovery.
+ getNextToken();
+ }
+}
+
+static void HandleTopLevelExpression() {
+ // Evaluate a top-level expression into an anonymous function.
+ if (FunctionAST *F = ParseTopLevelExpr()) {
+ if (Function *LF = F-&gt;Codegen()) {
+ // JIT the function, returning a function pointer.
+ void *FPtr = TheExecutionEngine-&gt;getPointerToFunction(LF);
+
+ // Cast it to the right type (takes no arguments, returns a double) so we
+ // can call it as a native function.
+ double (*FP)() = (double (*)())(intptr_t)FPtr;
+ fprintf(stderr, "Evaluated to %f\n", FP());
+ }
+ } else {
+ // Skip token for error recovery.
+ getNextToken();
+ }
+}
+
+/// top ::= definition | external | expression | ';'
+static void MainLoop() {
+ while (1) {
+ fprintf(stderr, "ready&gt; ");
+ switch (CurTok) {
+ case tok_eof: return;
+ case ';': getNextToken(); break; // ignore top-level semicolons.
+ case tok_def: HandleDefinition(); break;
+ case tok_extern: HandleExtern(); break;
+ default: HandleTopLevelExpression(); break;
+ }
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// "Library" functions that can be "extern'd" from user code.
+//===----------------------------------------------------------------------===//
+
+/// putchard - putchar that takes a double and returns 0.
+extern "C"
+double putchard(double X) {
+ putchar((char)X);
+ return 0;
+}
+
+//===----------------------------------------------------------------------===//
+// Main driver code.
+//===----------------------------------------------------------------------===//
+
+int main() {
+ InitializeNativeTarget();
+ LLVMContext &amp;Context = getGlobalContext();
+
+ // Install standard binary operators.
+ // 1 is lowest precedence.
+ BinopPrecedence['&lt;'] = 10;
+ BinopPrecedence['+'] = 20;
+ BinopPrecedence['-'] = 20;
+ BinopPrecedence['*'] = 40; // highest.
+
+ // Prime the first token.
+ fprintf(stderr, "ready&gt; ");
+ getNextToken();
+
+ // Make the module, which holds all the code.
+ TheModule = new Module("my cool jit", Context);
+
+ // Create the JIT. This takes ownership of the module.
+ std::string ErrStr;
+ TheExecutionEngine = EngineBuilder(TheModule).setErrorStr(&amp;ErrStr).create();
+ if (!TheExecutionEngine) {
+ fprintf(stderr, "Could not create ExecutionEngine: %s\n", ErrStr.c_str());
+ exit(1);
+ }
+
+ FunctionPassManager OurFPM(TheModule);
+
+ // Set up the optimizer pipeline. Start with registering info about how the
+ // target lays out data structures.
+ OurFPM.add(new TargetData(*TheExecutionEngine-&gt;getTargetData()));
+ // Provide basic AliasAnalysis support for GVN.
+ OurFPM.add(createBasicAliasAnalysisPass());
+ // Do simple "peephole" optimizations and bit-twiddling optzns.
+ OurFPM.add(createInstructionCombiningPass());
+ // Reassociate expressions.
+ OurFPM.add(createReassociatePass());
+ // Eliminate Common SubExpressions.
+ OurFPM.add(createGVNPass());
+ // Simplify the control flow graph (deleting unreachable blocks, etc).
+ OurFPM.add(createCFGSimplificationPass());
+
+ OurFPM.doInitialization();
+
+ // Set the global so the code gen can use this.
+ TheFPM = &amp;OurFPM;
+
+ // Run the main "interpreter loop" now.
+ MainLoop();
+
+ TheFPM = 0;
+
+ // Print out all of the generated code.
+ TheModule-&gt;dump();
+
+ return 0;
+}
+</pre>
+</div>
+
+<a href="LangImpl6.html">Next: Extending the language: user-defined operators</a>
+</div>
+
+<!-- *********************************************************************** -->
+<hr>
+<address>
+ <a href="http://jigsaw.w3.org/css-validator/check/referer"><img
+ src="http://jigsaw.w3.org/css-validator/images/vcss" alt="Valid CSS!"></a>
+ <a href="http://validator.w3.org/check/referer"><img
+ src="http://www.w3.org/Icons/valid-html401" alt="Valid HTML 4.01!"></a>
+
+ <a href="mailto:sabre@nondot.org">Chris Lattner</a><br>
+ <a href="http://llvm.org/">The LLVM Compiler Infrastructure</a><br>
+ Last modified: $Date$
+</address>
+</body>
+</html>
diff --git a/docs/tutorial/LangImpl6.html b/docs/tutorial/LangImpl6.html
new file mode 100644
index 00000000000..a76298012fd
--- /dev/null
+++ b/docs/tutorial/LangImpl6.html
@@ -0,0 +1,1829 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
+ "http://www.w3.org/TR/html4/strict.dtd">
+
+<html>
+<head>
+ <title>Kaleidoscope: Extending the Language: User-defined Operators</title>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
+ <meta name="author" content="Chris Lattner">
+ <link rel="stylesheet" href="../_static/llvm.css" type="text/css">
+</head>
+
+<body>
+
+<h1>Kaleidoscope: Extending the Language: User-defined Operators</h1>
+
+<ul>
+<li><a href="index.html">Up to Tutorial Index</a></li>
+<li>Chapter 6
+ <ol>
+ <li><a href="#intro">Chapter 6 Introduction</a></li>
+ <li><a href="#idea">User-defined Operators: the Idea</a></li>
+ <li><a href="#binary">User-defined Binary Operators</a></li>
+ <li><a href="#unary">User-defined Unary Operators</a></li>
+ <li><a href="#example">Kicking the Tires</a></li>
+ <li><a href="#code">Full Code Listing</a></li>
+ </ol>
+</li>
+<li><a href="LangImpl7.html">Chapter 7</a>: Extending the Language: Mutable
+Variables / SSA Construction</li>
+</ul>
+
+<div class="doc_author">
+ <p>Written by <a href="mailto:sabre@nondot.org">Chris Lattner</a></p>
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="intro">Chapter 6 Introduction</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>Welcome to Chapter 6 of the "<a href="index.html">Implementing a language
+with LLVM</a>" tutorial. At this point in our tutorial, we now have a fully
+functional language that is fairly minimal, but also useful. There
+is still one big problem with it, however. Our language doesn't have many
+useful operators (like division, logical negation, or even any comparisons
+besides less-than).</p>
+
+<p>This chapter of the tutorial takes a wild digression into adding user-defined
+operators to the simple and beautiful Kaleidoscope language. In some ways this
+digression leaves us with a simple and ugly language, yet at the same time a powerful one.
+One of the great things about creating your own language is that you get to
+decide what is good or bad. In this tutorial we'll assume that it is okay to
+use this as a way to show some interesting parsing techniques.</p>
+
+<p>At the end of this tutorial, we'll run through an example Kaleidoscope
+application that <a href="#example">renders the Mandelbrot set</a>. This gives
+an example of what you can build with Kaleidoscope and its feature set.</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="idea">User-defined Operators: the Idea</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>
+The "operator overloading" that we will add to Kaleidoscope is more general than
+that of languages like C++. In C++, you are only allowed to redefine existing
+operators: you can't programmatically change the grammar, introduce new
+operators, change precedence levels, etc. In this chapter, we will add this
+capability to Kaleidoscope, which will let the user round out the set of
+operators that are supported.</p>
+
+<p>The point of going into user-defined operators in a tutorial like this is to
+show the power and flexibility of using a hand-written parser. Thus far, the parser
+we have been implementing uses recursive descent for most parts of the grammar and
+operator precedence parsing for the expressions. See <a
+href="LangImpl2.html">Chapter 2</a> for details. Because we use operator
+precedence parsing, it is easy to let the programmer introduce new operators
+into the grammar: the grammar is dynamically extensible
+as the JIT runs.</p>
+
+<p>The two specific features we'll add are programmable unary operators (right
+now, Kaleidoscope has no unary operators at all) as well as binary operators.
+An example of this is:</p>
+
+<div class="doc_code">
+<pre>
+# Logical unary not.
+def unary!(v)
+ if v then
+ 0
+ else
+ 1;
+
+# Define &gt; with the same precedence as &lt;.
+def binary&gt; 10 (LHS RHS)
+ RHS &lt; LHS;
+
+# Binary "logical or" (note that it does not "short circuit")
+def binary| 5 (LHS RHS)
+ if LHS then
+ 1
+ else if RHS then
+ 1
+ else
+ 0;
+
+# Define = with slightly lower precedence than relationals.
+def binary= 9 (LHS RHS)
+ !(LHS &lt; RHS | LHS &gt; RHS);
+</pre>
+</div>
+
+<p>Many languages aspire to being able to implement their standard runtime
+library in the language itself. In Kaleidoscope, we can implement significant
+parts of the language in the library!</p>
+
+<p>We will break down implementation of these features into two parts:
+implementing support for user-defined binary operators and adding unary
+operators.</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="binary">User-defined Binary Operators</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>Adding support for user-defined binary operators is pretty simple with our
+current framework. We'll first add support for the unary/binary keywords:</p>
+
+<div class="doc_code">
+<pre>
+enum Token {
+ ...
+ <b>// operators
+ tok_binary = -11, tok_unary = -12</b>
+};
+...
+static int gettok() {
+...
+ if (IdentifierStr == "for") return tok_for;
+ if (IdentifierStr == "in") return tok_in;
+ <b>if (IdentifierStr == "binary") return tok_binary;
+ if (IdentifierStr == "unary") return tok_unary;</b>
+ return tok_identifier;
+</pre>
+</div>
+
+<p>This just adds lexer support for the unary and binary keywords, like we
+did in <a href="LangImpl5.html#iflexer">previous chapters</a>. One nice thing
+about our current AST is that we represent binary operators with full generality
+by using their ASCII code as the opcode. For our extended operators, we'll use this
+same representation, so we don't need any new AST or parser support.</p>
+
+<p>On the other hand, we have to be able to represent the definitions of these
+new operators, in the "def binary| 5" part of the function definition. In our
+grammar so far, the "name" for the function definition is parsed as the
+"prototype" production and into the <tt>PrototypeAST</tt> AST node. To
+represent our new user-defined operators as prototypes, we have to extend
+the <tt>PrototypeAST</tt> AST node like this:</p>
+
+<div class="doc_code">
+<pre>
+/// PrototypeAST - This class represents the "prototype" for a function,
+/// which captures its argument names as well as if it is an operator.
+class PrototypeAST {
+ std::string Name;
+ std::vector&lt;std::string&gt; Args;
+ <b>bool isOperator;
+ unsigned Precedence; // Precedence if a binary op.</b>
+public:
+ PrototypeAST(const std::string &amp;name, const std::vector&lt;std::string&gt; &amp;args,
+ <b>bool isoperator = false, unsigned prec = 0</b>)
+ : Name(name), Args(args), <b>isOperator(isoperator), Precedence(prec)</b> {}
+
+ <b>bool isUnaryOp() const { return isOperator &amp;&amp; Args.size() == 1; }
+ bool isBinaryOp() const { return isOperator &amp;&amp; Args.size() == 2; }
+
+ char getOperatorName() const {
+ assert(isUnaryOp() || isBinaryOp());
+ return Name[Name.size()-1];
+ }
+
+ unsigned getBinaryPrecedence() const { return Precedence; }</b>
+
+ Function *Codegen();
+};
+</pre>
+</div>
+
+<p>Basically, in addition to knowing a name for the prototype, we now keep track
+of whether it was an operator, and if it was, what precedence level the operator
+is at. The precedence is only used for binary operators (as you'll see below,
+it just doesn't apply for unary operators). Now that we have a way to represent
+the prototype for a user-defined operator, we need to parse it:</p>
+
+<div class="doc_code">
+<pre>
+/// prototype
+/// ::= id '(' id* ')'
+<b>/// ::= binary LETTER number? (id, id)</b>
+static PrototypeAST *ParsePrototype() {
+ std::string FnName;
+
+ <b>unsigned Kind = 0; // 0 = identifier, 1 = unary, 2 = binary.
+ unsigned BinaryPrecedence = 30;</b>
+
+ switch (CurTok) {
+ default:
+ return ErrorP("Expected function name in prototype");
+ case tok_identifier:
+ FnName = IdentifierStr;
+ Kind = 0;
+ getNextToken();
+ break;
+ <b>case tok_binary:
+ getNextToken();
+ if (!isascii(CurTok))
+ return ErrorP("Expected binary operator");
+ FnName = "binary";
+ FnName += (char)CurTok;
+ Kind = 2;
+ getNextToken();
+
+ // Read the precedence if present.
+ if (CurTok == tok_number) {
+ if (NumVal &lt; 1 || NumVal &gt; 100)
+        return ErrorP("Invalid precedence: must be 1..100");
+ BinaryPrecedence = (unsigned)NumVal;
+ getNextToken();
+ }
+ break;</b>
+ }
+
+ if (CurTok != '(')
+ return ErrorP("Expected '(' in prototype");
+
+ std::vector&lt;std::string&gt; ArgNames;
+ while (getNextToken() == tok_identifier)
+ ArgNames.push_back(IdentifierStr);
+ if (CurTok != ')')
+ return ErrorP("Expected ')' in prototype");
+
+ // success.
+ getNextToken(); // eat ')'.
+
+ <b>// Verify right number of names for operator.
+ if (Kind &amp;&amp; ArgNames.size() != Kind)
+ return ErrorP("Invalid number of operands for operator");
+
+ return new PrototypeAST(FnName, ArgNames, Kind != 0, BinaryPrecedence);</b>
+}
+</pre>
+</div>
+
+<p>This is all fairly straightforward parsing code, and we have already seen
+a lot of similar code in the past. One interesting part about the code above is
+the couple of lines that set up <tt>FnName</tt> for binary operators. This builds names
+like "binary@" for a newly defined "@" operator. This then takes advantage of the
+fact that symbol names in the LLVM symbol table are allowed to have any character in
+them, including embedded nul characters.</p>
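+
+<p>For example, a definition like "def binary@ 15 (LHS RHS) ..." simply produces a
+function whose name is "binary@". As a purely illustrative example (the '@' operator
+here is hypothetical, and the exact IR formatting depends on your LLVM version), a
+session could look like this:</p>
+
+<div class="doc_code">
+<pre>
+ready&gt; <b>def binary@ 15 (LHS RHS) LHS + RHS;</b>
+Read function definition:
+define double @"binary@"(double %LHS, double %RHS) {
+entry:
+  %addtmp = fadd double %LHS, %RHS
+  ret double %addtmp
+}
+</pre>
+</div>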
+
+<p>The next interesting thing to add is codegen support for these binary operators.
+Given our current structure, this is a simple addition of a default case for our
+existing binary operator node:</p>
+
+<div class="doc_code">
+<pre>
+Value *BinaryExprAST::Codegen() {
+ Value *L = LHS-&gt;Codegen();
+ Value *R = RHS-&gt;Codegen();
+ if (L == 0 || R == 0) return 0;
+
+ switch (Op) {
+ case '+': return Builder.CreateFAdd(L, R, "addtmp");
+ case '-': return Builder.CreateFSub(L, R, "subtmp");
+ case '*': return Builder.CreateFMul(L, R, "multmp");
+ case '&lt;':
+ L = Builder.CreateFCmpULT(L, R, "cmptmp");
+ // Convert bool 0/1 to double 0.0 or 1.0
+ return Builder.CreateUIToFP(L, Type::getDoubleTy(getGlobalContext()),
+ "booltmp");
+ <b>default: break;</b>
+ }
+
+ <b>// If it wasn't a builtin binary operator, it must be a user defined one. Emit
+ // a call to it.
+ Function *F = TheModule-&gt;getFunction(std::string("binary")+Op);
+ assert(F &amp;&amp; "binary operator not found!");
+
+ Value *Ops[2] = { L, R };
+ return Builder.CreateCall(F, Ops, "binop");</b>
+}
+
+</pre>
+</div>
+
+<p>As you can see above, the new code is actually really simple. It just does
+a lookup for the appropriate operator in the symbol table and generates a
+function call to it. Since user-defined operators are just built as normal
+functions (because the "prototype" boils down to a function with the right
+name) everything falls into place.</p>
+
+<p>The final piece of code we are missing is a bit of top-level magic:</p>
+
+<div class="doc_code">
+<pre>
+Function *FunctionAST::Codegen() {
+ NamedValues.clear();
+
+ Function *TheFunction = Proto->Codegen();
+ if (TheFunction == 0)
+ return 0;
+
+ <b>// If this is an operator, install it.
+ if (Proto-&gt;isBinaryOp())
+ BinopPrecedence[Proto->getOperatorName()] = Proto->getBinaryPrecedence();</b>
+
+ // Create a new basic block to start insertion into.
+ BasicBlock *BB = BasicBlock::Create(getGlobalContext(), "entry", TheFunction);
+ Builder.SetInsertPoint(BB);
+
+ if (Value *RetVal = Body-&gt;Codegen()) {
+ ...
+</pre>
+</div>
+
+<p>Basically, before codegening a function, if it is a user-defined operator, we
+register it in the precedence table. This allows the binary operator parsing
+logic we already have in place to handle it. Since we are working on a fully-general operator precedence parser, this is all we need to do to "extend the grammar".</p>
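+
+<p>To make this concrete, here is a small illustration (hypothetical user code): once
+'|' has been installed with precedence 5, it binds more loosely than the builtin
+'&lt;' operator, which <tt>main</tt> registers with precedence 10, so a mixed
+expression groups the way you would expect:</p>
+
+<div class="doc_code">
+<pre>
+# '|' is given precedence 5 when this definition is codegen'd.
+def binary| 5 (LHS RHS)
+  if LHS then 1 else if RHS then 1 else 0;
+
+# '&lt;' (precedence 10) binds tighter than '|' (precedence 5), so this body
+# parses as (x &lt; 3) | (y &lt; 4).
+def inrange(x y)
+  x &lt; 3 | y &lt; 4;
+</pre>
+</div>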
+
+<p>Now we have useful user-defined binary operators. This builds a lot
+on the previous framework we built for other operators. Adding unary operators
+is a bit more challenging, because we don't have any framework for it yet; let's
+see what it takes.</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="unary">User-defined Unary Operators</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>Since we don't currently support unary operators in the Kaleidoscope
+language, we'll need to add everything to support them. Above, we added simple
+support for the 'unary' keyword to the lexer. In addition to that, we need an
+AST node:</p>
+
+<div class="doc_code">
+<pre>
+/// UnaryExprAST - Expression class for a unary operator.
+class UnaryExprAST : public ExprAST {
+ char Opcode;
+ ExprAST *Operand;
+public:
+ UnaryExprAST(char opcode, ExprAST *operand)
+ : Opcode(opcode), Operand(operand) {}
+ virtual Value *Codegen();
+};
+</pre>
+</div>
+
+<p>This AST node is very simple and obvious by now. It directly mirrors the
+binary operator AST node, except that it only has one child. With this, we
+need to add the parsing logic. Parsing a unary operator is pretty simple: we'll
+add a new function to do it:</p>
+
+<div class="doc_code">
+<pre>
+/// unary
+/// ::= primary
+/// ::= '!' unary
+static ExprAST *ParseUnary() {
+ // If the current token is not an operator, it must be a primary expr.
+ if (!isascii(CurTok) || CurTok == '(' || CurTok == ',')
+ return ParsePrimary();
+
+ // If this is a unary operator, read it.
+ int Opc = CurTok;
+ getNextToken();
+ if (ExprAST *Operand = ParseUnary())
+ return new UnaryExprAST(Opc, Operand);
+ return 0;
+}
+</pre>
+</div>
+
+<p>The grammar we add is pretty straightforward here. If we see a unary
+operator when parsing a primary expression, we eat the operator as a prefix and
+parse the remaining piece as another unary operator. This allows us to handle
+multiple unary operators (e.g. "!!x"). Note that unary operators can't have
+ambiguous parses like binary operators can, so there is no need for precedence
+information.</p>
+
+<p>The problem with this function is that we need to call ParseUnary from somewhere.
+To do this, we change previous callers of ParsePrimary to call ParseUnary
+instead:</p>
+
+<div class="doc_code">
+<pre>
+/// binoprhs
+/// ::= ('+' unary)*
+static ExprAST *ParseBinOpRHS(int ExprPrec, ExprAST *LHS) {
+ ...
+ <b>// Parse the unary expression after the binary operator.
+ ExprAST *RHS = ParseUnary();
+ if (!RHS) return 0;</b>
+ ...
+}
+/// expression
+/// ::= unary binoprhs
+///
+static ExprAST *ParseExpression() {
+ <b>ExprAST *LHS = ParseUnary();</b>
+ if (!LHS) return 0;
+
+ return ParseBinOpRHS(0, LHS);
+}
+</pre>
+</div>
+
+<p>With these two simple changes, we are now able to parse unary operators and build the
+AST for them. Next up, we need to add parser support for prototypes, to parse
+the unary operator prototype. We extend the binary operator code above
+with:</p>
+
+<div class="doc_code">
+<pre>
+/// prototype
+/// ::= id '(' id* ')'
+/// ::= binary LETTER number? (id, id)
+<b>/// ::= unary LETTER (id)</b>
+static PrototypeAST *ParsePrototype() {
+ std::string FnName;
+
+ unsigned Kind = 0; // 0 = identifier, 1 = unary, 2 = binary.
+ unsigned BinaryPrecedence = 30;
+
+ switch (CurTok) {
+ default:
+ return ErrorP("Expected function name in prototype");
+ case tok_identifier:
+ FnName = IdentifierStr;
+ Kind = 0;
+ getNextToken();
+ break;
+ <b>case tok_unary:
+ getNextToken();
+ if (!isascii(CurTok))
+ return ErrorP("Expected unary operator");
+ FnName = "unary";
+ FnName += (char)CurTok;
+ Kind = 1;
+ getNextToken();
+ break;</b>
+ case tok_binary:
+ ...
+</pre>
+</div>
+
+<p>As with binary operators, we name unary operators with a name that includes
+the operator character. This assists us at code generation time. Speaking of,
+the final piece we need to add is codegen support for unary operators. It looks
+like this:</p>
+
+<div class="doc_code">
+<pre>
+Value *UnaryExprAST::Codegen() {
+ Value *OperandV = Operand->Codegen();
+ if (OperandV == 0) return 0;
+
+ Function *F = TheModule->getFunction(std::string("unary")+Opcode);
+ if (F == 0)
+ return ErrorV("Unknown unary operator");
+
+ return Builder.CreateCall(F, OperandV, "unop");
+}
+</pre>
+</div>
+
+<p>This code is similar to, but simpler than, the code for binary operators. It
+is simpler primarily because it doesn't need to handle any predefined operators.
+</p>
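+
+<p>As a quick sanity check (a hypothetical session; the IR dumps printed after each
+definition are elided), defining a unary operator and applying it, even twice in a
+row, behaves as expected:</p>
+
+<div class="doc_code">
+<pre>
+ready&gt; <b>def unary!(v) if v then 0 else 1;</b>
+...
+ready&gt; <b>!1;</b>
+Evaluated to 0.000000
+ready&gt; <b>!!1;</b>
+Evaluated to 1.000000
+</pre>
+</div>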
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="example">Kicking the Tires</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>It is somewhat hard to believe, but with a few simple extensions we've
+covered in the last chapters, we have grown a real-ish language. With this, we
+can do a lot of interesting things, including I/O, math, and a bunch of other
+things. For example, we can now add a nice sequencing operator (printd is
+defined to print out the specified value and a newline):</p>
+
+<div class="doc_code">
+<pre>
+ready&gt; <b>extern printd(x);</b>
+Read extern:
+declare double @printd(double)
+
+ready&gt; <b>def binary : 1 (x y) 0; # Low-precedence operator that ignores operands.</b>
+..
+ready&gt; <b>printd(123) : printd(456) : printd(789);</b>
+123.000000
+456.000000
+789.000000
+Evaluated to 0.000000
+</pre>
+</div>
+
+<p>We can also define a bunch of other "primitive" operations, such as:</p>
+
+<div class="doc_code">
+<pre>
+# Logical unary not.
+def unary!(v)
+ if v then
+ 0
+ else
+ 1;
+
+# Unary negate.
+def unary-(v)
+ 0-v;
+
+# Define &gt; with the same precedence as &lt;.
+def binary&gt; 10 (LHS RHS)
+ RHS &lt; LHS;
+
+# Binary logical or, which does not short circuit.
+def binary| 5 (LHS RHS)
+ if LHS then
+ 1
+ else if RHS then
+ 1
+ else
+ 0;
+
+# Binary logical and, which does not short circuit.
+def binary&amp; 6 (LHS RHS)
+ if !LHS then
+ 0
+ else
+ !!RHS;
+
+# Define = with slightly lower precedence than relationals.
+def binary = 9 (LHS RHS)
+ !(LHS &lt; RHS | LHS &gt; RHS);
+
+# Define ':' for sequencing: as a low-precedence operator that ignores operands
+# and just returns the RHS.
+def binary : 1 (x y) y;
+</pre>
+</div>
+
+
+<p>Given the previous if/then/else support, we can also define interesting
+functions for I/O. For example, the following prints out a character whose
+"density" reflects the value passed in: the lower the value, the denser the
+character:</p>
+
+<div class="doc_code">
+<pre>
+ready&gt;
+<b>
+extern putchard(char)
+def printdensity(d)
+ if d &gt; 8 then
+ putchard(32) # ' '
+ else if d &gt; 4 then
+ putchard(46) # '.'
+ else if d &gt; 2 then
+ putchard(43) # '+'
+ else
+ putchard(42); # '*'</b>
+...
+ready&gt; <b>printdensity(1): printdensity(2): printdensity(3):
+ printdensity(4): printdensity(5): printdensity(9):
+ putchard(10);</b>
+**++.
+Evaluated to 0.000000
+</pre>
+</div>
+
+<p>Based on these simple primitive operations, we can start to define more
+interesting things. For example, here's a little function that solves for the
+number of iterations it takes a function in the complex plane to
+converge:</p>
+
+<div class="doc_code">
+<pre>
+# Determine whether the specific location diverges.
+# Solve for z = z^2 + c in the complex plane.
+def mandleconverger(real imag iters creal cimag)
+ if iters &gt; 255 | (real*real + imag*imag &gt; 4) then
+ iters
+ else
+ mandleconverger(real*real - imag*imag + creal,
+ 2*real*imag + cimag,
+ iters+1, creal, cimag);
+
+# Return the number of iterations required for the iteration to escape
+def mandleconverge(real imag)
+ mandleconverger(real, imag, 0, real, imag);
+</pre>
+</div>
+
+<p>This "<code>z = z<sup>2</sup> + c</code>" function is a beautiful little
+creature that is the basis for computation of
+the <a href="http://en.wikipedia.org/wiki/Mandelbrot_set">Mandelbrot Set</a>.
+Our <tt>mandleconverge</tt> function returns the number of iterations that it
+takes for a complex orbit to escape, saturating to 255. This is not a very
+useful function by itself, but if you plot its value over a two-dimensional
+plane, you can see the Mandelbrot set. Given that we are limited to using
+putchard here, our amazing graphical output is limited, but we can whip together
+something using the density plotter above:</p>
+
+<div class="doc_code">
+<pre>
+# Compute and plot the Mandelbrot set with the specified 2 dimensional range
+# info.
+def mandelhelp(xmin xmax xstep ymin ymax ystep)
+ for y = ymin, y &lt; ymax, ystep in (
+ (for x = xmin, x &lt; xmax, xstep in
+ printdensity(mandleconverge(x,y)))
+ : putchard(10)
+ )
+
+# mandel - This is a convenient helper function for plotting the Mandelbrot set
+# from the specified position with the specified magnification.
+def mandel(realstart imagstart realmag imagmag)
+ mandelhelp(realstart, realstart+realmag*78, realmag,
+ imagstart, imagstart+imagmag*40, imagmag);
+</pre>
+</div>
+
+<p>Given this, we can try plotting out the Mandelbrot set! Let's try it out:</p>
+
+<div class="doc_code">
+<pre>
+ready&gt; <b>mandel(-2.3, -1.3, 0.05, 0.07);</b>
+*******************************+++++++++++*************************************
+*************************+++++++++++++++++++++++*******************************
+**********************+++++++++++++++++++++++++++++****************************
+*******************+++++++++++++++++++++.. ...++++++++*************************
+*****************++++++++++++++++++++++.... ...+++++++++***********************
+***************+++++++++++++++++++++++..... ...+++++++++*********************
+**************+++++++++++++++++++++++.... ....+++++++++********************
+*************++++++++++++++++++++++...... .....++++++++*******************
+************+++++++++++++++++++++....... .......+++++++******************
+***********+++++++++++++++++++.... ... .+++++++*****************
+**********+++++++++++++++++....... .+++++++****************
+*********++++++++++++++........... ...+++++++***************
+********++++++++++++............ ...++++++++**************
+********++++++++++... .......... .++++++++**************
+*******+++++++++..... .+++++++++*************
+*******++++++++...... ..+++++++++*************
+*******++++++....... ..+++++++++*************
+*******+++++...... ..+++++++++*************
+*******.... .... ...+++++++++*************
+*******.... . ...+++++++++*************
+*******+++++...... ...+++++++++*************
+*******++++++....... ..+++++++++*************
+*******++++++++...... .+++++++++*************
+*******+++++++++..... ..+++++++++*************
+********++++++++++... .......... .++++++++**************
+********++++++++++++............ ...++++++++**************
+*********++++++++++++++.......... ...+++++++***************
+**********++++++++++++++++........ .+++++++****************
+**********++++++++++++++++++++.... ... ..+++++++****************
+***********++++++++++++++++++++++....... .......++++++++*****************
+************+++++++++++++++++++++++...... ......++++++++******************
+**************+++++++++++++++++++++++.... ....++++++++********************
+***************+++++++++++++++++++++++..... ...+++++++++*********************
+*****************++++++++++++++++++++++.... ...++++++++***********************
+*******************+++++++++++++++++++++......++++++++*************************
+*********************++++++++++++++++++++++.++++++++***************************
+*************************+++++++++++++++++++++++*******************************
+******************************+++++++++++++************************************
+*******************************************************************************
+*******************************************************************************
+*******************************************************************************
+Evaluated to 0.000000
+ready&gt; <b>mandel(-2, -1, 0.02, 0.04);</b>
+**************************+++++++++++++++++++++++++++++++++++++++++++++++++++++
+***********************++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+*********************+++++++++++++++++++++++++++++++++++++++++++++++++++++++++.
+*******************+++++++++++++++++++++++++++++++++++++++++++++++++++++++++...
+*****************+++++++++++++++++++++++++++++++++++++++++++++++++++++++++.....
+***************++++++++++++++++++++++++++++++++++++++++++++++++++++++++........
+**************++++++++++++++++++++++++++++++++++++++++++++++++++++++...........
+************+++++++++++++++++++++++++++++++++++++++++++++++++++++..............
+***********++++++++++++++++++++++++++++++++++++++++++++++++++........ .
+**********++++++++++++++++++++++++++++++++++++++++++++++.............
+********+++++++++++++++++++++++++++++++++++++++++++..................
+*******+++++++++++++++++++++++++++++++++++++++.......................
+******+++++++++++++++++++++++++++++++++++...........................
+*****++++++++++++++++++++++++++++++++............................
+*****++++++++++++++++++++++++++++...............................
+****++++++++++++++++++++++++++...... .........................
+***++++++++++++++++++++++++......... ...... ...........
+***++++++++++++++++++++++............
+**+++++++++++++++++++++..............
+**+++++++++++++++++++................
+*++++++++++++++++++.................
+*++++++++++++++++............ ...
+*++++++++++++++..............
+*+++....++++................
+*.......... ...........
+*
+*.......... ...........
+*+++....++++................
+*++++++++++++++..............
+*++++++++++++++++............ ...
+*++++++++++++++++++.................
+**+++++++++++++++++++................
+**+++++++++++++++++++++..............
+***++++++++++++++++++++++............
+***++++++++++++++++++++++++......... ...... ...........
+****++++++++++++++++++++++++++...... .........................
+*****++++++++++++++++++++++++++++...............................
+*****++++++++++++++++++++++++++++++++............................
+******+++++++++++++++++++++++++++++++++++...........................
+*******+++++++++++++++++++++++++++++++++++++++.......................
+********+++++++++++++++++++++++++++++++++++++++++++..................
+Evaluated to 0.000000
+ready&gt; <b>mandel(-0.9, -1.4, 0.02, 0.03);</b>
+*******************************************************************************
+*******************************************************************************
+*******************************************************************************
+**********+++++++++++++++++++++************************************************
+*+++++++++++++++++++++++++++++++++++++++***************************************
++++++++++++++++++++++++++++++++++++++++++++++**********************************
+++++++++++++++++++++++++++++++++++++++++++++++++++*****************************
+++++++++++++++++++++++++++++++++++++++++++++++++++++++*************************
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++**********************
++++++++++++++++++++++++++++++++++.........++++++++++++++++++*******************
++++++++++++++++++++++++++++++++.... ......+++++++++++++++++++****************
++++++++++++++++++++++++++++++....... ........+++++++++++++++++++**************
+++++++++++++++++++++++++++++........ ........++++++++++++++++++++************
++++++++++++++++++++++++++++......... .. ...+++++++++++++++++++++**********
+++++++++++++++++++++++++++........... ....++++++++++++++++++++++********
+++++++++++++++++++++++++............. .......++++++++++++++++++++++******
++++++++++++++++++++++++............. ........+++++++++++++++++++++++****
+++++++++++++++++++++++........... ..........++++++++++++++++++++++***
+++++++++++++++++++++........... .........++++++++++++++++++++++*
+++++++++++++++++++............ ...........++++++++++++++++++++
+++++++++++++++++............... .............++++++++++++++++++
+++++++++++++++................. ...............++++++++++++++++
+++++++++++++.................. .................++++++++++++++
++++++++++.................. .................+++++++++++++
+++++++........ . ......... ..++++++++++++
+++............ ...... ....++++++++++
+.............. ...++++++++++
+.............. ....+++++++++
+.............. .....++++++++
+............. ......++++++++
+........... .......++++++++
+......... ........+++++++
+......... ........+++++++
+......... ....+++++++
+........ ...+++++++
+....... ...+++++++
+ ....+++++++
+ .....+++++++
+ ....+++++++
+ ....+++++++
+ ....+++++++
+Evaluated to 0.000000
+ready&gt; <b>^D</b>
+</pre>
+</div>
+
+<p>At this point, you may be starting to realize that Kaleidoscope is a real
+and powerful language. It may not be self-similar :), but it can be used to
+plot things that are!</p>
+
+<p>With this, we conclude the "adding user-defined operators" chapter of the
+tutorial. We have successfully augmented our language, adding the ability to extend the
+language in the library, and we have shown how this can be used to build a simple but
+interesting end-user application in Kaleidoscope. At this point, Kaleidoscope
+can build a variety of applications that are functional and can call functions
+with side-effects, but it can't actually define and mutate a variable itself.
+</p>
+
+<p>Strikingly, variable mutation is an important feature of some
+languages, and it is not at all obvious how to <a href="LangImpl7.html">add
+support for mutable variables</a> without having to add an "SSA construction"
+phase to your front-end. In the next chapter, we will describe how you can
+add variable mutation without building SSA in your front-end.</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="code">Full Code Listing</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>
+Here is the complete code listing for our running example, enhanced with
+support for user-defined operators. To build this example, use:
+</p>
+
+<div class="doc_code">
+<pre>
+# Compile
+clang++ -g toy.cpp `llvm-config --cppflags --ldflags --libs core jit native` -O3 -o toy
+# Run
+./toy
+</pre>
+</div>
+
+<p>On some platforms, you will need to specify -rdynamic or -Wl,--export-dynamic
+when linking. This ensures that symbols defined in the main executable are
+exported to the dynamic linker and so are available for symbol resolution at
+run time. This is not needed if you compile your support code into a shared
+library, although doing that will cause problems on Windows.</p>
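+
+<p>For example, on a typical Linux system the compile line above might become the
+following (an illustration only; consult your platform's linker documentation):</p>
+
+<div class="doc_code">
+<pre>
+clang++ -g -rdynamic toy.cpp `llvm-config --cppflags --ldflags --libs core jit native` -O3 -o toy
+</pre>
+</div>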
+
+<p>Here is the code:</p>
+
+<div class="doc_code">
+<pre>
+#include "llvm/DerivedTypes.h"
+#include "llvm/ExecutionEngine/ExecutionEngine.h"
+#include "llvm/ExecutionEngine/JIT.h"
+#include "llvm/IRBuilder.h"
+#include "llvm/LLVMContext.h"
+#include "llvm/Module.h"
+#include "llvm/PassManager.h"
+#include "llvm/Analysis/Verifier.h"
+#include "llvm/Analysis/Passes.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Transforms/Scalar.h"
+#include "llvm/Support/TargetSelect.h"
+#include &lt;cstdio&gt;
+#include &lt;string&gt;
+#include &lt;map&gt;
+#include &lt;vector&gt;
+using namespace llvm;
+
+//===----------------------------------------------------------------------===//
+// Lexer
+//===----------------------------------------------------------------------===//
+
+// The lexer returns tokens [0-255] if it is an unknown character, otherwise one
+// of these for known things.
+enum Token {
+ tok_eof = -1,
+
+ // commands
+ tok_def = -2, tok_extern = -3,
+
+ // primary
+ tok_identifier = -4, tok_number = -5,
+
+ // control
+ tok_if = -6, tok_then = -7, tok_else = -8,
+ tok_for = -9, tok_in = -10,
+
+ // operators
+ tok_binary = -11, tok_unary = -12
+};
+
+static std::string IdentifierStr; // Filled in if tok_identifier
+static double NumVal; // Filled in if tok_number
+
+/// gettok - Return the next token from standard input.
+static int gettok() {
+ static int LastChar = ' ';
+
+ // Skip any whitespace.
+ while (isspace(LastChar))
+ LastChar = getchar();
+
+ if (isalpha(LastChar)) { // identifier: [a-zA-Z][a-zA-Z0-9]*
+ IdentifierStr = LastChar;
+ while (isalnum((LastChar = getchar())))
+ IdentifierStr += LastChar;
+
+ if (IdentifierStr == "def") return tok_def;
+ if (IdentifierStr == "extern") return tok_extern;
+ if (IdentifierStr == "if") return tok_if;
+ if (IdentifierStr == "then") return tok_then;
+ if (IdentifierStr == "else") return tok_else;
+ if (IdentifierStr == "for") return tok_for;
+ if (IdentifierStr == "in") return tok_in;
+ if (IdentifierStr == "binary") return tok_binary;
+ if (IdentifierStr == "unary") return tok_unary;
+ return tok_identifier;
+ }
+
+ if (isdigit(LastChar) || LastChar == '.') { // Number: [0-9.]+
+ std::string NumStr;
+ do {
+ NumStr += LastChar;
+ LastChar = getchar();
+ } while (isdigit(LastChar) || LastChar == '.');
+
+ NumVal = strtod(NumStr.c_str(), 0);
+ return tok_number;
+ }
+
+ if (LastChar == '#') {
+ // Comment until end of line.
+ do LastChar = getchar();
+ while (LastChar != EOF &amp;&amp; LastChar != '\n' &amp;&amp; LastChar != '\r');
+
+ if (LastChar != EOF)
+ return gettok();
+ }
+
+ // Check for end of file. Don't eat the EOF.
+ if (LastChar == EOF)
+ return tok_eof;
+
+ // Otherwise, just return the character as its ascii value.
+ int ThisChar = LastChar;
+ LastChar = getchar();
+ return ThisChar;
+}
+
+//===----------------------------------------------------------------------===//
+// Abstract Syntax Tree (aka Parse Tree)
+//===----------------------------------------------------------------------===//
+
+/// ExprAST - Base class for all expression nodes.
+class ExprAST {
+public:
+ virtual ~ExprAST() {}
+ virtual Value *Codegen() = 0;
+};
+
+/// NumberExprAST - Expression class for numeric literals like "1.0".
+class NumberExprAST : public ExprAST {
+ double Val;
+public:
+ NumberExprAST(double val) : Val(val) {}
+ virtual Value *Codegen();
+};
+
+/// VariableExprAST - Expression class for referencing a variable, like "a".
+class VariableExprAST : public ExprAST {
+ std::string Name;
+public:
+ VariableExprAST(const std::string &amp;name) : Name(name) {}
+ virtual Value *Codegen();
+};
+
+/// UnaryExprAST - Expression class for a unary operator.
+class UnaryExprAST : public ExprAST {
+ char Opcode;
+ ExprAST *Operand;
+public:
+ UnaryExprAST(char opcode, ExprAST *operand)
+ : Opcode(opcode), Operand(operand) {}
+ virtual Value *Codegen();
+};
+
+/// BinaryExprAST - Expression class for a binary operator.
+class BinaryExprAST : public ExprAST {
+ char Op;
+ ExprAST *LHS, *RHS;
+public:
+ BinaryExprAST(char op, ExprAST *lhs, ExprAST *rhs)
+ : Op(op), LHS(lhs), RHS(rhs) {}
+ virtual Value *Codegen();
+};
+
+/// CallExprAST - Expression class for function calls.
+class CallExprAST : public ExprAST {
+ std::string Callee;
+ std::vector&lt;ExprAST*&gt; Args;
+public:
+ CallExprAST(const std::string &amp;callee, std::vector&lt;ExprAST*&gt; &amp;args)
+ : Callee(callee), Args(args) {}
+ virtual Value *Codegen();
+};
+
+/// IfExprAST - Expression class for if/then/else.
+class IfExprAST : public ExprAST {
+ ExprAST *Cond, *Then, *Else;
+public:
+ IfExprAST(ExprAST *cond, ExprAST *then, ExprAST *_else)
+ : Cond(cond), Then(then), Else(_else) {}
+ virtual Value *Codegen();
+};
+
+/// ForExprAST - Expression class for for/in.
+class ForExprAST : public ExprAST {
+ std::string VarName;
+ ExprAST *Start, *End, *Step, *Body;
+public:
+ ForExprAST(const std::string &amp;varname, ExprAST *start, ExprAST *end,
+ ExprAST *step, ExprAST *body)
+ : VarName(varname), Start(start), End(end), Step(step), Body(body) {}
+ virtual Value *Codegen();
+};
+
+/// PrototypeAST - This class represents the "prototype" for a function,
+/// which captures its name, and its argument names (thus implicitly the number
+/// of arguments the function takes), as well as if it is an operator.
+class PrototypeAST {
+ std::string Name;
+ std::vector&lt;std::string&gt; Args;
+ bool isOperator;
+ unsigned Precedence; // Precedence if a binary op.
+public:
+ PrototypeAST(const std::string &amp;name, const std::vector&lt;std::string&gt; &amp;args,
+ bool isoperator = false, unsigned prec = 0)
+ : Name(name), Args(args), isOperator(isoperator), Precedence(prec) {}
+
+ bool isUnaryOp() const { return isOperator &amp;&amp; Args.size() == 1; }
+ bool isBinaryOp() const { return isOperator &amp;&amp; Args.size() == 2; }
+
+ char getOperatorName() const {
+ assert(isUnaryOp() || isBinaryOp());
+ return Name[Name.size()-1];
+ }
+
+ unsigned getBinaryPrecedence() const { return Precedence; }
+
+ Function *Codegen();
+};
+
+/// FunctionAST - This class represents a function definition itself.
+class FunctionAST {
+ PrototypeAST *Proto;
+ ExprAST *Body;
+public:
+ FunctionAST(PrototypeAST *proto, ExprAST *body)
+ : Proto(proto), Body(body) {}
+
+ Function *Codegen();
+};
+
+//===----------------------------------------------------------------------===//
+// Parser
+//===----------------------------------------------------------------------===//
+
+/// CurTok/getNextToken - Provide a simple token buffer. CurTok is the current
+/// token the parser is looking at. getNextToken reads another token from the
+/// lexer and updates CurTok with its results.
+static int CurTok;
+static int getNextToken() {
+ return CurTok = gettok();
+}
+
+/// BinopPrecedence - This holds the precedence for each binary operator that is
+/// defined.
+static std::map&lt;char, int&gt; BinopPrecedence;
+
+/// GetTokPrecedence - Get the precedence of the pending binary operator token.
+static int GetTokPrecedence() {
+ if (!isascii(CurTok))
+ return -1;
+
+ // Make sure it's a declared binop.
+ int TokPrec = BinopPrecedence[CurTok];
+ if (TokPrec &lt;= 0) return -1;
+ return TokPrec;
+}
+
+/// Error* - These are little helper functions for error handling.
+ExprAST *Error(const char *Str) { fprintf(stderr, "Error: %s\n", Str);return 0;}
+PrototypeAST *ErrorP(const char *Str) { Error(Str); return 0; }
+FunctionAST *ErrorF(const char *Str) { Error(Str); return 0; }
+
+static ExprAST *ParseExpression();
+
+/// identifierexpr
+/// ::= identifier
+/// ::= identifier '(' expression* ')'
+static ExprAST *ParseIdentifierExpr() {
+ std::string IdName = IdentifierStr;
+
+ getNextToken(); // eat identifier.
+
+ if (CurTok != '(') // Simple variable ref.
+ return new VariableExprAST(IdName);
+
+ // Call.
+ getNextToken(); // eat (
+ std::vector&lt;ExprAST*&gt; Args;
+ if (CurTok != ')') {
+ while (1) {
+ ExprAST *Arg = ParseExpression();
+ if (!Arg) return 0;
+ Args.push_back(Arg);
+
+ if (CurTok == ')') break;
+
+ if (CurTok != ',')
+ return Error("Expected ')' or ',' in argument list");
+ getNextToken();
+ }
+ }
+
+ // Eat the ')'.
+ getNextToken();
+
+ return new CallExprAST(IdName, Args);
+}
+
+/// numberexpr ::= number
+static ExprAST *ParseNumberExpr() {
+ ExprAST *Result = new NumberExprAST(NumVal);
+ getNextToken(); // consume the number
+ return Result;
+}
+
+/// parenexpr ::= '(' expression ')'
+static ExprAST *ParseParenExpr() {
+ getNextToken(); // eat (.
+ ExprAST *V = ParseExpression();
+ if (!V) return 0;
+
+ if (CurTok != ')')
+ return Error("expected ')'");
+ getNextToken(); // eat ).
+ return V;
+}
+
+/// ifexpr ::= 'if' expression 'then' expression 'else' expression
+static ExprAST *ParseIfExpr() {
+ getNextToken(); // eat the if.
+
+ // condition.
+ ExprAST *Cond = ParseExpression();
+ if (!Cond) return 0;
+
+ if (CurTok != tok_then)
+ return Error("expected then");
+ getNextToken(); // eat the then
+
+ ExprAST *Then = ParseExpression();
+ if (Then == 0) return 0;
+
+ if (CurTok != tok_else)
+ return Error("expected else");
+
+ getNextToken();
+
+ ExprAST *Else = ParseExpression();
+ if (!Else) return 0;
+
+ return new IfExprAST(Cond, Then, Else);
+}
+
+/// forexpr ::= 'for' identifier '=' expr ',' expr (',' expr)? 'in' expression
+static ExprAST *ParseForExpr() {
+ getNextToken(); // eat the for.
+
+ if (CurTok != tok_identifier)
+ return Error("expected identifier after for");
+
+ std::string IdName = IdentifierStr;
+ getNextToken(); // eat identifier.
+
+ if (CurTok != '=')
+ return Error("expected '=' after for");
+ getNextToken(); // eat '='.
+
+
+ ExprAST *Start = ParseExpression();
+ if (Start == 0) return 0;
+ if (CurTok != ',')
+ return Error("expected ',' after for start value");
+ getNextToken();
+
+ ExprAST *End = ParseExpression();
+ if (End == 0) return 0;
+
+ // The step value is optional.
+ ExprAST *Step = 0;
+ if (CurTok == ',') {
+ getNextToken();
+ Step = ParseExpression();
+ if (Step == 0) return 0;
+ }
+
+ if (CurTok != tok_in)
+ return Error("expected 'in' after for");
+ getNextToken(); // eat 'in'.
+
+ ExprAST *Body = ParseExpression();
+ if (Body == 0) return 0;
+
+ return new ForExprAST(IdName, Start, End, Step, Body);
+}
+
+/// primary
+/// ::= identifierexpr
+/// ::= numberexpr
+/// ::= parenexpr
+/// ::= ifexpr
+/// ::= forexpr
+static ExprAST *ParsePrimary() {
+ switch (CurTok) {
+ default: return Error("unknown token when expecting an expression");
+ case tok_identifier: return ParseIdentifierExpr();
+ case tok_number: return ParseNumberExpr();
+ case '(': return ParseParenExpr();
+ case tok_if: return ParseIfExpr();
+ case tok_for: return ParseForExpr();
+ }
+}
+
+/// unary
+/// ::= primary
+/// ::= '!' unary
+static ExprAST *ParseUnary() {
+ // If the current token is not an operator, it must be a primary expr.
+ if (!isascii(CurTok) || CurTok == '(' || CurTok == ',')
+ return ParsePrimary();
+
+ // If this is a unary operator, read it.
+ int Opc = CurTok;
+ getNextToken();
+ if (ExprAST *Operand = ParseUnary())
+ return new UnaryExprAST(Opc, Operand);
+ return 0;
+}
+
+/// binoprhs
+/// ::= ('+' unary)*
+static ExprAST *ParseBinOpRHS(int ExprPrec, ExprAST *LHS) {
+ // If this is a binop, find its precedence.
+ while (1) {
+ int TokPrec = GetTokPrecedence();
+
+ // If this is a binop that binds at least as tightly as the current binop,
+ // consume it, otherwise we are done.
+ if (TokPrec &lt; ExprPrec)
+ return LHS;
+
+ // Okay, we know this is a binop.
+ int BinOp = CurTok;
+ getNextToken(); // eat binop
+
+ // Parse the unary expression after the binary operator.
+ ExprAST *RHS = ParseUnary();
+ if (!RHS) return 0;
+
+ // If BinOp binds less tightly with RHS than the operator after RHS, let
+ // the pending operator take RHS as its LHS.
+ int NextPrec = GetTokPrecedence();
+ if (TokPrec &lt; NextPrec) {
+ RHS = ParseBinOpRHS(TokPrec+1, RHS);
+ if (RHS == 0) return 0;
+ }
+
+ // Merge LHS/RHS.
+ LHS = new BinaryExprAST(BinOp, LHS, RHS);
+ }
+}
+
+/// expression
+/// ::= unary binoprhs
+///
+static ExprAST *ParseExpression() {
+ ExprAST *LHS = ParseUnary();
+ if (!LHS) return 0;
+
+ return ParseBinOpRHS(0, LHS);
+}
+
+/// prototype
+/// ::= id '(' id* ')'
+/// ::= binary LETTER number? (id, id)
+/// ::= unary LETTER (id)
+static PrototypeAST *ParsePrototype() {
+ std::string FnName;
+
+ unsigned Kind = 0; // 0 = identifier, 1 = unary, 2 = binary.
+ unsigned BinaryPrecedence = 30;
+
+ switch (CurTok) {
+ default:
+ return ErrorP("Expected function name in prototype");
+ case tok_identifier:
+ FnName = IdentifierStr;
+ Kind = 0;
+ getNextToken();
+ break;
+ case tok_unary:
+ getNextToken();
+ if (!isascii(CurTok))
+ return ErrorP("Expected unary operator");
+ FnName = "unary";
+ FnName += (char)CurTok;
+ Kind = 1;
+ getNextToken();
+ break;
+ case tok_binary:
+ getNextToken();
+ if (!isascii(CurTok))
+ return ErrorP("Expected binary operator");
+ FnName = "binary";
+ FnName += (char)CurTok;
+ Kind = 2;
+ getNextToken();
+
+ // Read the precedence if present.
+ if (CurTok == tok_number) {
+ if (NumVal &lt; 1 || NumVal &gt; 100)
+        return ErrorP("Invalid precedence: must be 1..100");
+ BinaryPrecedence = (unsigned)NumVal;
+ getNextToken();
+ }
+ break;
+ }
+
+ if (CurTok != '(')
+ return ErrorP("Expected '(' in prototype");
+
+ std::vector&lt;std::string&gt; ArgNames;
+ while (getNextToken() == tok_identifier)
+ ArgNames.push_back(IdentifierStr);
+ if (CurTok != ')')
+ return ErrorP("Expected ')' in prototype");
+
+ // success.
+ getNextToken(); // eat ')'.
+
+ // Verify right number of names for operator.
+ if (Kind &amp;&amp; ArgNames.size() != Kind)
+ return ErrorP("Invalid number of operands for operator");
+
+ return new PrototypeAST(FnName, ArgNames, Kind != 0, BinaryPrecedence);
+}
+
+/// definition ::= 'def' prototype expression
+static FunctionAST *ParseDefinition() {
+ getNextToken(); // eat def.
+ PrototypeAST *Proto = ParsePrototype();
+ if (Proto == 0) return 0;
+
+ if (ExprAST *E = ParseExpression())
+ return new FunctionAST(Proto, E);
+ return 0;
+}
+
+/// toplevelexpr ::= expression
+static FunctionAST *ParseTopLevelExpr() {
+ if (ExprAST *E = ParseExpression()) {
+ // Make an anonymous proto.
+ PrototypeAST *Proto = new PrototypeAST("", std::vector&lt;std::string&gt;());
+ return new FunctionAST(Proto, E);
+ }
+ return 0;
+}
+
+/// external ::= 'extern' prototype
+static PrototypeAST *ParseExtern() {
+ getNextToken(); // eat extern.
+ return ParsePrototype();
+}
+
+//===----------------------------------------------------------------------===//
+// Code Generation
+//===----------------------------------------------------------------------===//
+
+static Module *TheModule;
+static IRBuilder&lt;&gt; Builder(getGlobalContext());
+static std::map&lt;std::string, Value*&gt; NamedValues;
+static FunctionPassManager *TheFPM;
+
+Value *ErrorV(const char *Str) { Error(Str); return 0; }
+
+Value *NumberExprAST::Codegen() {
+ return ConstantFP::get(getGlobalContext(), APFloat(Val));
+}
+
+Value *VariableExprAST::Codegen() {
+ // Look this variable up in the function.
+ Value *V = NamedValues[Name];
+ return V ? V : ErrorV("Unknown variable name");
+}
+
+Value *UnaryExprAST::Codegen() {
+ Value *OperandV = Operand-&gt;Codegen();
+ if (OperandV == 0) return 0;
+
+ Function *F = TheModule-&gt;getFunction(std::string("unary")+Opcode);
+ if (F == 0)
+ return ErrorV("Unknown unary operator");
+
+ return Builder.CreateCall(F, OperandV, "unop");
+}
+
+Value *BinaryExprAST::Codegen() {
+ Value *L = LHS-&gt;Codegen();
+ Value *R = RHS-&gt;Codegen();
+ if (L == 0 || R == 0) return 0;
+
+ switch (Op) {
+ case '+': return Builder.CreateFAdd(L, R, "addtmp");
+ case '-': return Builder.CreateFSub(L, R, "subtmp");
+ case '*': return Builder.CreateFMul(L, R, "multmp");
+ case '&lt;':
+ L = Builder.CreateFCmpULT(L, R, "cmptmp");
+ // Convert bool 0/1 to double 0.0 or 1.0
+ return Builder.CreateUIToFP(L, Type::getDoubleTy(getGlobalContext()),
+ "booltmp");
+ default: break;
+ }
+
+ // If it wasn't a builtin binary operator, it must be a user defined one. Emit
+ // a call to it.
+ Function *F = TheModule-&gt;getFunction(std::string("binary")+Op);
+ assert(F &amp;&amp; "binary operator not found!");
+
+ Value *Ops[2] = { L, R };
+ return Builder.CreateCall(F, Ops, "binop");
+}
+
+Value *CallExprAST::Codegen() {
+ // Look up the name in the global module table.
+ Function *CalleeF = TheModule-&gt;getFunction(Callee);
+ if (CalleeF == 0)
+ return ErrorV("Unknown function referenced");
+
+ // If argument mismatch error.
+ if (CalleeF-&gt;arg_size() != Args.size())
+ return ErrorV("Incorrect # arguments passed");
+
+ std::vector&lt;Value*&gt; ArgsV;
+ for (unsigned i = 0, e = Args.size(); i != e; ++i) {
+ ArgsV.push_back(Args[i]-&gt;Codegen());
+ if (ArgsV.back() == 0) return 0;
+ }
+
+ return Builder.CreateCall(CalleeF, ArgsV, "calltmp");
+}
+
+Value *IfExprAST::Codegen() {
+ Value *CondV = Cond-&gt;Codegen();
+ if (CondV == 0) return 0;
+
+ // Convert condition to a bool by comparing equal to 0.0.
+ CondV = Builder.CreateFCmpONE(CondV,
+ ConstantFP::get(getGlobalContext(), APFloat(0.0)),
+ "ifcond");
+
+ Function *TheFunction = Builder.GetInsertBlock()-&gt;getParent();
+
+ // Create blocks for the then and else cases. Insert the 'then' block at the
+ // end of the function.
+ BasicBlock *ThenBB = BasicBlock::Create(getGlobalContext(), "then", TheFunction);
+ BasicBlock *ElseBB = BasicBlock::Create(getGlobalContext(), "else");
+ BasicBlock *MergeBB = BasicBlock::Create(getGlobalContext(), "ifcont");
+
+ Builder.CreateCondBr(CondV, ThenBB, ElseBB);
+
+ // Emit then value.
+ Builder.SetInsertPoint(ThenBB);
+
+ Value *ThenV = Then-&gt;Codegen();
+ if (ThenV == 0) return 0;
+
+ Builder.CreateBr(MergeBB);
+ // Codegen of 'Then' can change the current block, update ThenBB for the PHI.
+ ThenBB = Builder.GetInsertBlock();
+
+ // Emit else block.
+ TheFunction-&gt;getBasicBlockList().push_back(ElseBB);
+ Builder.SetInsertPoint(ElseBB);
+
+ Value *ElseV = Else-&gt;Codegen();
+ if (ElseV == 0) return 0;
+
+ Builder.CreateBr(MergeBB);
+ // Codegen of 'Else' can change the current block, update ElseBB for the PHI.
+ ElseBB = Builder.GetInsertBlock();
+
+ // Emit merge block.
+ TheFunction-&gt;getBasicBlockList().push_back(MergeBB);
+ Builder.SetInsertPoint(MergeBB);
+ PHINode *PN = Builder.CreatePHI(Type::getDoubleTy(getGlobalContext()), 2,
+ "iftmp");
+
+ PN-&gt;addIncoming(ThenV, ThenBB);
+ PN-&gt;addIncoming(ElseV, ElseBB);
+ return PN;
+}
+
+Value *ForExprAST::Codegen() {
+ // Output this as:
+ // ...
+ // start = startexpr
+ // goto loop
+ // loop:
+ // variable = phi [start, loopheader], [nextvariable, loopend]
+ // ...
+ // bodyexpr
+ // ...
+ // loopend:
+ // step = stepexpr
+ // nextvariable = variable + step
+ // endcond = endexpr
+ // br endcond, loop, endloop
+ // outloop:
+
+ // Emit the start code first, without 'variable' in scope.
+ Value *StartVal = Start-&gt;Codegen();
+ if (StartVal == 0) return 0;
+
+ // Make the new basic block for the loop header, inserting after current
+ // block.
+ Function *TheFunction = Builder.GetInsertBlock()-&gt;getParent();
+ BasicBlock *PreheaderBB = Builder.GetInsertBlock();
+ BasicBlock *LoopBB = BasicBlock::Create(getGlobalContext(), "loop", TheFunction);
+
+ // Insert an explicit fall through from the current block to the LoopBB.
+ Builder.CreateBr(LoopBB);
+
+ // Start insertion in LoopBB.
+ Builder.SetInsertPoint(LoopBB);
+
+ // Start the PHI node with an entry for Start.
+ PHINode *Variable = Builder.CreatePHI(Type::getDoubleTy(getGlobalContext()), 2, VarName.c_str());
+ Variable-&gt;addIncoming(StartVal, PreheaderBB);
+
+ // Within the loop, the variable is defined equal to the PHI node. If it
+ // shadows an existing variable, we have to restore it, so save it now.
+ Value *OldVal = NamedValues[VarName];
+ NamedValues[VarName] = Variable;
+
+ // Emit the body of the loop. This, like any other expr, can change the
+ // current BB. Note that we ignore the value computed by the body, but don't
+ // allow an error.
+ if (Body-&gt;Codegen() == 0)
+ return 0;
+
+ // Emit the step value.
+ Value *StepVal;
+ if (Step) {
+ StepVal = Step-&gt;Codegen();
+ if (StepVal == 0) return 0;
+ } else {
+ // If not specified, use 1.0.
+ StepVal = ConstantFP::get(getGlobalContext(), APFloat(1.0));
+ }
+
+ Value *NextVar = Builder.CreateFAdd(Variable, StepVal, "nextvar");
+
+ // Compute the end condition.
+ Value *EndCond = End-&gt;Codegen();
+ if (EndCond == 0) return EndCond;
+
+ // Convert condition to a bool by comparing equal to 0.0.
+ EndCond = Builder.CreateFCmpONE(EndCond,
+ ConstantFP::get(getGlobalContext(), APFloat(0.0)),
+ "loopcond");
+
+ // Create the "after loop" block and insert it.
+ BasicBlock *LoopEndBB = Builder.GetInsertBlock();
+ BasicBlock *AfterBB = BasicBlock::Create(getGlobalContext(), "afterloop", TheFunction);
+
+ // Insert the conditional branch into the end of LoopEndBB.
+ Builder.CreateCondBr(EndCond, LoopBB, AfterBB);
+
+ // Any new code will be inserted in AfterBB.
+ Builder.SetInsertPoint(AfterBB);
+
+ // Add a new entry to the PHI node for the backedge.
+ Variable-&gt;addIncoming(NextVar, LoopEndBB);
+
+ // Restore the unshadowed variable.
+ if (OldVal)
+ NamedValues[VarName] = OldVal;
+ else
+ NamedValues.erase(VarName);
+
+
+ // for expr always returns 0.0.
+ return Constant::getNullValue(Type::getDoubleTy(getGlobalContext()));
+}
+
+Function *PrototypeAST::Codegen() {
+ // Make the function type: double(double,double) etc.
+ std::vector&lt;Type*&gt; Doubles(Args.size(),
+ Type::getDoubleTy(getGlobalContext()));
+ FunctionType *FT = FunctionType::get(Type::getDoubleTy(getGlobalContext()),
+ Doubles, false);
+
+ Function *F = Function::Create(FT, Function::ExternalLinkage, Name, TheModule);
+
+ // If F conflicted, there was already something named 'Name'. If it has a
+ // body, don't allow redefinition or reextern.
+ if (F-&gt;getName() != Name) {
+ // Delete the one we just made and get the existing one.
+ F-&gt;eraseFromParent();
+ F = TheModule-&gt;getFunction(Name);
+
+ // If F already has a body, reject this.
+ if (!F-&gt;empty()) {
+ ErrorF("redefinition of function");
+ return 0;
+ }
+
+ // If F took a different number of args, reject.
+ if (F-&gt;arg_size() != Args.size()) {
+ ErrorF("redefinition of function with different # args");
+ return 0;
+ }
+ }
+
+ // Set names for all arguments.
+ unsigned Idx = 0;
+ for (Function::arg_iterator AI = F-&gt;arg_begin(); Idx != Args.size();
+ ++AI, ++Idx) {
+ AI-&gt;setName(Args[Idx]);
+
+ // Add arguments to variable symbol table.
+ NamedValues[Args[Idx]] = AI;
+ }
+
+ return F;
+}
+
+Function *FunctionAST::Codegen() {
+ NamedValues.clear();
+
+ Function *TheFunction = Proto-&gt;Codegen();
+ if (TheFunction == 0)
+ return 0;
+
+ // If this is an operator, install it.
+ if (Proto-&gt;isBinaryOp())
+ BinopPrecedence[Proto-&gt;getOperatorName()] = Proto-&gt;getBinaryPrecedence();
+
+ // Create a new basic block to start insertion into.
+ BasicBlock *BB = BasicBlock::Create(getGlobalContext(), "entry", TheFunction);
+ Builder.SetInsertPoint(BB);
+
+ if (Value *RetVal = Body-&gt;Codegen()) {
+ // Finish off the function.
+ Builder.CreateRet(RetVal);
+
+ // Validate the generated code, checking for consistency.
+ verifyFunction(*TheFunction);
+
+ // Optimize the function.
+ TheFPM-&gt;run(*TheFunction);
+
+ return TheFunction;
+ }
+
+ // Error reading body, remove function.
+ TheFunction-&gt;eraseFromParent();
+
+ if (Proto-&gt;isBinaryOp())
+ BinopPrecedence.erase(Proto-&gt;getOperatorName());
+ return 0;
+}
+
+//===----------------------------------------------------------------------===//
+// Top-Level parsing and JIT Driver
+//===----------------------------------------------------------------------===//
+
+static ExecutionEngine *TheExecutionEngine;
+
+static void HandleDefinition() {
+ if (FunctionAST *F = ParseDefinition()) {
+ if (Function *LF = F-&gt;Codegen()) {
+ fprintf(stderr, "Read function definition:");
+ LF-&gt;dump();
+ }
+ } else {
+ // Skip token for error recovery.
+ getNextToken();
+ }
+}
+
+static void HandleExtern() {
+ if (PrototypeAST *P = ParseExtern()) {
+ if (Function *F = P-&gt;Codegen()) {
+ fprintf(stderr, "Read extern: ");
+ F-&gt;dump();
+ }
+ } else {
+ // Skip token for error recovery.
+ getNextToken();
+ }
+}
+
+static void HandleTopLevelExpression() {
+ // Evaluate a top-level expression into an anonymous function.
+ if (FunctionAST *F = ParseTopLevelExpr()) {
+ if (Function *LF = F-&gt;Codegen()) {
+ // JIT the function, returning a function pointer.
+ void *FPtr = TheExecutionEngine-&gt;getPointerToFunction(LF);
+
+ // Cast it to the right type (takes no arguments, returns a double) so we
+ // can call it as a native function.
+ double (*FP)() = (double (*)())(intptr_t)FPtr;
+ fprintf(stderr, "Evaluated to %f\n", FP());
+ }
+ } else {
+ // Skip token for error recovery.
+ getNextToken();
+ }
+}
+
+/// top ::= definition | external | expression | ';'
+static void MainLoop() {
+ while (1) {
+ fprintf(stderr, "ready&gt; ");
+ switch (CurTok) {
+ case tok_eof: return;
+ case ';': getNextToken(); break; // ignore top-level semicolons.
+ case tok_def: HandleDefinition(); break;
+ case tok_extern: HandleExtern(); break;
+ default: HandleTopLevelExpression(); break;
+ }
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// "Library" functions that can be "extern'd" from user code.
+//===----------------------------------------------------------------------===//
+
+/// putchard - putchar that takes a double and returns 0.
+extern "C"
+double putchard(double X) {
+ putchar((char)X);
+ return 0;
+}
+
+/// printd - printf that takes a double and prints it as "%f\n", returning 0.
+extern "C"
+double printd(double X) {
+ printf("%f\n", X);
+ return 0;
+}
+
+//===----------------------------------------------------------------------===//
+// Main driver code.
+//===----------------------------------------------------------------------===//
+
+int main() {
+ InitializeNativeTarget();
+ LLVMContext &amp;Context = getGlobalContext();
+
+ // Install standard binary operators.
+ // 1 is lowest precedence.
+ BinopPrecedence['&lt;'] = 10;
+ BinopPrecedence['+'] = 20;
+ BinopPrecedence['-'] = 20;
+ BinopPrecedence['*'] = 40; // highest.
+
+ // Prime the first token.
+ fprintf(stderr, "ready&gt; ");
+ getNextToken();
+
+ // Make the module, which holds all the code.
+ TheModule = new Module("my cool jit", Context);
+
+ // Create the JIT. This takes ownership of the module.
+ std::string ErrStr;
+ TheExecutionEngine = EngineBuilder(TheModule).setErrorStr(&amp;ErrStr).create();
+ if (!TheExecutionEngine) {
+ fprintf(stderr, "Could not create ExecutionEngine: %s\n", ErrStr.c_str());
+ exit(1);
+ }
+
+ FunctionPassManager OurFPM(TheModule);
+
+ // Set up the optimizer pipeline. Start with registering info about how the
+ // target lays out data structures.
+ OurFPM.add(new TargetData(*TheExecutionEngine-&gt;getTargetData()));
+ // Provide basic AliasAnalysis support for GVN.
+ OurFPM.add(createBasicAliasAnalysisPass());
+ // Do simple "peephole" optimizations and bit-twiddling optzns.
+ OurFPM.add(createInstructionCombiningPass());
+ // Reassociate expressions.
+ OurFPM.add(createReassociatePass());
+ // Eliminate Common SubExpressions.
+ OurFPM.add(createGVNPass());
+ // Simplify the control flow graph (deleting unreachable blocks, etc).
+ OurFPM.add(createCFGSimplificationPass());
+
+ OurFPM.doInitialization();
+
+ // Set the global so the code gen can use this.
+ TheFPM = &amp;OurFPM;
+
+ // Run the main "interpreter loop" now.
+ MainLoop();
+
+ TheFPM = 0;
+
+ // Print out all of the generated code.
+ TheModule-&gt;dump();
+
+ return 0;
+}
+</pre>
+</div>
+
+<a href="LangImpl7.html">Next: Extending the language: mutable variables / SSA construction</a>
+</div>
+
+<!-- *********************************************************************** -->
+<hr>
+<address>
+ <a href="http://jigsaw.w3.org/css-validator/check/referer"><img
+ src="http://jigsaw.w3.org/css-validator/images/vcss" alt="Valid CSS!"></a>
+ <a href="http://validator.w3.org/check/referer"><img
+ src="http://www.w3.org/Icons/valid-html401" alt="Valid HTML 4.01!"></a>
+
+ <a href="mailto:sabre@nondot.org">Chris Lattner</a><br>
+ <a href="http://llvm.org/">The LLVM Compiler Infrastructure</a><br>
+ Last modified: $Date$
+</address>
+</body>
+</html>
diff --git a/docs/tutorial/LangImpl7.html b/docs/tutorial/LangImpl7.html
new file mode 100644
index 00000000000..08c0c716b6f
--- /dev/null
+++ b/docs/tutorial/LangImpl7.html
@@ -0,0 +1,2164 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
+ "http://www.w3.org/TR/html4/strict.dtd">
+
+<html>
+<head>
+ <title>Kaleidoscope: Extending the Language: Mutable Variables / SSA
+ construction</title>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
+ <meta name="author" content="Chris Lattner">
+ <link rel="stylesheet" href="../_static/llvm.css" type="text/css">
+</head>
+
+<body>
+
+<h1>Kaleidoscope: Extending the Language: Mutable Variables</h1>
+
+<ul>
+<li><a href="index.html">Up to Tutorial Index</a></li>
+<li>Chapter 7
+ <ol>
+ <li><a href="#intro">Chapter 7 Introduction</a></li>
+ <li><a href="#why">Why is this a hard problem?</a></li>
+ <li><a href="#memory">Memory in LLVM</a></li>
+ <li><a href="#kalvars">Mutable Variables in Kaleidoscope</a></li>
+ <li><a href="#adjustments">Adjusting Existing Variables for
+ Mutation</a></li>
+ <li><a href="#assignment">New Assignment Operator</a></li>
+ <li><a href="#localvars">User-defined Local Variables</a></li>
+ <li><a href="#code">Full Code Listing</a></li>
+ </ol>
+</li>
+<li><a href="LangImpl8.html">Chapter 8</a>: Conclusion and other useful LLVM
+ tidbits</li>
+</ul>
+
+<div class="doc_author">
+ <p>Written by <a href="mailto:sabre@nondot.org">Chris Lattner</a></p>
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="intro">Chapter 7 Introduction</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>Welcome to Chapter 7 of the "<a href="index.html">Implementing a language
+with LLVM</a>" tutorial. In chapters 1 through 6, we've built a very
+respectable, albeit simple, <a
+href="http://en.wikipedia.org/wiki/Functional_programming">functional
+programming language</a>. In our journey, we learned some parsing techniques,
+how to build and represent an AST, how to build LLVM IR, and how to optimize
+the resultant code as well as JIT compile it.</p>
+
+<p>While Kaleidoscope is interesting as a functional language, the fact that it
+is functional makes it "too easy" to generate LLVM IR for it. In particular, a
+functional language makes it very easy to build LLVM IR directly in <a
+href="http://en.wikipedia.org/wiki/Static_single_assignment_form">SSA form</a>.
+Since LLVM requires that the input code be in SSA form, this is a very nice
+property; it is often unclear to newcomers, however, how to generate code for an
+imperative language with mutable variables.</p>
+
+<p>The short (and happy) summary of this chapter is that there is no need for
+your front-end to build SSA form: LLVM provides highly tuned and well tested
+support for this, though the way it works is a bit unexpected for some.</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="why">Why is this a hard problem?</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>
+To understand why mutable variables cause complexities in SSA construction,
+consider this extremely simple C example:
+</p>
+
+<div class="doc_code">
+<pre>
+int G, H;
+int test(_Bool Condition) {
+ int X;
+ if (Condition)
+ X = G;
+ else
+ X = H;
+ return X;
+}
+</pre>
+</div>
+
+<p>In this case, we have the variable "X", whose value depends on the path
+executed in the program. Because there are two different possible values for X
+before the return instruction, a PHI node is inserted to merge the two values.
+The LLVM IR that we want for this example looks like this:</p>
+
+<div class="doc_code">
+<pre>
+@G = weak global i32 0 ; type of @G is i32*
+@H = weak global i32 0 ; type of @H is i32*
+
+define i32 @test(i1 %Condition) {
+entry:
+ br i1 %Condition, label %cond_true, label %cond_false
+
+cond_true:
+ %X.0 = load i32* @G
+ br label %cond_next
+
+cond_false:
+ %X.1 = load i32* @H
+ br label %cond_next
+
+cond_next:
+ %X.2 = phi i32 [ %X.1, %cond_false ], [ %X.0, %cond_true ]
+ ret i32 %X.2
+}
+</pre>
+</div>
+
+<p>In this example, the loads from the G and H global variables are explicit in
+the LLVM IR, and they live in the then/else branches of the if statement
+(cond_true/cond_false). In order to merge the incoming values, the X.2 phi node
+in the cond_next block selects the right value to use based on where control
+flow is coming from: if control flow comes from the cond_false block, X.2 gets
+the value of X.1. Alternatively, if control flow comes from cond_true, it gets
+the value of X.0. The intent of this chapter is not to explain the details of
+SSA form. For more information, see one of the many <a
+href="http://en.wikipedia.org/wiki/Static_single_assignment_form">online
+references</a>.</p>
+
+<p>The question for this article is "who places the phi nodes when lowering
+assignments to mutable variables?". The issue here is that LLVM
+<em>requires</em> that its IR be in SSA form: there is no "non-ssa" mode for it.
+However, SSA construction requires non-trivial algorithms and data structures,
+so it is inconvenient and wasteful for every front-end to have to reproduce this
+logic.</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="memory">Memory in LLVM</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>The 'trick' here is that while LLVM does require all register values to be
+in SSA form, it does not require (or permit) memory objects to be in SSA form.
+In the example above, note that the loads from G and H are direct accesses to
+G and H: they are not renamed or versioned. This differs from some other
+compiler systems, which do try to version memory objects. In LLVM, instead of
+encoding dataflow analysis of memory into the LLVM IR, it is handled with <a
+href="../WritingAnLLVMPass.html">Analysis Passes</a> which are computed on
+demand.</p>
+
+<p>
+With this in mind, the high-level idea is that we want to make a stack variable
+(which lives in memory, because it is on the stack) for each mutable object in
+a function. To take advantage of this trick, we need to talk about how LLVM
+represents stack variables.
+</p>
+
+<p>In LLVM, all memory accesses are explicit with load/store instructions, and
+it is carefully designed not to have (or need) an "address-of" operator. Notice
+how the type of the @G/@H global variables is actually "i32*" even though the
+variable is defined as "i32". What this means is that @G defines <em>space</em>
+for an i32 in the global data area, but its <em>name</em> actually refers to the
+address for that space. Stack variables work the same way, except that instead of
+being declared with global variable definitions, they are declared with the
+<a href="../LangRef.html#i_alloca">LLVM alloca instruction</a>:</p>
+
+<div class="doc_code">
+<pre>
+define i32 @example() {
+entry:
+ %X = alloca i32 ; type of %X is i32*.
+ ...
+ %tmp = load i32* %X ; load the stack value %X from the stack.
+ %tmp2 = add i32 %tmp, 1 ; increment it
+ store i32 %tmp2, i32* %X ; store it back
+ ...
+</pre>
+</div>
+
+<p>This code shows an example of how you can declare and manipulate a stack
+variable in the LLVM IR. Stack memory allocated with the alloca instruction is
+fully general: you can pass the address of the stack slot to functions, you can
+store it in other variables, etc. In our example above, we could rewrite the
+example to use the alloca technique to avoid using a PHI node:</p>
+
+<div class="doc_code">
+<pre>
+@G = weak global i32 0 ; type of @G is i32*
+@H = weak global i32 0 ; type of @H is i32*
+
+define i32 @test(i1 %Condition) {
+entry:
+ %X = alloca i32 ; type of %X is i32*.
+ br i1 %Condition, label %cond_true, label %cond_false
+
+cond_true:
+ %X.0 = load i32* @G
+ store i32 %X.0, i32* %X ; Update X
+ br label %cond_next
+
+cond_false:
+ %X.1 = load i32* @H
+ store i32 %X.1, i32* %X ; Update X
+ br label %cond_next
+
+cond_next:
+ %X.2 = load i32* %X ; Read X
+ ret i32 %X.2
+}
+</pre>
+</div>
+
+<p>With this, we have discovered a way to handle arbitrary mutable variables
+without the need to create Phi nodes at all:</p>
+
+<ol>
+<li>Each mutable variable becomes a stack allocation.</li>
+<li>Each read of the variable becomes a load from the stack.</li>
+<li>Each update of the variable becomes a store to the stack.</li>
+<li>Taking the address of a variable just uses the stack address directly.</li>
+</ol>
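+
+<p>To make the correspondence concrete, here is a minimal C++ sketch of what the
+four steps above look like through the tutorial's <tt>IRBuilder</tt>. This is
+illustrative only: the variable name "x", the +1.0 update and the temporary
+names are made up for the example, and are not code from the tutorial itself:</p>
+
+<div class="doc_code">
+<pre>
+// 1. The mutable variable becomes a stack allocation.
+AllocaInst *Alloca = Builder.CreateAlloca(
+    Type::getDoubleTy(getGlobalContext()), 0, "x");
+
+// 2. Each read of the variable becomes a load from that slot.
+Value *CurVal = Builder.CreateLoad(Alloca, "x");
+
+// 3. Each update becomes a store back into the slot (here, x = x + 1.0).
+Value *NewVal = Builder.CreateFAdd(CurVal,
+                  ConstantFP::get(getGlobalContext(), APFloat(1.0)), "newval");
+Builder.CreateStore(NewVal, Alloca);
+
+// 4. Taking the address of the variable just uses the alloca itself.
+Value *AddrOfX = Alloca;
+</pre>
+</div>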
+
+<p>While this solution has solved our immediate problem, it introduced another
+one: we have now apparently introduced a lot of stack traffic for very simple
+and common operations, a major performance problem. Fortunately for us, the
+LLVM optimizer has a highly-tuned optimization pass named "mem2reg" that handles
+this case, promoting allocas like this into SSA registers, inserting Phi nodes
+as appropriate. If you run this example through the pass, for example, you'll
+get:</p>
+
+<div class="doc_code">
+<pre>
+$ <b>llvm-as &lt; example.ll | opt -mem2reg | llvm-dis</b>
+@G = weak global i32 0
+@H = weak global i32 0
+
+define i32 @test(i1 %Condition) {
+entry:
+ br i1 %Condition, label %cond_true, label %cond_false
+
+cond_true:
+ %X.0 = load i32* @G
+ br label %cond_next
+
+cond_false:
+ %X.1 = load i32* @H
+ br label %cond_next
+
+cond_next:
+ %X.01 = phi i32 [ %X.1, %cond_false ], [ %X.0, %cond_true ]
+ ret i32 %X.01
+}
+</pre>
+</div>
+
+<p>The mem2reg pass implements the standard "iterated dominance frontier"
+algorithm for constructing SSA form and has a number of optimizations that speed
+up (very common) degenerate cases. The mem2reg optimization pass is the answer to dealing
+with mutable variables, and we highly recommend that you depend on it. Note that
+mem2reg only works on variables in certain circumstances:</p>
+
+<ol>
+<li>mem2reg is alloca-driven: it looks for allocas and if it can handle them, it
+promotes them. It does not apply to global variables or heap allocations.</li>
+
+<li>mem2reg only looks for alloca instructions in the entry block of the
+function. Being in the entry block guarantees that the alloca is only executed
+once, which makes analysis simpler.</li>
+
+<li>mem2reg only promotes allocas whose uses are direct loads and stores. If
+the address of the stack object is passed to a function, or if any funny pointer
+arithmetic is involved, the alloca will not be promoted.</li>
+
+<li>mem2reg only works on allocas of <a
+href="../LangRef.html#t_classifications">first class</a>
+values (such as pointers, scalars and vectors), and only if the array size
+of the allocation is 1 (or missing in the .ll file). mem2reg is not capable of
+promoting structs or arrays to registers. Note that the "scalarrepl" pass is
+more powerful and can promote structs, "unions", and arrays in many cases.</li>
+
+</ol>
+
+<p>
+All of these properties are easy to satisfy for most imperative languages, and
+we'll illustrate this below with Kaleidoscope. The final question you may be
+asking is: should I bother with this nonsense for my front-end? Wouldn't it be
+better if I just did SSA construction directly, avoiding use of the mem2reg
+optimization pass? In short, we strongly recommend that you use this technique
+for building SSA form, unless there is an extremely good reason not to. Using
+this technique is:</p>
+
+<ul>
+<li>Proven and well tested: llvm-gcc and clang both use this technique for local
+mutable variables. As such, the most common clients of LLVM are using this to
+handle the bulk of their variables. You can be sure that bugs are found fast and
+fixed early.</li>
+
+<li>Extremely Fast: mem2reg has a number of special cases that make it fast in
+common cases as well as fully general. For example, it has fast-paths for
+variables that are only used in a single block, variables that only have one
+assignment point, good heuristics to avoid insertion of unneeded phi nodes, etc.
+</li>
+
+<li>Needed for debug info generation: <a href="../SourceLevelDebugging.html">
+Debug information in LLVM</a> relies on having the address of the variable
+exposed so that debug info can be attached to it. This technique dovetails
+very naturally with this style of debug info.</li>
+</ul>
+
+<p>If nothing else, this makes it much easier to get your front-end up and
+running, and is very simple to implement. Let's extend Kaleidoscope with mutable
+variables now!
+</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="kalvars">Mutable Variables in Kaleidoscope</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>Now that we know the sort of problem we want to tackle, let's see what this
+looks like in the context of our little Kaleidoscope language. We're going to
+add two features:</p>
+
+<ol>
+<li>The ability to mutate variables with the '=' operator.</li>
+<li>The ability to define new variables.</li>
+</ol>
+
+<p>While the first item is really what this is about, we only have variables
+for incoming arguments as well as for induction variables, and redefining those only
+goes so far :). Also, the ability to define new variables is a
+useful thing regardless of whether you will be mutating them. Here's a
+motivating example that shows how we could use these:</p>
+
+<div class="doc_code">
+<pre>
+# Define ':' for sequencing: as a low-precedence operator that ignores operands
+# and just returns the RHS.
+def binary : 1 (x y) y;
+
+# Recursive fib, we could do this before.
+def fib(x)
+ if (x &lt; 3) then
+ 1
+ else
+ fib(x-1)+fib(x-2);
+
+# Iterative fib.
+def fibi(x)
+ <b>var a = 1, b = 1, c in</b>
+ (for i = 3, i &lt; x in
+ <b>c = a + b</b> :
+ <b>a = b</b> :
+ <b>b = c</b>) :
+ b;
+
+# Call it.
+fibi(10);
+</pre>
+</div>
+
+<p>
+In order to mutate variables, we have to change our existing variables to use
+the "alloca trick". Once we have that, we'll add our new operator, then extend
+Kaleidoscope to support new variable definitions.
+</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="adjustments">Adjusting Existing Variables for Mutation</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>
+The symbol table in Kaleidoscope is managed at code generation time by the
+'<tt>NamedValues</tt>' map. This map currently keeps track of the LLVM "Value*"
+that holds the double value for the named variable. In order to support
+mutation, we need to change this slightly, so that <tt>NamedValues</tt> holds
+the <em>memory location</em> of the variable in question. Note that this
+change is a refactoring: it changes the structure of the code, but does not
+(by itself) change the behavior of the compiler. All of these changes are
+isolated in the Kaleidoscope code generator.</p>
+
+<p>
+At this point in Kaleidoscope's development, it only supports variables for two
+things: incoming arguments to functions and the induction variable of 'for'
+loops. For consistency, we'll allow mutation of these variables in addition to
+other user-defined variables. This means that these will both need memory
+locations.
+</p>
+
+<p>To start our transformation of Kaleidoscope, we'll change the NamedValues
+map so that it maps to AllocaInst* instead of Value*. Once we do this, the C++
+compiler will tell us what parts of the code we need to update:</p>
+
+<div class="doc_code">
+<pre>
+static std::map&lt;std::string, AllocaInst*&gt; NamedValues;
+</pre>
+</div>
+
+<p>Also, since we will need to create these allocas, we'll use a helper
+function that ensures that the allocas are created in the entry block of the
+function:</p>
+
+<div class="doc_code">
+<pre>
+/// CreateEntryBlockAlloca - Create an alloca instruction in the entry block of
+/// the function. This is used for mutable variables etc.
+static AllocaInst *CreateEntryBlockAlloca(Function *TheFunction,
+ const std::string &amp;VarName) {
+ IRBuilder&lt;&gt; TmpB(&amp;TheFunction-&gt;getEntryBlock(),
+ TheFunction-&gt;getEntryBlock().begin());
+ return TmpB.CreateAlloca(Type::getDoubleTy(getGlobalContext()), 0,
+ VarName.c_str());
+}
+</pre>
+</div>
+
+<p>This funny looking code creates an IRBuilder object that is pointing at
+the first instruction (.begin()) of the entry block. It then creates an alloca
+with the expected name and returns it. Because all values in Kaleidoscope are
+doubles, there is no need to pass in a type to use.</p>
+
+<p>With this in place, the first functionality change we want to make is to
+variable references. In our new scheme, variables live on the stack, so code
+generating a reference to them actually needs to produce a load from the stack
+slot:</p>
+
+<div class="doc_code">
+<pre>
+Value *VariableExprAST::Codegen() {
+ // Look this variable up in the function.
+ Value *V = NamedValues[Name];
+ if (V == 0) return ErrorV("Unknown variable name");
+
+ <b>// Load the value.
+ return Builder.CreateLoad(V, Name.c_str());</b>
+}
+</pre>
+</div>
+
+<p>As you can see, this is pretty straightforward. Now we need to update the
+things that define the variables to set up the alloca. We'll start with
+<tt>ForExprAST::Codegen</tt> (see the <a href="#code">full code listing</a> for
+the unabridged code):</p>
+
+<div class="doc_code">
+<pre>
+ Function *TheFunction = Builder.GetInsertBlock()->getParent();
+
+ <b>// Create an alloca for the variable in the entry block.
+ AllocaInst *Alloca = CreateEntryBlockAlloca(TheFunction, VarName);</b>
+
+ // Emit the start code first, without 'variable' in scope.
+ Value *StartVal = Start-&gt;Codegen();
+ if (StartVal == 0) return 0;
+
+ <b>// Store the value into the alloca.
+ Builder.CreateStore(StartVal, Alloca);</b>
+ ...
+
+ // Compute the end condition.
+ Value *EndCond = End-&gt;Codegen();
+ if (EndCond == 0) return EndCond;
+
+ <b>// Reload, increment, and restore the alloca. This handles the case where
+ // the body of the loop mutates the variable.
+ Value *CurVar = Builder.CreateLoad(Alloca);
+ Value *NextVar = Builder.CreateFAdd(CurVar, StepVal, "nextvar");
+ Builder.CreateStore(NextVar, Alloca);</b>
+ ...
+</pre>
+</div>
+
+<p>This code is virtually identical to the code <a
+href="LangImpl5.html#forcodegen">before we allowed mutable variables</a>. The
+big difference is that we no longer have to construct a PHI node, and we use
+load/store to access the variable as needed.</p>
+
+<p>To support mutable argument variables, we need to also make allocas for them.
+The code for this is also pretty simple:</p>
+
+<div class="doc_code">
+<pre>
+/// CreateArgumentAllocas - Create an alloca for each argument and register the
+/// argument in the symbol table so that references to it will succeed.
+void PrototypeAST::CreateArgumentAllocas(Function *F) {
+ Function::arg_iterator AI = F-&gt;arg_begin();
+ for (unsigned Idx = 0, e = Args.size(); Idx != e; ++Idx, ++AI) {
+ // Create an alloca for this variable.
+ AllocaInst *Alloca = CreateEntryBlockAlloca(F, Args[Idx]);
+
+ // Store the initial value into the alloca.
+ Builder.CreateStore(AI, Alloca);
+
+ // Add arguments to variable symbol table.
+ NamedValues[Args[Idx]] = Alloca;
+ }
+}
+</pre>
+</div>
+
+<p>For each argument, we make an alloca, store the input value to the function
+into the alloca, and register the alloca as the memory location for the
+argument. This method gets invoked by <tt>FunctionAST::Codegen</tt> right after
+it sets up the entry block for the function.</p>
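+
+<p>For context, here is a rough, abridged sketch of where that call sits in
+<tt>FunctionAST::Codegen</tt> (see the <a href="#code">full code listing</a>
+for the exact version):</p>
+
+<div class="doc_code">
+<pre>
+Function *FunctionAST::Codegen() {
+  ...
+  // Create a new basic block to start insertion into.
+  BasicBlock *BB = BasicBlock::Create(getGlobalContext(), "entry", TheFunction);
+  Builder.SetInsertPoint(BB);
+
+  <b>// Add all arguments to the symbol table and create their allocas.
+  Proto-&gt;CreateArgumentAllocas(TheFunction);</b>
+
+  if (Value *RetVal = Body-&gt;Codegen()) {
+    ...
+</pre>
+</div>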
+
+<p>The final missing piece is adding the mem2reg pass, which allows us to get
+good codegen once again:</p>
+
+<div class="doc_code">
+<pre>
+ // Set up the optimizer pipeline. Start with registering info about how the
+ // target lays out data structures.
+ OurFPM.add(new TargetData(*TheExecutionEngine-&gt;getTargetData()));
+ <b>// Promote allocas to registers.
+ OurFPM.add(createPromoteMemoryToRegisterPass());</b>
+ // Do simple "peephole" optimizations and bit-twiddling optzns.
+ OurFPM.add(createInstructionCombiningPass());
+ // Reassociate expressions.
+ OurFPM.add(createReassociatePass());
+</pre>
+</div>
+
+<p>It is interesting to see what the code looks like before and after the
+mem2reg optimization runs. For example, this is the before/after code for our
+recursive fib function. Before the optimization:</p>
+
+<div class="doc_code">
+<pre>
+define double @fib(double %x) {
+entry:
+ <b>%x1 = alloca double
+ store double %x, double* %x1
+ %x2 = load double* %x1</b>
+ %cmptmp = fcmp ult double %x2, 3.000000e+00
+ %booltmp = uitofp i1 %cmptmp to double
+ %ifcond = fcmp one double %booltmp, 0.000000e+00
+ br i1 %ifcond, label %then, label %else
+
+then: ; preds = %entry
+ br label %ifcont
+
+else: ; preds = %entry
+ <b>%x3 = load double* %x1</b>
+ %subtmp = fsub double %x3, 1.000000e+00
+ %calltmp = call double @fib(double %subtmp)
+ <b>%x4 = load double* %x1</b>
+ %subtmp5 = fsub double %x4, 2.000000e+00
+ %calltmp6 = call double @fib(double %subtmp5)
+ %addtmp = fadd double %calltmp, %calltmp6
+ br label %ifcont
+
+ifcont: ; preds = %else, %then
+ %iftmp = phi double [ 1.000000e+00, %then ], [ %addtmp, %else ]
+ ret double %iftmp
+}
+</pre>
+</div>
+
+<p>Here there is only one variable (x, the input argument) but you can still
+see the extremely simple-minded code generation strategy we are using. In the
+entry block, an alloca is created, and the initial input value is stored into
+it. Each reference to the variable does a reload from the stack. Also, note
+that we didn't modify the if/then/else expression, so it still inserts a PHI
+node. While we could make an alloca for it, it is actually easier to create a
+PHI node for it, so we still just make the PHI.</p>
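+
+<p>If you are curious, a hypothetical sketch of the alloca-based alternative is
+shown below; the tutorial does <em>not</em> do this, and the names
+<tt>ThenV</tt>, <tt>ElseV</tt> and <tt>MergeBB</tt> are simply the ones from the
+existing <tt>IfExprAST::Codegen</tt>:</p>
+
+<div class="doc_code">
+<pre>
+// Hypothetical: merge the two values through a stack slot instead of a PHI.
+// mem2reg would simply turn this back into the PHI node shown above.
+AllocaInst *IfTmp = CreateEntryBlockAlloca(TheFunction, "iftmp");
+...
+Builder.CreateStore(ThenV, IfTmp);   // at the end of the 'then' block
+Builder.CreateBr(MergeBB);
+...
+Builder.CreateStore(ElseV, IfTmp);   // at the end of the 'else' block
+Builder.CreateBr(MergeBB);
+...
+Builder.SetInsertPoint(MergeBB);
+return Builder.CreateLoad(IfTmp, "iftmp");   // one load replaces the PHI node
+</pre>
+</div>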
+
+<p>Here is the code after the mem2reg pass runs:</p>
+
+<div class="doc_code">
+<pre>
+define double @fib(double %x) {
+entry:
+ %cmptmp = fcmp ult double <b>%x</b>, 3.000000e+00
+ %booltmp = uitofp i1 %cmptmp to double
+ %ifcond = fcmp one double %booltmp, 0.000000e+00
+ br i1 %ifcond, label %then, label %else
+
+then:
+ br label %ifcont
+
+else:
+ %subtmp = fsub double <b>%x</b>, 1.000000e+00
+ %calltmp = call double @fib(double %subtmp)
+ %subtmp5 = fsub double <b>%x</b>, 2.000000e+00
+ %calltmp6 = call double @fib(double %subtmp5)
+ %addtmp = fadd double %calltmp, %calltmp6
+ br label %ifcont
+
+ifcont: ; preds = %else, %then
+ %iftmp = phi double [ 1.000000e+00, %then ], [ %addtmp, %else ]
+ ret double %iftmp
+}
+</pre>
+</div>
+
+<p>This is a trivial case for mem2reg, since there are no redefinitions of the
+variable. The point of showing this is to calm your tension about inserting
+such blatant inefficiencies :).</p>
+
+<p>After the rest of the optimizers run, we get:</p>
+
+<div class="doc_code">
+<pre>
+define double @fib(double %x) {
+entry:
+ %cmptmp = fcmp ult double %x, 3.000000e+00
+ %booltmp = uitofp i1 %cmptmp to double
+ %ifcond = fcmp ueq double %booltmp, 0.000000e+00
+ br i1 %ifcond, label %else, label %ifcont
+
+else:
+ %subtmp = fsub double %x, 1.000000e+00
+ %calltmp = call double @fib(double %subtmp)
+ %subtmp5 = fsub double %x, 2.000000e+00
+ %calltmp6 = call double @fib(double %subtmp5)
+ %addtmp = fadd double %calltmp, %calltmp6
+ ret double %addtmp
+
+ifcont:
+ ret double 1.000000e+00
+}
+</pre>
+</div>
+
+<p>Here we see that the simplifycfg pass decided to clone the return instruction
+into the end of the 'else' block. This allowed it to eliminate some branches
+and the PHI node.</p>
+
+<p>Now that all symbol table references are updated to use stack variables,
+we'll add the assignment operator.</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="assignment">New Assignment Operator</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>With our current framework, adding a new assignment operator is really
+simple. We will parse it just like any other binary operator, but handle it
+internally (instead of allowing the user to define it). The first step is to
+set a precedence:</p>
+
+<div class="doc_code">
+<pre>
+ int main() {
+ // Install standard binary operators.
+ // 1 is lowest precedence.
+ <b>BinopPrecedence['='] = 2;</b>
+ BinopPrecedence['&lt;'] = 10;
+ BinopPrecedence['+'] = 20;
+ BinopPrecedence['-'] = 20;
+</pre>
+</div>
+
+<p>Now that the parser knows the precedence of the binary operator, it takes
+care of all the parsing and AST generation. We just need to implement codegen
+for the assignment operator. This looks like:</p>
+
+<div class="doc_code">
+<pre>
+Value *BinaryExprAST::Codegen() {
+ // Special case '=' because we don't want to emit the LHS as an expression.
+ if (Op == '=') {
+ // Assignment requires the LHS to be an identifier.
+ VariableExprAST *LHSE = dynamic_cast&lt;VariableExprAST*&gt;(LHS);
+ if (!LHSE)
+ return ErrorV("destination of '=' must be a variable");
+</pre>
+</div>
+
+<p>Unlike the rest of the binary operators, our assignment operator doesn't
+follow the "emit LHS, emit RHS, do computation" model. As such, it is handled
+as a special case before the other binary operators are handled. The other
+strange thing is that it requires the LHS to be a variable. It is invalid to
+have "(x+1) = expr" - only things like "x = expr" are allowed.
+</p>
+
+<div class="doc_code">
+<pre>
+ // Codegen the RHS.
+ Value *Val = RHS-&gt;Codegen();
+ if (Val == 0) return 0;
+
+ // Look up the name.
+ Value *Variable = NamedValues[LHSE-&gt;getName()];
+ if (Variable == 0) return ErrorV("Unknown variable name");
+
+ Builder.CreateStore(Val, Variable);
+ return Val;
+ }
+ ...
+</pre>
+</div>
+
+<p>Once we have the variable, codegen'ing the assignment is straightforward:
+we emit the RHS of the assignment, create a store, and return the computed
+value. Returning a value allows for chained assignments like "X = (Y = Z)".</p>
+
+<p>Now that we have an assignment operator, we can mutate loop variables and
+arguments. For example, we can now run code like this:</p>
+
+<div class="doc_code">
+<pre>
+# Function to print a double.
+extern printd(x);
+
+# Define ':' for sequencing: as a low-precedence operator that ignores operands
+# and just returns the RHS.
+def binary : 1 (x y) y;
+
+def test(x)
+ printd(x) :
+ x = 4 :
+ printd(x);
+
+test(123);
+</pre>
+</div>
+
+<p>When run, this example prints "123" and then "4", showing that we did
+actually mutate the value! Okay, we have now officially implemented our goal:
+getting this to work requires SSA construction in the general case. However,
+to be really useful, we want the ability to define our own local variables. Let's
+add this next!
+</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="localvars">User-defined Local Variables</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>Adding var/in is just like any other extension we made to
+Kaleidoscope: we extend the lexer, the parser, the AST and the code generator.
+The first step for adding our new 'var/in' construct is to extend the lexer.
+As before, this is pretty trivial; the code looks like this:</p>
+
+<div class="doc_code">
+<pre>
+enum Token {
+ ...
+ <b>// var definition
+ tok_var = -13</b>
+...
+}
+...
+static int gettok() {
+...
+ if (IdentifierStr == "in") return tok_in;
+ if (IdentifierStr == "binary") return tok_binary;
+ if (IdentifierStr == "unary") return tok_unary;
+ <b>if (IdentifierStr == "var") return tok_var;</b>
+ return tok_identifier;
+...
+</pre>
+</div>
+
+<p>The next step is to define the AST node that we will construct. For var/in,
+it looks like this:</p>
+
+<div class="doc_code">
+<pre>
+/// VarExprAST - Expression class for var/in
+class VarExprAST : public ExprAST {
+ std::vector&lt;std::pair&lt;std::string, ExprAST*&gt; &gt; VarNames;
+ ExprAST *Body;
+public:
+ VarExprAST(const std::vector&lt;std::pair&lt;std::string, ExprAST*&gt; &gt; &amp;varnames,
+ ExprAST *body)
+ : VarNames(varnames), Body(body) {}
+
+ virtual Value *Codegen();
+};
+</pre>
+</div>
+
+<p>var/in allows a list of names to be defined all at once, and each name can
+optionally have an initializer value. As such, we capture this information in
+the VarNames vector. Also, var/in has a body, and this body is allowed to access
+the variables defined by the var/in.</p>
+
+<p>With this in place, we can define the parser pieces. The first thing we do is add
+it as a primary expression:</p>
+
+<div class="doc_code">
+<pre>
+/// primary
+/// ::= identifierexpr
+/// ::= numberexpr
+/// ::= parenexpr
+/// ::= ifexpr
+/// ::= forexpr
+<b>/// ::= varexpr</b>
+static ExprAST *ParsePrimary() {
+ switch (CurTok) {
+ default: return Error("unknown token when expecting an expression");
+ case tok_identifier: return ParseIdentifierExpr();
+ case tok_number: return ParseNumberExpr();
+ case '(': return ParseParenExpr();
+ case tok_if: return ParseIfExpr();
+ case tok_for: return ParseForExpr();
+ <b>case tok_var: return ParseVarExpr();</b>
+ }
+}
+</pre>
+</div>
+
+<p>Next we define ParseVarExpr:</p>
+
+<div class="doc_code">
+<pre>
+/// varexpr ::= 'var' identifier ('=' expression)?
+// (',' identifier ('=' expression)?)* 'in' expression
+static ExprAST *ParseVarExpr() {
+ getNextToken(); // eat the var.
+
+ std::vector&lt;std::pair&lt;std::string, ExprAST*&gt; &gt; VarNames;
+
+ // At least one variable name is required.
+ if (CurTok != tok_identifier)
+ return Error("expected identifier after var");
+</pre>
+</div>
+
+<p>The first part of this code parses the list of identifier/expr pairs into the
+local <tt>VarNames</tt> vector.
+
+<div class="doc_code">
+<pre>
+ while (1) {
+ std::string Name = IdentifierStr;
+ getNextToken(); // eat identifier.
+
+ // Read the optional initializer.
+ ExprAST *Init = 0;
+ if (CurTok == '=') {
+ getNextToken(); // eat the '='.
+
+ Init = ParseExpression();
+ if (Init == 0) return 0;
+ }
+
+ VarNames.push_back(std::make_pair(Name, Init));
+
+ // End of var list, exit loop.
+ if (CurTok != ',') break;
+ getNextToken(); // eat the ','.
+
+ if (CurTok != tok_identifier)
+ return Error("expected identifier list after var");
+ }
+</pre>
+</div>
+
+<p>Once all the variables are parsed, we then parse the body and create the
+AST node:</p>
+
+<div class="doc_code">
+<pre>
+ // At this point, we have to have 'in'.
+ if (CurTok != tok_in)
+ return Error("expected 'in' keyword after 'var'");
+ getNextToken(); // eat 'in'.
+
+ ExprAST *Body = ParseExpression();
+ if (Body == 0) return 0;
+
+ return new VarExprAST(VarNames, Body);
+}
+</pre>
+</div>
+
+<p>Now that we can parse and represent the code, we need to support emission of
+LLVM IR for it. This code starts out with:</p>
+
+<div class="doc_code">
+<pre>
+Value *VarExprAST::Codegen() {
+ std::vector&lt;AllocaInst *&gt; OldBindings;
+
+ Function *TheFunction = Builder.GetInsertBlock()-&gt;getParent();
+
+ // Register all variables and emit their initializer.
+ for (unsigned i = 0, e = VarNames.size(); i != e; ++i) {
+ const std::string &amp;VarName = VarNames[i].first;
+ ExprAST *Init = VarNames[i].second;
+</pre>
+</div>
+
+<p>Basically it loops over all the variables, installing them one at a time.
+For each variable we put into the symbol table, we remember the previous value
+that we replace in OldBindings.</p>
+
+<div class="doc_code">
+<pre>
+ // Emit the initializer before adding the variable to scope, this prevents
+ // the initializer from referencing the variable itself, and permits stuff
+ // like this:
+ // var a = 1 in
+ // var a = a in ... # refers to outer 'a'.
+ Value *InitVal;
+ if (Init) {
+ InitVal = Init-&gt;Codegen();
+ if (InitVal == 0) return 0;
+ } else { // If not specified, use 0.0.
+ InitVal = ConstantFP::get(getGlobalContext(), APFloat(0.0));
+ }
+
+ AllocaInst *Alloca = CreateEntryBlockAlloca(TheFunction, VarName);
+ Builder.CreateStore(InitVal, Alloca);
+
+ // Remember the old variable binding so that we can restore the binding when
+ // we unrecurse.
+ OldBindings.push_back(NamedValues[VarName]);
+
+ // Remember this binding.
+ NamedValues[VarName] = Alloca;
+ }
+</pre>
+</div>
+
+<p>There are more comments here than code. The basic idea is that we emit the
+initializer, create the alloca, then update the symbol table to point to it.
+Once all the variables are installed in the symbol table, we evaluate the body
+of the var/in expression:</p>
+
+<div class="doc_code">
+<pre>
+ // Codegen the body, now that all vars are in scope.
+ Value *BodyVal = Body-&gt;Codegen();
+ if (BodyVal == 0) return 0;
+</pre>
+</div>
+
+<p>Finally, before returning, we restore the previous variable bindings:</p>
+
+<div class="doc_code">
+<pre>
+ // Pop all our variables from scope.
+ for (unsigned i = 0, e = VarNames.size(); i != e; ++i)
+ NamedValues[VarNames[i].first] = OldBindings[i];
+
+ // Return the body computation.
+ return BodyVal;
+}
+</pre>
+</div>
+
+<p>The end result of all of this is that we get properly scoped variable
+definitions, and we even (trivially) allow mutation of them :).</p>
+
+<p>With this, we completed what we set out to do. Our nice iterative fib
+example from the intro compiles and runs just fine. The mem2reg pass optimizes
+all of our stack variables into SSA registers, inserting PHI nodes where needed,
+and our front-end remains simple: no "iterated dominance frontier" computation
+anywhere in sight.</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="code">Full Code Listing</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>
+Here is the complete code listing for our running example, enhanced with mutable
+variables and var/in support. To build this example, use:
+</p>
+
+<div class="doc_code">
+<pre>
+# Compile
+clang++ -g toy.cpp `llvm-config --cppflags --ldflags --libs core jit native` -O3 -o toy
+# Run
+./toy
+</pre>
+</div>
+
+<p>Here is the code:</p>
+
+<div class="doc_code">
+<pre>
+#include "llvm/DerivedTypes.h"
+#include "llvm/ExecutionEngine/ExecutionEngine.h"
+#include "llvm/ExecutionEngine/JIT.h"
+#include "llvm/IRBuilder.h"
+#include "llvm/LLVMContext.h"
+#include "llvm/Module.h"
+#include "llvm/PassManager.h"
+#include "llvm/Analysis/Verifier.h"
+#include "llvm/Analysis/Passes.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Transforms/Scalar.h"
+#include "llvm/Support/TargetSelect.h"
+#include &lt;cstdio&gt;
+#include &lt;string&gt;
+#include &lt;map&gt;
+#include &lt;vector&gt;
+using namespace llvm;
+
+//===----------------------------------------------------------------------===//
+// Lexer
+//===----------------------------------------------------------------------===//
+
+// The lexer returns tokens [0-255] if it is an unknown character, otherwise one
+// of these for known things.
+enum Token {
+ tok_eof = -1,
+
+ // commands
+ tok_def = -2, tok_extern = -3,
+
+ // primary
+ tok_identifier = -4, tok_number = -5,
+
+ // control
+ tok_if = -6, tok_then = -7, tok_else = -8,
+ tok_for = -9, tok_in = -10,
+
+ // operators
+ tok_binary = -11, tok_unary = -12,
+
+ // var definition
+ tok_var = -13
+};
+
+static std::string IdentifierStr; // Filled in if tok_identifier
+static double NumVal; // Filled in if tok_number
+
+/// gettok - Return the next token from standard input.
+static int gettok() {
+ static int LastChar = ' ';
+
+ // Skip any whitespace.
+ while (isspace(LastChar))
+ LastChar = getchar();
+
+ if (isalpha(LastChar)) { // identifier: [a-zA-Z][a-zA-Z0-9]*
+ IdentifierStr = LastChar;
+ while (isalnum((LastChar = getchar())))
+ IdentifierStr += LastChar;
+
+ if (IdentifierStr == "def") return tok_def;
+ if (IdentifierStr == "extern") return tok_extern;
+ if (IdentifierStr == "if") return tok_if;
+ if (IdentifierStr == "then") return tok_then;
+ if (IdentifierStr == "else") return tok_else;
+ if (IdentifierStr == "for") return tok_for;
+ if (IdentifierStr == "in") return tok_in;
+ if (IdentifierStr == "binary") return tok_binary;
+ if (IdentifierStr == "unary") return tok_unary;
+ if (IdentifierStr == "var") return tok_var;
+ return tok_identifier;
+ }
+
+ if (isdigit(LastChar) || LastChar == '.') { // Number: [0-9.]+
+ std::string NumStr;
+ do {
+ NumStr += LastChar;
+ LastChar = getchar();
+ } while (isdigit(LastChar) || LastChar == '.');
+
+ NumVal = strtod(NumStr.c_str(), 0);
+ return tok_number;
+ }
+
+ if (LastChar == '#') {
+ // Comment until end of line.
+ do LastChar = getchar();
+ while (LastChar != EOF &amp;&amp; LastChar != '\n' &amp;&amp; LastChar != '\r');
+
+ if (LastChar != EOF)
+ return gettok();
+ }
+
+ // Check for end of file. Don't eat the EOF.
+ if (LastChar == EOF)
+ return tok_eof;
+
+ // Otherwise, just return the character as its ascii value.
+ int ThisChar = LastChar;
+ LastChar = getchar();
+ return ThisChar;
+}
+
+//===----------------------------------------------------------------------===//
+// Abstract Syntax Tree (aka Parse Tree)
+//===----------------------------------------------------------------------===//
+
+/// ExprAST - Base class for all expression nodes.
+class ExprAST {
+public:
+ virtual ~ExprAST() {}
+ virtual Value *Codegen() = 0;
+};
+
+/// NumberExprAST - Expression class for numeric literals like "1.0".
+class NumberExprAST : public ExprAST {
+ double Val;
+public:
+ NumberExprAST(double val) : Val(val) {}
+ virtual Value *Codegen();
+};
+
+/// VariableExprAST - Expression class for referencing a variable, like "a".
+class VariableExprAST : public ExprAST {
+ std::string Name;
+public:
+ VariableExprAST(const std::string &amp;name) : Name(name) {}
+ const std::string &amp;getName() const { return Name; }
+ virtual Value *Codegen();
+};
+
+/// UnaryExprAST - Expression class for a unary operator.
+class UnaryExprAST : public ExprAST {
+ char Opcode;
+ ExprAST *Operand;
+public:
+ UnaryExprAST(char opcode, ExprAST *operand)
+ : Opcode(opcode), Operand(operand) {}
+ virtual Value *Codegen();
+};
+
+/// BinaryExprAST - Expression class for a binary operator.
+class BinaryExprAST : public ExprAST {
+ char Op;
+ ExprAST *LHS, *RHS;
+public:
+ BinaryExprAST(char op, ExprAST *lhs, ExprAST *rhs)
+ : Op(op), LHS(lhs), RHS(rhs) {}
+ virtual Value *Codegen();
+};
+
+/// CallExprAST - Expression class for function calls.
+class CallExprAST : public ExprAST {
+ std::string Callee;
+ std::vector&lt;ExprAST*&gt; Args;
+public:
+ CallExprAST(const std::string &amp;callee, std::vector&lt;ExprAST*&gt; &amp;args)
+ : Callee(callee), Args(args) {}
+ virtual Value *Codegen();
+};
+
+/// IfExprAST - Expression class for if/then/else.
+class IfExprAST : public ExprAST {
+ ExprAST *Cond, *Then, *Else;
+public:
+ IfExprAST(ExprAST *cond, ExprAST *then, ExprAST *_else)
+ : Cond(cond), Then(then), Else(_else) {}
+ virtual Value *Codegen();
+};
+
+/// ForExprAST - Expression class for for/in.
+class ForExprAST : public ExprAST {
+ std::string VarName;
+ ExprAST *Start, *End, *Step, *Body;
+public:
+ ForExprAST(const std::string &amp;varname, ExprAST *start, ExprAST *end,
+ ExprAST *step, ExprAST *body)
+ : VarName(varname), Start(start), End(end), Step(step), Body(body) {}
+ virtual Value *Codegen();
+};
+
+/// VarExprAST - Expression class for var/in
+class VarExprAST : public ExprAST {
+ std::vector&lt;std::pair&lt;std::string, ExprAST*&gt; &gt; VarNames;
+ ExprAST *Body;
+public:
+ VarExprAST(const std::vector&lt;std::pair&lt;std::string, ExprAST*&gt; &gt; &amp;varnames,
+ ExprAST *body)
+ : VarNames(varnames), Body(body) {}
+
+ virtual Value *Codegen();
+};
+
+/// PrototypeAST - This class represents the "prototype" for a function,
+/// which captures its name, and its argument names (thus implicitly the number
+/// of arguments the function takes), as well as if it is an operator.
+class PrototypeAST {
+ std::string Name;
+ std::vector&lt;std::string&gt; Args;
+ bool isOperator;
+ unsigned Precedence; // Precedence if a binary op.
+public:
+ PrototypeAST(const std::string &amp;name, const std::vector&lt;std::string&gt; &amp;args,
+ bool isoperator = false, unsigned prec = 0)
+ : Name(name), Args(args), isOperator(isoperator), Precedence(prec) {}
+
+ bool isUnaryOp() const { return isOperator &amp;&amp; Args.size() == 1; }
+ bool isBinaryOp() const { return isOperator &amp;&amp; Args.size() == 2; }
+
+ char getOperatorName() const {
+ assert(isUnaryOp() || isBinaryOp());
+ return Name[Name.size()-1];
+ }
+
+ unsigned getBinaryPrecedence() const { return Precedence; }
+
+ Function *Codegen();
+
+ void CreateArgumentAllocas(Function *F);
+};
+
+/// FunctionAST - This class represents a function definition itself.
+class FunctionAST {
+ PrototypeAST *Proto;
+ ExprAST *Body;
+public:
+ FunctionAST(PrototypeAST *proto, ExprAST *body)
+ : Proto(proto), Body(body) {}
+
+ Function *Codegen();
+};
+
+//===----------------------------------------------------------------------===//
+// Parser
+//===----------------------------------------------------------------------===//
+
+/// CurTok/getNextToken - Provide a simple token buffer. CurTok is the current
+/// token the parser is looking at. getNextToken reads another token from the
+/// lexer and updates CurTok with its results.
+static int CurTok;
+static int getNextToken() {
+ return CurTok = gettok();
+}
+
+/// BinopPrecedence - This holds the precedence for each binary operator that is
+/// defined.
+static std::map&lt;char, int&gt; BinopPrecedence;
+
+/// GetTokPrecedence - Get the precedence of the pending binary operator token.
+static int GetTokPrecedence() {
+ if (!isascii(CurTok))
+ return -1;
+
+ // Make sure it's a declared binop.
+ int TokPrec = BinopPrecedence[CurTok];
+ if (TokPrec &lt;= 0) return -1;
+ return TokPrec;
+}
+
+/// Error* - These are little helper functions for error handling.
+ExprAST *Error(const char *Str) { fprintf(stderr, "Error: %s\n", Str);return 0;}
+PrototypeAST *ErrorP(const char *Str) { Error(Str); return 0; }
+FunctionAST *ErrorF(const char *Str) { Error(Str); return 0; }
+
+static ExprAST *ParseExpression();
+
+/// identifierexpr
+/// ::= identifier
+/// ::= identifier '(' expression* ')'
+static ExprAST *ParseIdentifierExpr() {
+ std::string IdName = IdentifierStr;
+
+ getNextToken(); // eat identifier.
+
+ if (CurTok != '(') // Simple variable ref.
+ return new VariableExprAST(IdName);
+
+ // Call.
+ getNextToken(); // eat (
+ std::vector&lt;ExprAST*&gt; Args;
+ if (CurTok != ')') {
+ while (1) {
+ ExprAST *Arg = ParseExpression();
+ if (!Arg) return 0;
+ Args.push_back(Arg);
+
+ if (CurTok == ')') break;
+
+ if (CurTok != ',')
+ return Error("Expected ')' or ',' in argument list");
+ getNextToken();
+ }
+ }
+
+ // Eat the ')'.
+ getNextToken();
+
+ return new CallExprAST(IdName, Args);
+}
+
+/// numberexpr ::= number
+static ExprAST *ParseNumberExpr() {
+ ExprAST *Result = new NumberExprAST(NumVal);
+ getNextToken(); // consume the number
+ return Result;
+}
+
+/// parenexpr ::= '(' expression ')'
+static ExprAST *ParseParenExpr() {
+ getNextToken(); // eat (.
+ ExprAST *V = ParseExpression();
+ if (!V) return 0;
+
+ if (CurTok != ')')
+ return Error("expected ')'");
+ getNextToken(); // eat ).
+ return V;
+}
+
+/// ifexpr ::= 'if' expression 'then' expression 'else' expression
+static ExprAST *ParseIfExpr() {
+ getNextToken(); // eat the if.
+
+ // condition.
+ ExprAST *Cond = ParseExpression();
+ if (!Cond) return 0;
+
+ if (CurTok != tok_then)
+ return Error("expected then");
+ getNextToken(); // eat the then
+
+ ExprAST *Then = ParseExpression();
+ if (Then == 0) return 0;
+
+ if (CurTok != tok_else)
+ return Error("expected else");
+
+ getNextToken();
+
+ ExprAST *Else = ParseExpression();
+ if (!Else) return 0;
+
+ return new IfExprAST(Cond, Then, Else);
+}
+
+/// forexpr ::= 'for' identifier '=' expr ',' expr (',' expr)? 'in' expression
+static ExprAST *ParseForExpr() {
+ getNextToken(); // eat the for.
+
+ if (CurTok != tok_identifier)
+ return Error("expected identifier after for");
+
+ std::string IdName = IdentifierStr;
+ getNextToken(); // eat identifier.
+
+ if (CurTok != '=')
+ return Error("expected '=' after for");
+ getNextToken(); // eat '='.
+
+
+ ExprAST *Start = ParseExpression();
+ if (Start == 0) return 0;
+ if (CurTok != ',')
+ return Error("expected ',' after for start value");
+ getNextToken();
+
+ ExprAST *End = ParseExpression();
+ if (End == 0) return 0;
+
+ // The step value is optional.
+ ExprAST *Step = 0;
+ if (CurTok == ',') {
+ getNextToken();
+ Step = ParseExpression();
+ if (Step == 0) return 0;
+ }
+
+ if (CurTok != tok_in)
+ return Error("expected 'in' after for");
+ getNextToken(); // eat 'in'.
+
+ ExprAST *Body = ParseExpression();
+ if (Body == 0) return 0;
+
+ return new ForExprAST(IdName, Start, End, Step, Body);
+}
+
+/// varexpr ::= 'var' identifier ('=' expression)?
+// (',' identifier ('=' expression)?)* 'in' expression
+static ExprAST *ParseVarExpr() {
+ getNextToken(); // eat the var.
+
+ std::vector&lt;std::pair&lt;std::string, ExprAST*&gt; &gt; VarNames;
+
+ // At least one variable name is required.
+ if (CurTok != tok_identifier)
+ return Error("expected identifier after var");
+
+ while (1) {
+ std::string Name = IdentifierStr;
+ getNextToken(); // eat identifier.
+
+ // Read the optional initializer.
+ ExprAST *Init = 0;
+ if (CurTok == '=') {
+ getNextToken(); // eat the '='.
+
+ Init = ParseExpression();
+ if (Init == 0) return 0;
+ }
+
+ VarNames.push_back(std::make_pair(Name, Init));
+
+ // End of var list, exit loop.
+ if (CurTok != ',') break;
+ getNextToken(); // eat the ','.
+
+ if (CurTok != tok_identifier)
+ return Error("expected identifier list after var");
+ }
+
+ // At this point, we have to have 'in'.
+ if (CurTok != tok_in)
+ return Error("expected 'in' keyword after 'var'");
+ getNextToken(); // eat 'in'.
+
+ ExprAST *Body = ParseExpression();
+ if (Body == 0) return 0;
+
+ return new VarExprAST(VarNames, Body);
+}
+
+/// primary
+/// ::= identifierexpr
+/// ::= numberexpr
+/// ::= parenexpr
+/// ::= ifexpr
+/// ::= forexpr
+/// ::= varexpr
+static ExprAST *ParsePrimary() {
+ switch (CurTok) {
+ default: return Error("unknown token when expecting an expression");
+ case tok_identifier: return ParseIdentifierExpr();
+ case tok_number: return ParseNumberExpr();
+ case '(': return ParseParenExpr();
+ case tok_if: return ParseIfExpr();
+ case tok_for: return ParseForExpr();
+ case tok_var: return ParseVarExpr();
+ }
+}
+
+/// unary
+/// ::= primary
+/// ::= '!' unary
+static ExprAST *ParseUnary() {
+ // If the current token is not an operator, it must be a primary expr.
+ if (!isascii(CurTok) || CurTok == '(' || CurTok == ',')
+ return ParsePrimary();
+
+ // If this is a unary operator, read it.
+ int Opc = CurTok;
+ getNextToken();
+ if (ExprAST *Operand = ParseUnary())
+ return new UnaryExprAST(Opc, Operand);
+ return 0;
+}
+
+/// binoprhs
+/// ::= ('+' unary)*
+static ExprAST *ParseBinOpRHS(int ExprPrec, ExprAST *LHS) {
+ // If this is a binop, find its precedence.
+ while (1) {
+ int TokPrec = GetTokPrecedence();
+
+ // If this is a binop that binds at least as tightly as the current binop,
+ // consume it, otherwise we are done.
+ if (TokPrec &lt; ExprPrec)
+ return LHS;
+
+ // Okay, we know this is a binop.
+ int BinOp = CurTok;
+ getNextToken(); // eat binop
+
+ // Parse the unary expression after the binary operator.
+ ExprAST *RHS = ParseUnary();
+ if (!RHS) return 0;
+
+ // If BinOp binds less tightly with RHS than the operator after RHS, let
+ // the pending operator take RHS as its LHS.
+ int NextPrec = GetTokPrecedence();
+ if (TokPrec &lt; NextPrec) {
+ RHS = ParseBinOpRHS(TokPrec+1, RHS);
+ if (RHS == 0) return 0;
+ }
+
+ // Merge LHS/RHS.
+ LHS = new BinaryExprAST(BinOp, LHS, RHS);
+ }
+}
+
+/// expression
+/// ::= unary binoprhs
+///
+static ExprAST *ParseExpression() {
+ ExprAST *LHS = ParseUnary();
+ if (!LHS) return 0;
+
+ return ParseBinOpRHS(0, LHS);
+}
+
+/// prototype
+/// ::= id '(' id* ')'
+/// ::= binary LETTER number? (id, id)
+/// ::= unary LETTER (id)
+static PrototypeAST *ParsePrototype() {
+ std::string FnName;
+
+ unsigned Kind = 0; // 0 = identifier, 1 = unary, 2 = binary.
+ unsigned BinaryPrecedence = 30;
+
+ switch (CurTok) {
+ default:
+ return ErrorP("Expected function name in prototype");
+ case tok_identifier:
+ FnName = IdentifierStr;
+ Kind = 0;
+ getNextToken();
+ break;
+ case tok_unary:
+ getNextToken();
+ if (!isascii(CurTok))
+ return ErrorP("Expected unary operator");
+ FnName = "unary";
+ FnName += (char)CurTok;
+ Kind = 1;
+ getNextToken();
+ break;
+ case tok_binary:
+ getNextToken();
+ if (!isascii(CurTok))
+ return ErrorP("Expected binary operator");
+ FnName = "binary";
+ FnName += (char)CurTok;
+ Kind = 2;
+ getNextToken();
+
+ // Read the precedence if present.
+ if (CurTok == tok_number) {
+ if (NumVal &lt; 1 || NumVal &gt; 100)
+ return ErrorP("Invalid precedecnce: must be 1..100");
+ BinaryPrecedence = (unsigned)NumVal;
+ getNextToken();
+ }
+ break;
+ }
+
+ if (CurTok != '(')
+ return ErrorP("Expected '(' in prototype");
+
+ std::vector&lt;std::string&gt; ArgNames;
+ while (getNextToken() == tok_identifier)
+ ArgNames.push_back(IdentifierStr);
+ if (CurTok != ')')
+ return ErrorP("Expected ')' in prototype");
+
+ // success.
+ getNextToken(); // eat ')'.
+
+ // Verify right number of names for operator.
+ if (Kind &amp;&amp; ArgNames.size() != Kind)
+ return ErrorP("Invalid number of operands for operator");
+
+ return new PrototypeAST(FnName, ArgNames, Kind != 0, BinaryPrecedence);
+}
+
+/// definition ::= 'def' prototype expression
+static FunctionAST *ParseDefinition() {
+ getNextToken(); // eat def.
+ PrototypeAST *Proto = ParsePrototype();
+ if (Proto == 0) return 0;
+
+ if (ExprAST *E = ParseExpression())
+ return new FunctionAST(Proto, E);
+ return 0;
+}
+
+/// toplevelexpr ::= expression
+static FunctionAST *ParseTopLevelExpr() {
+ if (ExprAST *E = ParseExpression()) {
+ // Make an anonymous proto.
+ PrototypeAST *Proto = new PrototypeAST("", std::vector&lt;std::string&gt;());
+ return new FunctionAST(Proto, E);
+ }
+ return 0;
+}
+
+/// external ::= 'extern' prototype
+static PrototypeAST *ParseExtern() {
+ getNextToken(); // eat extern.
+ return ParsePrototype();
+}
+
+//===----------------------------------------------------------------------===//
+// Code Generation
+//===----------------------------------------------------------------------===//
+
+static Module *TheModule;
+static IRBuilder&lt;&gt; Builder(getGlobalContext());
+static std::map&lt;std::string, AllocaInst*&gt; NamedValues;
+static FunctionPassManager *TheFPM;
+
+Value *ErrorV(const char *Str) { Error(Str); return 0; }
+
+/// CreateEntryBlockAlloca - Create an alloca instruction in the entry block of
+/// the function. This is used for mutable variables etc.
+static AllocaInst *CreateEntryBlockAlloca(Function *TheFunction,
+ const std::string &amp;VarName) {
+ IRBuilder&lt;&gt; TmpB(&amp;TheFunction-&gt;getEntryBlock(),
+ TheFunction-&gt;getEntryBlock().begin());
+ return TmpB.CreateAlloca(Type::getDoubleTy(getGlobalContext()), 0,
+ VarName.c_str());
+}
+
+Value *NumberExprAST::Codegen() {
+ return ConstantFP::get(getGlobalContext(), APFloat(Val));
+}
+
+Value *VariableExprAST::Codegen() {
+ // Look this variable up in the function.
+ Value *V = NamedValues[Name];
+ if (V == 0) return ErrorV("Unknown variable name");
+
+ // Load the value.
+ return Builder.CreateLoad(V, Name.c_str());
+}
+
+Value *UnaryExprAST::Codegen() {
+ Value *OperandV = Operand-&gt;Codegen();
+ if (OperandV == 0) return 0;
+
+ Function *F = TheModule-&gt;getFunction(std::string("unary")+Opcode);
+ if (F == 0)
+ return ErrorV("Unknown unary operator");
+
+ return Builder.CreateCall(F, OperandV, "unop");
+}
+
+Value *BinaryExprAST::Codegen() {
+ // Special case '=' because we don't want to emit the LHS as an expression.
+ if (Op == '=') {
+ // Assignment requires the LHS to be an identifier.
+ VariableExprAST *LHSE = dynamic_cast&lt;VariableExprAST*&gt;(LHS);
+ if (!LHSE)
+ return ErrorV("destination of '=' must be a variable");
+ // Codegen the RHS.
+ Value *Val = RHS-&gt;Codegen();
+ if (Val == 0) return 0;
+
+ // Look up the name.
+ Value *Variable = NamedValues[LHSE-&gt;getName()];
+ if (Variable == 0) return ErrorV("Unknown variable name");
+
+ Builder.CreateStore(Val, Variable);
+ return Val;
+ }
+
+ Value *L = LHS-&gt;Codegen();
+ Value *R = RHS-&gt;Codegen();
+ if (L == 0 || R == 0) return 0;
+
+ switch (Op) {
+ case '+': return Builder.CreateFAdd(L, R, "addtmp");
+ case '-': return Builder.CreateFSub(L, R, "subtmp");
+ case '*': return Builder.CreateFMul(L, R, "multmp");
+ case '&lt;':
+ L = Builder.CreateFCmpULT(L, R, "cmptmp");
+ // Convert bool 0/1 to double 0.0 or 1.0
+ return Builder.CreateUIToFP(L, Type::getDoubleTy(getGlobalContext()),
+ "booltmp");
+ default: break;
+ }
+
+ // If it wasn't a builtin binary operator, it must be a user defined one. Emit
+ // a call to it.
+ Function *F = TheModule-&gt;getFunction(std::string("binary")+Op);
+ assert(F &amp;&amp; "binary operator not found!");
+
+ Value *Ops[2] = { L, R };
+ return Builder.CreateCall(F, Ops, "binop");
+}
+
+Value *CallExprAST::Codegen() {
+ // Look up the name in the global module table.
+ Function *CalleeF = TheModule-&gt;getFunction(Callee);
+ if (CalleeF == 0)
+ return ErrorV("Unknown function referenced");
+
+ // If argument mismatch error.
+ if (CalleeF-&gt;arg_size() != Args.size())
+ return ErrorV("Incorrect # arguments passed");
+
+ std::vector&lt;Value*&gt; ArgsV;
+ for (unsigned i = 0, e = Args.size(); i != e; ++i) {
+ ArgsV.push_back(Args[i]-&gt;Codegen());
+ if (ArgsV.back() == 0) return 0;
+ }
+
+ return Builder.CreateCall(CalleeF, ArgsV, "calltmp");
+}
+
+Value *IfExprAST::Codegen() {
+ Value *CondV = Cond-&gt;Codegen();
+ if (CondV == 0) return 0;
+
+ // Convert condition to a bool by comparing equal to 0.0.
+ CondV = Builder.CreateFCmpONE(CondV,
+ ConstantFP::get(getGlobalContext(), APFloat(0.0)),
+ "ifcond");
+
+ Function *TheFunction = Builder.GetInsertBlock()-&gt;getParent();
+
+ // Create blocks for the then and else cases. Insert the 'then' block at the
+ // end of the function.
+ BasicBlock *ThenBB = BasicBlock::Create(getGlobalContext(), "then", TheFunction);
+ BasicBlock *ElseBB = BasicBlock::Create(getGlobalContext(), "else");
+ BasicBlock *MergeBB = BasicBlock::Create(getGlobalContext(), "ifcont");
+
+ Builder.CreateCondBr(CondV, ThenBB, ElseBB);
+
+ // Emit then value.
+ Builder.SetInsertPoint(ThenBB);
+
+ Value *ThenV = Then-&gt;Codegen();
+ if (ThenV == 0) return 0;
+
+ Builder.CreateBr(MergeBB);
+ // Codegen of 'Then' can change the current block, update ThenBB for the PHI.
+ ThenBB = Builder.GetInsertBlock();
+
+ // Emit else block.
+ TheFunction-&gt;getBasicBlockList().push_back(ElseBB);
+ Builder.SetInsertPoint(ElseBB);
+
+ Value *ElseV = Else-&gt;Codegen();
+ if (ElseV == 0) return 0;
+
+ Builder.CreateBr(MergeBB);
+ // Codegen of 'Else' can change the current block, update ElseBB for the PHI.
+ ElseBB = Builder.GetInsertBlock();
+
+ // Emit merge block.
+ TheFunction-&gt;getBasicBlockList().push_back(MergeBB);
+ Builder.SetInsertPoint(MergeBB);
+ PHINode *PN = Builder.CreatePHI(Type::getDoubleTy(getGlobalContext()), 2,
+ "iftmp");
+
+ PN-&gt;addIncoming(ThenV, ThenBB);
+ PN-&gt;addIncoming(ElseV, ElseBB);
+ return PN;
+}
+
+Value *ForExprAST::Codegen() {
+ // Output this as:
+ // var = alloca double
+ // ...
+ // start = startexpr
+ // store start -&gt; var
+ // goto loop
+ // loop:
+ // ...
+ // bodyexpr
+ // ...
+ // loopend:
+ // step = stepexpr
+ // endcond = endexpr
+ //
+ // curvar = load var
+ // nextvar = curvar + step
+ // store nextvar -&gt; var
+ // br endcond, loop, endloop
+ // outloop:
+
+ Function *TheFunction = Builder.GetInsertBlock()-&gt;getParent();
+
+ // Create an alloca for the variable in the entry block.
+ AllocaInst *Alloca = CreateEntryBlockAlloca(TheFunction, VarName);
+
+ // Emit the start code first, without 'variable' in scope.
+ Value *StartVal = Start-&gt;Codegen();
+ if (StartVal == 0) return 0;
+
+ // Store the value into the alloca.
+ Builder.CreateStore(StartVal, Alloca);
+
+ // Make the new basic block for the loop header, inserting after current
+ // block.
+ BasicBlock *LoopBB = BasicBlock::Create(getGlobalContext(), "loop", TheFunction);
+
+ // Insert an explicit fall through from the current block to the LoopBB.
+ Builder.CreateBr(LoopBB);
+
+ // Start insertion in LoopBB.
+ Builder.SetInsertPoint(LoopBB);
+
+ // Within the loop, the variable is defined equal to the alloca. If it
+ // shadows an existing variable, we have to restore it, so save it now.
+ AllocaInst *OldVal = NamedValues[VarName];
+ NamedValues[VarName] = Alloca;
+
+ // Emit the body of the loop. This, like any other expr, can change the
+ // current BB. Note that we ignore the value computed by the body, but don't
+ // allow an error.
+ if (Body-&gt;Codegen() == 0)
+ return 0;
+
+ // Emit the step value.
+ Value *StepVal;
+ if (Step) {
+ StepVal = Step-&gt;Codegen();
+ if (StepVal == 0) return 0;
+ } else {
+ // If not specified, use 1.0.
+ StepVal = ConstantFP::get(getGlobalContext(), APFloat(1.0));
+ }
+
+ // Compute the end condition.
+ Value *EndCond = End-&gt;Codegen();
+ if (EndCond == 0) return EndCond;
+
+ // Reload, increment, and restore the alloca. This handles the case where
+ // the body of the loop mutates the variable.
+ Value *CurVar = Builder.CreateLoad(Alloca, VarName.c_str());
+ Value *NextVar = Builder.CreateFAdd(CurVar, StepVal, "nextvar");
+ Builder.CreateStore(NextVar, Alloca);
+
+ // Convert condition to a bool by comparing equal to 0.0.
+ EndCond = Builder.CreateFCmpONE(EndCond,
+ ConstantFP::get(getGlobalContext(), APFloat(0.0)),
+ "loopcond");
+
+ // Create the "after loop" block and insert it.
+ BasicBlock *AfterBB = BasicBlock::Create(getGlobalContext(), "afterloop", TheFunction);
+
+ // Insert the conditional branch into the end of the current block.
+ Builder.CreateCondBr(EndCond, LoopBB, AfterBB);
+
+ // Any new code will be inserted in AfterBB.
+ Builder.SetInsertPoint(AfterBB);
+
+ // Restore the unshadowed variable.
+ if (OldVal)
+ NamedValues[VarName] = OldVal;
+ else
+ NamedValues.erase(VarName);
+
+
+ // for expr always returns 0.0.
+ return Constant::getNullValue(Type::getDoubleTy(getGlobalContext()));
+}
+
+Value *VarExprAST::Codegen() {
+ std::vector&lt;AllocaInst *&gt; OldBindings;
+
+ Function *TheFunction = Builder.GetInsertBlock()-&gt;getParent();
+
+ // Register all variables and emit their initializer.
+ for (unsigned i = 0, e = VarNames.size(); i != e; ++i) {
+ const std::string &amp;VarName = VarNames[i].first;
+ ExprAST *Init = VarNames[i].second;
+
+ // Emit the initializer before adding the variable to scope, this prevents
+ // the initializer from referencing the variable itself, and permits stuff
+ // like this:
+ // var a = 1 in
+ // var a = a in ... # refers to outer 'a'.
+ Value *InitVal;
+ if (Init) {
+ InitVal = Init-&gt;Codegen();
+ if (InitVal == 0) return 0;
+ } else { // If not specified, use 0.0.
+ InitVal = ConstantFP::get(getGlobalContext(), APFloat(0.0));
+ }
+
+ AllocaInst *Alloca = CreateEntryBlockAlloca(TheFunction, VarName);
+ Builder.CreateStore(InitVal, Alloca);
+
+ // Remember the old variable binding so that we can restore the binding when
+ // we unrecurse.
+ OldBindings.push_back(NamedValues[VarName]);
+
+ // Remember this binding.
+ NamedValues[VarName] = Alloca;
+ }
+
+ // Codegen the body, now that all vars are in scope.
+ Value *BodyVal = Body-&gt;Codegen();
+ if (BodyVal == 0) return 0;
+
+ // Pop all our variables from scope.
+ for (unsigned i = 0, e = VarNames.size(); i != e; ++i)
+ NamedValues[VarNames[i].first] = OldBindings[i];
+
+ // Return the body computation.
+ return BodyVal;
+}
+
+Function *PrototypeAST::Codegen() {
+ // Make the function type: double(double,double) etc.
+ std::vector&lt;Type*&gt; Doubles(Args.size(),
+ Type::getDoubleTy(getGlobalContext()));
+ FunctionType *FT = FunctionType::get(Type::getDoubleTy(getGlobalContext()),
+ Doubles, false);
+
+ Function *F = Function::Create(FT, Function::ExternalLinkage, Name, TheModule);
+
+ // If F conflicted, there was already something named 'Name'. If it has a
+ // body, don't allow redefinition or reextern.
+ if (F-&gt;getName() != Name) {
+ // Delete the one we just made and get the existing one.
+ F-&gt;eraseFromParent();
+ F = TheModule-&gt;getFunction(Name);
+
+ // If F already has a body, reject this.
+ if (!F-&gt;empty()) {
+ ErrorF("redefinition of function");
+ return 0;
+ }
+
+ // If F took a different number of args, reject.
+ if (F-&gt;arg_size() != Args.size()) {
+ ErrorF("redefinition of function with different # args");
+ return 0;
+ }
+ }
+
+ // Set names for all arguments.
+ unsigned Idx = 0;
+ for (Function::arg_iterator AI = F-&gt;arg_begin(); Idx != Args.size();
+ ++AI, ++Idx)
+ AI-&gt;setName(Args[Idx]);
+
+ return F;
+}
+
+/// CreateArgumentAllocas - Create an alloca for each argument and register the
+/// argument in the symbol table so that references to it will succeed.
+void PrototypeAST::CreateArgumentAllocas(Function *F) {
+ Function::arg_iterator AI = F-&gt;arg_begin();
+ for (unsigned Idx = 0, e = Args.size(); Idx != e; ++Idx, ++AI) {
+ // Create an alloca for this variable.
+ AllocaInst *Alloca = CreateEntryBlockAlloca(F, Args[Idx]);
+
+ // Store the initial value into the alloca.
+ Builder.CreateStore(AI, Alloca);
+
+ // Add arguments to variable symbol table.
+ NamedValues[Args[Idx]] = Alloca;
+ }
+}
+
+Function *FunctionAST::Codegen() {
+ NamedValues.clear();
+
+ Function *TheFunction = Proto-&gt;Codegen();
+ if (TheFunction == 0)
+ return 0;
+
+ // If this is an operator, install it.
+ if (Proto-&gt;isBinaryOp())
+ BinopPrecedence[Proto-&gt;getOperatorName()] = Proto-&gt;getBinaryPrecedence();
+
+ // Create a new basic block to start insertion into.
+ BasicBlock *BB = BasicBlock::Create(getGlobalContext(), "entry", TheFunction);
+ Builder.SetInsertPoint(BB);
+
+ // Add all arguments to the symbol table and create their allocas.
+ Proto-&gt;CreateArgumentAllocas(TheFunction);
+
+ if (Value *RetVal = Body-&gt;Codegen()) {
+ // Finish off the function.
+ Builder.CreateRet(RetVal);
+
+ // Validate the generated code, checking for consistency.
+ verifyFunction(*TheFunction);
+
+ // Optimize the function.
+ TheFPM-&gt;run(*TheFunction);
+
+ return TheFunction;
+ }
+
+ // Error reading body, remove function.
+ TheFunction-&gt;eraseFromParent();
+
+ if (Proto-&gt;isBinaryOp())
+ BinopPrecedence.erase(Proto-&gt;getOperatorName());
+ return 0;
+}
+
+//===----------------------------------------------------------------------===//
+// Top-Level parsing and JIT Driver
+//===----------------------------------------------------------------------===//
+
+static ExecutionEngine *TheExecutionEngine;
+
+static void HandleDefinition() {
+ if (FunctionAST *F = ParseDefinition()) {
+ if (Function *LF = F-&gt;Codegen()) {
+ fprintf(stderr, "Read function definition:");
+ LF-&gt;dump();
+ }
+ } else {
+ // Skip token for error recovery.
+ getNextToken();
+ }
+}
+
+static void HandleExtern() {
+ if (PrototypeAST *P = ParseExtern()) {
+ if (Function *F = P-&gt;Codegen()) {
+ fprintf(stderr, "Read extern: ");
+ F-&gt;dump();
+ }
+ } else {
+ // Skip token for error recovery.
+ getNextToken();
+ }
+}
+
+static void HandleTopLevelExpression() {
+ // Evaluate a top-level expression into an anonymous function.
+ if (FunctionAST *F = ParseTopLevelExpr()) {
+ if (Function *LF = F-&gt;Codegen()) {
+ // JIT the function, returning a function pointer.
+ void *FPtr = TheExecutionEngine-&gt;getPointerToFunction(LF);
+
+ // Cast it to the right type (takes no arguments, returns a double) so we
+ // can call it as a native function.
+ double (*FP)() = (double (*)())(intptr_t)FPtr;
+ fprintf(stderr, "Evaluated to %f\n", FP());
+ }
+ } else {
+ // Skip token for error recovery.
+ getNextToken();
+ }
+}
+
+/// top ::= definition | external | expression | ';'
+static void MainLoop() {
+ while (1) {
+ fprintf(stderr, "ready&gt; ");
+ switch (CurTok) {
+ case tok_eof: return;
+ case ';': getNextToken(); break; // ignore top-level semicolons.
+ case tok_def: HandleDefinition(); break;
+ case tok_extern: HandleExtern(); break;
+ default: HandleTopLevelExpression(); break;
+ }
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// "Library" functions that can be "extern'd" from user code.
+//===----------------------------------------------------------------------===//
+
+/// putchard - putchar that takes a double and returns 0.
+extern "C"
+double putchard(double X) {
+ putchar((char)X);
+ return 0;
+}
+
+/// printd - printf that takes a double and prints it as "%f\n", returning 0.
+extern "C"
+double printd(double X) {
+ printf("%f\n", X);
+ return 0;
+}
+
+//===----------------------------------------------------------------------===//
+// Main driver code.
+//===----------------------------------------------------------------------===//
+
+int main() {
+ InitializeNativeTarget();
+ LLVMContext &amp;Context = getGlobalContext();
+
+ // Install standard binary operators.
+ // 1 is lowest precedence.
+ BinopPrecedence['='] = 2;
+ BinopPrecedence['&lt;'] = 10;
+ BinopPrecedence['+'] = 20;
+ BinopPrecedence['-'] = 20;
+ BinopPrecedence['*'] = 40; // highest.
+
+ // Prime the first token.
+ fprintf(stderr, "ready&gt; ");
+ getNextToken();
+
+ // Make the module, which holds all the code.
+ TheModule = new Module("my cool jit", Context);
+
+ // Create the JIT. This takes ownership of the module.
+ std::string ErrStr;
+ TheExecutionEngine = EngineBuilder(TheModule).setErrorStr(&amp;ErrStr).create();
+ if (!TheExecutionEngine) {
+ fprintf(stderr, "Could not create ExecutionEngine: %s\n", ErrStr.c_str());
+ exit(1);
+ }
+
+ FunctionPassManager OurFPM(TheModule);
+
+ // Set up the optimizer pipeline. Start with registering info about how the
+ // target lays out data structures.
+ OurFPM.add(new TargetData(*TheExecutionEngine-&gt;getTargetData()));
+ // Provide basic AliasAnalysis support for GVN.
+ OurFPM.add(createBasicAliasAnalysisPass());
+ // Promote allocas to registers.
+ OurFPM.add(createPromoteMemoryToRegisterPass());
+ // Do simple "peephole" optimizations and bit-twiddling optzns.
+ OurFPM.add(createInstructionCombiningPass());
+ // Reassociate expressions.
+ OurFPM.add(createReassociatePass());
+ // Eliminate Common SubExpressions.
+ OurFPM.add(createGVNPass());
+ // Simplify the control flow graph (deleting unreachable blocks, etc).
+ OurFPM.add(createCFGSimplificationPass());
+
+ OurFPM.doInitialization();
+
+ // Set the global so the code gen can use this.
+ TheFPM = &amp;OurFPM;
+
+ // Run the main "interpreter loop" now.
+ MainLoop();
+
+ TheFPM = 0;
+
+ // Print out all of the generated code.
+ TheModule-&gt;dump();
+
+ return 0;
+}
+</pre>
+</div>
+
+<a href="LangImpl8.html">Next: Conclusion and other useful LLVM tidbits</a>
+</div>
+
+<!-- *********************************************************************** -->
+<hr>
+<address>
+ <a href="http://jigsaw.w3.org/css-validator/check/referer"><img
+ src="http://jigsaw.w3.org/css-validator/images/vcss" alt="Valid CSS!"></a>
+ <a href="http://validator.w3.org/check/referer"><img
+ src="http://www.w3.org/Icons/valid-html401" alt="Valid HTML 4.01!"></a>
+
+ <a href="mailto:sabre@nondot.org">Chris Lattner</a><br>
+ <a href="http://llvm.org/">The LLVM Compiler Infrastructure</a><br>
+ Last modified: $Date$
+</address>
+</body>
+</html>
diff --git a/docs/tutorial/LangImpl8.html b/docs/tutorial/LangImpl8.html
new file mode 100644
index 00000000000..7c1a500a21b
--- /dev/null
+++ b/docs/tutorial/LangImpl8.html
@@ -0,0 +1,359 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
+ "http://www.w3.org/TR/html4/strict.dtd">
+
+<html>
+<head>
+ <title>Kaleidoscope: Conclusion and other useful LLVM tidbits</title>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
+ <meta name="author" content="Chris Lattner">
+ <link rel="stylesheet" href="../_static/llvm.css" type="text/css">
+</head>
+
+<body>
+
+<h1>Kaleidoscope: Conclusion and other useful LLVM tidbits</h1>
+
+<ul>
+<li><a href="index.html">Up to Tutorial Index</a></li>
+<li>Chapter 8
+ <ol>
+ <li><a href="#conclusion">Tutorial Conclusion</a></li>
+ <li><a href="#llvmirproperties">Properties of LLVM IR</a>
+ <ul>
+ <li><a href="#targetindep">Target Independence</a></li>
+ <li><a href="#safety">Safety Guarantees</a></li>
+ <li><a href="#langspecific">Language-Specific Optimizations</a></li>
+ </ul>
+ </li>
+ <li><a href="#tipsandtricks">Tips and Tricks</a>
+ <ul>
+ <li><a href="#offsetofsizeof">Implementing portable
+ offsetof/sizeof</a></li>
+ <li><a href="#gcstack">Garbage Collected Stack Frames</a></li>
+ </ul>
+ </li>
+ </ol>
+</li>
+</ul>
+
+
+<div class="doc_author">
+ <p>Written by <a href="mailto:sabre@nondot.org">Chris Lattner</a></p>
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="conclusion">Tutorial Conclusion</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>Welcome to the final chapter of the "<a href="index.html">Implementing a
+language with LLVM</a>" tutorial. In the course of this tutorial, we have grown
+our little Kaleidoscope language from being a useless toy, to being a
+semi-interesting (but probably still useless) toy. :)</p>
+
+<p>It is interesting to see how far we've come, and how little code it has
+taken. We built the entire lexer, parser, AST, code generator, and an
+interactive run-loop (with a JIT!) by-hand in under 700 lines of
+(non-comment/non-blank) code.</p>
+
+<p>Our little language supports a couple of interesting features: it supports
+user defined binary and unary operators, it uses JIT compilation for immediate
+evaluation, and it supports a few control flow constructs with SSA construction.
+</p>
+
+<p>Part of the idea of this tutorial was to show you how easy and fun it can be
+to define, build, and play with languages. Building a compiler need not be a
+scary or mystical process! Now that you've seen some of the basics, I strongly
+encourage you to take the code and hack on it. For example, try adding:</p>
+
+<ul>
+<li><b>global variables</b> - While global variables have questionable value in
+modern software engineering, they are often useful when putting together quick
+little hacks like the Kaleidoscope compiler itself. Fortunately, our current
+setup makes it very easy to add global variables: just have value lookup check
+to see if an unresolved variable is in the global variable symbol table before
+rejecting it. To create a new global variable, make an instance of the LLVM
+<tt>GlobalVariable</tt> class (a minimal sketch appears right after this
+list).</li>
+
+<li><b>typed variables</b> - Kaleidoscope currently only supports variables of
+type double. This gives the language a very nice elegance, because only
+supporting one type means that you never have to specify types. Different
+languages have different ways of handling this. The easiest way is to require
+the user to specify types for every variable definition, and record the type
+of the variable in the symbol table along with its Value*.</li>
+
+<li><b>arrays, structs, vectors, etc</b> - Once you add types, you can start
+extending the type system in all sorts of interesting ways. Simple arrays are
+very easy and are quite useful for many different applications. Adding them is
+mostly an exercise in learning how the LLVM <a
+href="../LangRef.html#i_getelementptr">getelementptr</a> instruction works: it
+is so nifty/unconventional, it <a
+href="../GetElementPtr.html">has its own FAQ</a>! If you add support
+for recursive types (e.g. linked lists), make sure to read the <a
+href="../ProgrammersManual.html#TypeResolve">section in the LLVM
+Programmer's Manual</a> that describes how to construct them.</li>
+
+<li><b>standard runtime</b> - Our current language allows the user to access
+arbitrary external functions, and we use it for things like "printd" and
+"putchard". As you extend the language to add higher-level constructs, often
+these constructs make the most sense if they are lowered to calls into a
+language-supplied runtime. For example, if you add hash tables to the language,
+it would probably make sense to add the routines to a runtime, instead of
+inlining them all the way.</li>
+
+<li><b>memory management</b> - Currently we can only access the stack in
+Kaleidoscope. It would also be useful to be able to allocate heap memory,
+either with calls to the standard libc malloc/free interface or with a garbage
+collector. If you would like to use garbage collection, note that LLVM fully
+supports <a href="../GarbageCollection.html">Accurate Garbage Collection</a>
+including algorithms that move objects and need to scan/update the stack.</li>
+
+<li><b>debugger support</b> - LLVM supports generation of <a
+href="../SourceLevelDebugging.html">DWARF Debug info</a> which is understood by
+common debuggers like GDB. Adding support for debug info is fairly
+straightforward. The best way to understand it is to compile some C/C++ code
+with "<tt>llvm-gcc -g -O0</tt>" and taking a look at what it produces.</li>
+
+<li><b>exception handling support</b> - LLVM supports generation of <a
+href="../ExceptionHandling.html">zero cost exceptions</a> which interoperate
+with code compiled in other languages. You could also generate code by
+implicitly making every function return an error value and checking it. You
+could also make explicit use of setjmp/longjmp. There are many different ways
+to go here.</li>
+
+<li><b>object orientation, generics, database access, complex numbers,
+geometric programming, ...</b> - Really, there is
+no end of crazy features that you can add to the language.</li>
+
+<li><b>unusual domains</b> - We've been talking about applying LLVM to a domain
+that many people are interested in: building a compiler for a specific language.
+However, there are many other domains that can use compiler technology that are
+not typically considered. For example, LLVM has been used to implement OpenGL
+graphics acceleration, translate C++ code to ActionScript, and many other
+cute and clever things. Maybe you will be the first to JIT compile a regular
+expression interpreter into native code with LLVM?</li>
+
+</ul>
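+
+<p>To make the first suggestion above concrete, here is a minimal sketch (not
+part of the tutorial code, and only one of many ways to do it) of how value
+lookup in <tt>VariableExprAST::Codegen</tt> could fall back to a module-level
+global before rejecting a name. The <tt>CreateGlobal</tt> helper is
+hypothetical and exists only to show the <tt>GlobalVariable</tt>
+constructor:</p>
+
+<div class="doc_code">
+<pre>
+Value *VariableExprAST::Codegen() {
+  // Local (stack) variables first, exactly as before.
+  if (AllocaInst *A = NamedValues[Name])
+    return Builder.CreateLoad(A, Name.c_str());
+
+  // Fall back to the module's global symbol table.
+  if (GlobalVariable *GV = TheModule-&gt;getNamedGlobal(Name))
+    return Builder.CreateLoad(GV, Name.c_str());
+
+  return ErrorV("Unknown variable name");
+}
+
+// Hypothetical helper: make a new double-typed global initialized to 0.0.
+static GlobalVariable *CreateGlobal(const std::string &amp;Name) {
+  return new GlobalVariable(*TheModule, Type::getDoubleTy(getGlobalContext()),
+                            /*isConstant=*/false, GlobalValue::ExternalLinkage,
+                            ConstantFP::get(getGlobalContext(), APFloat(0.0)),
+                            Name);
+}
+</pre>
+</div>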
+
+<p>
+Have fun - try doing something crazy and unusual. Building a language like
+everyone else always has is much less fun than trying something a little crazy
+or off the wall and seeing how it turns out. If you get stuck or want to talk
+about it, feel free to email the <a
+href="http://lists.cs.uiuc.edu/mailman/listinfo/llvmdev">llvmdev mailing
+list</a>: it has lots of people who are interested in languages and are often
+willing to help out.
+</p>
+
+<p>Before we end this tutorial, I want to talk about some "tips and tricks" for generating
+LLVM IR. These are some of the more subtle things that may not be obvious, but
+are very useful if you want to take advantage of LLVM's capabilities.</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="llvmirproperties">Properties of the LLVM IR</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>We have a couple of common questions about code in the LLVM IR form - let's
+just get these out of the way right now, shall we?</p>
+
+<!-- ======================================================================= -->
+<h4><a name="targetindep">Target Independence</a></h4>
+<!-- ======================================================================= -->
+
+<div>
+
+<p>Kaleidoscope is an example of a "portable language": any program written in
+Kaleidoscope will work the same way on any target that it runs on. Many other
+languages have this property, e.g. Lisp, Java, Haskell, JavaScript, Python, etc.
+(note that while these languages are portable, not all their libraries are).</p>
+
+<p>One nice aspect of LLVM is that it is often capable of preserving target
+independence in the IR: you can take the LLVM IR for a Kaleidoscope-compiled
+program and run it on any target that LLVM supports, even emitting C code and
+compiling that on targets that LLVM doesn't support natively. You can trivially
+tell that the Kaleidoscope compiler generates target-independent code because it
+never queries for any target-specific information when generating code.</p>
+
+<p>The fact that LLVM provides a compact, target-independent, representation for
+code gets a lot of people excited. Unfortunately, these people are usually
+thinking about C or a language from the C family when they are asking questions
+about language portability. I say "unfortunately", because there is really no
+way to make (fully general) C code portable, other than shipping the source code
+around (and of course, C source code is not actually portable in general
+either - ever port a really old application from 32- to 64-bits?).</p>
+
+<p>The problem with C (again, in its full generality) is that it is heavily
+laden with target specific assumptions. As one simple example, the preprocessor
+often destructively removes target-independence from the code when it processes
+the input text:</p>
+
+<div class="doc_code">
+<pre>
+#ifdef __i386__
+ int X = 1;
+#else
+ int X = 42;
+#endif
+</pre>
+</div>
+
+<p>While it is possible to engineer more and more complex solutions to problems
+like this, it cannot be solved in full generality in a way that is better than shipping
+the actual source code.</p>
+
+<p>That said, there are interesting subsets of C that can be made portable. If
+you are willing to fix primitive types to a fixed size (say int = 32-bits,
+and long = 64-bits), don't care about ABI compatibility with existing binaries,
+and are willing to give up some other minor features, you can have portable
+code. This can make sense for specialized domains such as an
+in-kernel language.</p>
+
+</div>
+
+<!-- ======================================================================= -->
+<h4><a name="safety">Safety Guarantees</a></h4>
+<!-- ======================================================================= -->
+
+<div>
+
+<p>Many of the languages above are also "safe" languages: it is impossible for
+a program written in Java to corrupt its address space and crash the process
+(assuming the JVM has no bugs).
+Safety is an interesting property that requires a combination of language
+design, runtime support, and often operating system support.</p>
+
+<p>It is certainly possible to implement a safe language in LLVM, but LLVM IR
+does not itself guarantee safety. The LLVM IR allows unsafe pointer casts,
+use after free bugs, buffer over-runs, and a variety of other problems. Safety
+needs to be implemented as a layer on top of LLVM and, conveniently, several
+groups have investigated this. Ask on the <a
+href="http://lists.cs.uiuc.edu/mailman/listinfo/llvmdev">llvmdev mailing
+list</a> if you are interested in more details.</p>
+
+</div>
+
+<!-- ======================================================================= -->
+<h4><a name="langspecific">Language-Specific Optimizations</a></h4>
+<!-- ======================================================================= -->
+
+<div>
+
+<p>One thing about LLVM that turns off many people is that it does not solve all
+the world's problems in one system (sorry 'world hunger', someone else will have
+to solve you some other day). One specific complaint is that people perceive
+LLVM as being incapable of performing high-level language-specific optimization:
+LLVM "loses too much information".</p>
+
+<p>Unfortunately, this is really not the place to give you a full and unified
+version of "Chris Lattner's theory of compiler design". Instead, I'll make a
+few observations:</p>
+
+<p>First, you're right that LLVM does lose information. For example, as of this
+writing, there is no way to distinguish in the LLVM IR whether an SSA-value came
+from a C "int" or a C "long" on an ILP32 machine (other than debug info). Both
+get compiled down to an 'i32' value and the information about what it came from
+is lost. The more general issue here is that the LLVM type system uses
+"structural equivalence" instead of "name equivalence". Another place this
+surprises people is if you have two types in a high-level language that have the
+same structure (e.g. two different structs that have a single int field): these
+types will compile down into a single LLVM type and it will be impossible to
+tell which one a given value came from.</p>
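+
+<p>A tiny illustration of this (a sketch, assuming the front-end builds literal,
+i.e. unnamed, struct types; front-ends that create named struct types behave
+differently) is that two identically-shaped literal structs are uniqued to the
+same <tt>Type</tt> object:</p>
+
+<div class="doc_code">
+<pre>
+// Both "struct A { int x; }" and "struct B { int y; }" lower to "{ i32 }".
+Type *Int = Type::getInt32Ty(getGlobalContext());
+StructType *A = StructType::get(Int, NULL);
+StructType *B = StructType::get(Int, NULL);
+assert(A == B &amp;&amp; "literal struct types are uniqued by structure");
+</pre>
+</div>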
+
+<p>Second, while LLVM does lose information, LLVM is not a fixed target: we
+continue to enhance and improve it in many different ways. In addition to
+adding new features (LLVM did not always support exceptions or debug info), we
+also extend the IR to capture important information for optimization (e.g.
+whether an argument is sign or zero extended, information about pointers
+aliasing, etc). Many of the enhancements are user-driven: people want LLVM to
+include some specific feature, so they go ahead and extend it.</p>
+
+<p>Third, it is <em>possible and easy</em> to add language-specific
+optimizations, and you have a number of choices in how to do it. As one trivial
+example, it is easy to add language-specific optimization passes that
+"know" things about code compiled for a language. In the case of the C family,
+there is an optimization pass that "knows" about the standard C library
+functions. If you call "exit(0)" in main(), it knows that it is safe to
+optimize that into "return 0;" because C specifies what the 'exit'
+function does.</p>
+
+<p>In addition to simple library knowledge, it is possible to embed a variety of
+other language-specific information into the LLVM IR. If you have a specific
+need and run into a wall, please bring the topic up on the llvmdev list. At the
+very worst, you can always treat LLVM as if it were a "dumb code generator" and
+implement the high-level optimizations you desire in your front-end, on the
+language-specific AST.
+</p>
+
+</div>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="tipsandtricks">Tips and Tricks</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>There are a variety of useful tips and tricks that you come to know after
+working on/with LLVM that aren't obvious at first glance. Instead of letting
+everyone rediscover them, this section talks about some of these issues.</p>
+
+<!-- ======================================================================= -->
+<h4><a name="offsetofsizeof">Implementing portable offsetof/sizeof</a></h4>
+<!-- ======================================================================= -->
+
+<div>
+
+<p>One interesting thing that comes up, if you are trying to keep the code
+generated by your compiler "target independent", is that you often need to know
+the size of some LLVM type or the offset of some field in an LLVM structure.
+For example, you might need to pass the size of a type into a function that
+allocates memory.</p>
+
+<p>Unfortunately, this can vary widely across targets: for example the width of
+a pointer is trivially target-specific. However, there is a <a
+href="http://nondot.org/sabre/LLVMNotes/SizeOf-OffsetOf-VariableSizedStructs.txt">clever
+way to use the getelementptr instruction</a> that allows you to compute this
+in a portable way.</p>
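+
+<p>For reference, here is a minimal sketch (assuming the same LLVM headers as
+the tutorial code): the <tt>ConstantExpr::getSizeOf</tt> and
+<tt>ConstantExpr::getOffsetOf</tt> helpers in the LLVM C++ API build exactly
+this kind of target-independent getelementptr expression, so the front-end
+never has to hard-code a size or offset:</p>
+
+<div class="doc_code">
+<pre>
+// "sizeof(T)" as a target-independent constant: a gep off of a null T*,
+// cast to an integer.  The target lowers it to the real size.
+Constant *EmitSizeOf(Type *T) {
+  return ConstantExpr::getSizeOf(T);
+}
+
+// "offsetof(STy, FieldNo)", built the same way by indexing into a null
+// pointer of the struct type.
+Constant *EmitOffsetOf(StructType *STy, unsigned FieldNo) {
+  return ConstantExpr::getOffsetOf(STy, FieldNo);
+}
+</pre>
+</div>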
+
+</div>
+
+<!-- ======================================================================= -->
+<h4><a name="gcstack">Garbage Collected Stack Frames</a></h4>
+<!-- ======================================================================= -->
+
+<div>
+
+<p>Some languages want to explicitly manage their stack frames, often so that
+they are garbage collected or to allow easy implementation of closures. There
+are often better ways to implement these features than explicit stack frames,
+but <a
+href="http://nondot.org/sabre/LLVMNotes/ExplicitlyManagedStackFrames.txt">LLVM
+does support them,</a> if you want. It requires your front-end to convert the
+code into <a
+href="http://en.wikipedia.org/wiki/Continuation-passing_style">Continuation
+Passing Style</a> and the use of tail calls (which LLVM also supports).</p>
+
+</div>
+
+</div>
+
+<!-- *********************************************************************** -->
+<hr>
+<address>
+ <a href="http://jigsaw.w3.org/css-validator/check/referer"><img
+ src="http://jigsaw.w3.org/css-validator/images/vcss" alt="Valid CSS!"></a>
+ <a href="http://validator.w3.org/check/referer"><img
+ src="http://www.w3.org/Icons/valid-html401" alt="Valid HTML 4.01!"></a>
+
+ <a href="mailto:sabre@nondot.org">Chris Lattner</a><br>
+ <a href="http://llvm.org/">The LLVM Compiler Infrastructure</a><br>
+ Last modified: $Date$
+</address>
+</body>
+</html>
diff --git a/docs/tutorial/OCamlLangImpl1.html b/docs/tutorial/OCamlLangImpl1.html
new file mode 100644
index 00000000000..73fe07bb840
--- /dev/null
+++ b/docs/tutorial/OCamlLangImpl1.html
@@ -0,0 +1,365 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
+ "http://www.w3.org/TR/html4/strict.dtd">
+
+<html>
+<head>
+ <title>Kaleidoscope: Tutorial Introduction and the Lexer</title>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
+ <meta name="author" content="Chris Lattner">
+ <meta name="author" content="Erick Tryzelaar">
+ <link rel="stylesheet" href="../_static/llvm.css" type="text/css">
+</head>
+
+<body>
+
+<h1>Kaleidoscope: Tutorial Introduction and the Lexer</h1>
+
+<ul>
+<li><a href="index.html">Up to Tutorial Index</a></li>
+<li>Chapter 1
+ <ol>
+ <li><a href="#intro">Tutorial Introduction</a></li>
+ <li><a href="#language">The Basic Language</a></li>
+ <li><a href="#lexer">The Lexer</a></li>
+ </ol>
+</li>
+<li><a href="OCamlLangImpl2.html">Chapter 2</a>: Implementing a Parser and
+AST</li>
+</ul>
+
+<div class="doc_author">
+ <p>
+ Written by <a href="mailto:sabre@nondot.org">Chris Lattner</a>
+ and <a href="mailto:idadesub@users.sourceforge.net">Erick Tryzelaar</a>
+ </p>
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="intro">Tutorial Introduction</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>Welcome to the "Implementing a language with LLVM" tutorial. This tutorial
+runs through the implementation of a simple language, showing how fun and
+easy it can be. This tutorial will get you up and running, as well as help to
+build a framework you can extend to other languages. The code in this tutorial
+can also be used as a playground to hack on other LLVM specific things.
+</p>
+
+<p>
+The goal of this tutorial is to progressively unveil our language, describing
+how it is built up over time. This will let us cover a fairly broad range of
+language design and LLVM-specific usage issues, showing and explaining the code
+for it all along the way, without overwhelming you with tons of details up
+front.</p>
+
+<p>It is useful to point out ahead of time that this tutorial is really about
+teaching compiler techniques and LLVM specifically, <em>not</em> about teaching
+modern and sane software engineering principles. In practice, this means that
+we'll take a number of shortcuts to simplify the exposition. For example, the
+code leaks memory, uses global variables all over the place, doesn't use nice
+design patterns like <a
+href="http://en.wikipedia.org/wiki/Visitor_pattern">visitors</a>, etc... but it
+is very simple. If you dig in and use the code as a basis for future projects,
+fixing these deficiencies shouldn't be hard.</p>
+
+<p>I've tried to put this tutorial together in a way that makes chapters easy to
+skip over if you are already familiar with or are uninterested in the various
+pieces. The structure of the tutorial is:
+</p>
+
+<ul>
+<li><b><a href="#language">Chapter #1</a>: Introduction to the Kaleidoscope
+language, and the definition of its Lexer</b> - This shows where we are going
+and the basic functionality that we want it to do. In order to make this
+tutorial maximally understandable and hackable, we choose to implement
+everything in Objective Caml instead of using lexer and parser generators.
+LLVM obviously works just fine with such tools; feel free to use one if you
+prefer.</li>
+<li><b><a href="OCamlLangImpl2.html">Chapter #2</a>: Implementing a Parser and
+AST</b> - With the lexer in place, we can talk about parsing techniques and
+basic AST construction. This tutorial describes recursive descent parsing and
+operator precedence parsing. Nothing in Chapters 1 or 2 is LLVM-specific;
+the code doesn't even link in LLVM at this point. :)</li>
+<li><b><a href="OCamlLangImpl3.html">Chapter #3</a>: Code generation to LLVM
+IR</b> - With the AST ready, we can show off how easy generation of LLVM IR
+really is.</li>
+<li><b><a href="OCamlLangImpl4.html">Chapter #4</a>: Adding JIT and Optimizer
+Support</b> - Because a lot of people are interested in using LLVM as a JIT,
+we'll dive right into it and show you the 3 lines it takes to add JIT support.
+LLVM is also useful in many other ways, but this is one simple and "sexy" way
+to show off its power. :)</li>
+<li><b><a href="OCamlLangImpl5.html">Chapter #5</a>: Extending the Language:
+Control Flow</b> - With the language up and running, we show how to extend it
+with control flow operations (if/then/else and a 'for' loop). This gives us a
+chance to talk about simple SSA construction and control flow.</li>
+<li><b><a href="OCamlLangImpl6.html">Chapter #6</a>: Extending the Language:
+User-defined Operators</b> - This is a silly but fun chapter that talks about
+extending the language to let the user define their own arbitrary
+unary and binary operators (with assignable precedence!). This lets us build a
+significant piece of the "language" as library routines.</li>
+<li><b><a href="OCamlLangImpl7.html">Chapter #7</a>: Extending the Language:
+Mutable Variables</b> - This chapter talks about adding user-defined local
+variables along with an assignment operator. The interesting part about this
+is how easy it is to construct SSA form in LLVM: no, LLVM does
+<em>not</em> require your front-end to construct SSA form!</li>
+<li><b><a href="OCamlLangImpl8.html">Chapter #8</a>: Conclusion and other
+useful LLVM tidbits</b> - This chapter wraps up the series by talking about
+potential ways to extend the language, but also includes a bunch of pointers to
+info about "special topics" like adding garbage collection support, exceptions,
+debugging, support for "spaghetti stacks", and a bunch of other tips and
+tricks.</li>
+
+</ul>
+
+<p>By the end of the tutorial, we'll have written a bit less than 700
+non-comment, non-blank lines of code. With this small amount of code, we'll
+have built up a very reasonable compiler for a non-trivial language including
+a hand-written lexer, parser, AST, as well as code generation support with a JIT
+compiler. While other systems may have interesting "hello world" tutorials,
+I think the breadth of this tutorial is a great testament to the strengths of
+LLVM and why you should consider it if you're interested in language or compiler
+design.</p>
+
+<p>A note about this tutorial: we expect you to extend the language and play
+with it on your own. Take the code and go crazy hacking away at it; compilers
+don't need to be scary creatures - it can be a lot of fun to play with
+languages!</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="language">The Basic Language</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>This tutorial will be illustrated with a toy language that we'll call
+"<a href="http://en.wikipedia.org/wiki/Kaleidoscope">Kaleidoscope</a>" (derived
+from "meaning beautiful, form, and view").
+Kaleidoscope is a procedural language that allows you to define functions, use
+conditionals, math, etc. Over the course of the tutorial, we'll extend
+Kaleidoscope to support the if/then/else construct, a for loop, user defined
+operators, JIT compilation with a simple command line interface, etc.</p>
+
+<p>Because we want to keep things simple, the only datatype in Kaleidoscope is a
+64-bit floating point type (aka 'float' in O'Caml parlance). As such, all
+values are implicitly double precision and the language doesn't require type
+declarations. This gives the language a very nice and simple syntax. For
+example, the following simple example computes <a
+href="http://en.wikipedia.org/wiki/Fibonacci_number">Fibonacci numbers:</a></p>
+
+<div class="doc_code">
+<pre>
+# Compute the x'th fibonacci number.
+def fib(x)
+ if x &lt; 3 then
+ 1
+ else
+ fib(x-1)+fib(x-2)
+
+# This expression will compute the 40th number.
+fib(40)
+</pre>
+</div>
+
+<p>We also allow Kaleidoscope to call into standard library functions (the LLVM
+JIT makes this completely trivial). This means that you can use the 'extern'
+keyword to define a function before you use it (this is also useful for mutually
+recursive functions). For example:</p>
+
+<div class="doc_code">
+<pre>
+extern sin(arg);
+extern cos(arg);
+extern atan2(arg1 arg2);
+
+atan2(sin(.4), cos(42))
+</pre>
+</div>
+
+<p>A more interesting example is included in Chapter 6 where we write a little
+Kaleidoscope application that <a href="OCamlLangImpl6.html#example">displays
+a Mandelbrot Set</a> at various levels of magnification.</p>
+
+<p>Let's dive into the implementation of this language!</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="lexer">The Lexer</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>When it comes to implementing a language, the first thing needed is
+the ability to process a text file and recognize what it says. The traditional
+way to do this is to use a "<a
+href="http://en.wikipedia.org/wiki/Lexical_analysis">lexer</a>" (aka 'scanner')
+to break the input up into "tokens". Each token returned by the lexer includes
+a token code and potentially some metadata (e.g. the numeric value of a number).
+First, we define the possibilities:
+</p>
+
+<div class="doc_code">
+<pre>
+(* The lexer returns 'Kwd' if it is an unknown character, otherwise one of
+ * the other variants for known things. *)
+type token =
+ (* commands *)
+ | Def | Extern
+
+ (* primary *)
+ | Ident of string | Number of float
+
+ (* unknown *)
+ | Kwd of char
+</pre>
+</div>
+
+<p>Each token returned by our lexer will be one of the token variant values.
+An unknown character like '+' will be returned as <tt>Token.Kwd '+'</tt>. If
+the current token is an identifier, the value will be <tt>Token.Ident s</tt>. If
+the current token is a numeric literal (like 1.0), the value will be
+<tt>Token.Number 1.0</tt>.
+</p>
+
+<p>The actual implementation of the lexer is a collection of functions driven
+by a function named <tt>Lexer.lex</tt>. The <tt>Lexer.lex</tt> function is
+called to return the next token from standard input. We will use
+<a href="http://caml.inria.fr/pub/docs/manual-camlp4/index.html">Camlp4</a>
+to simplify the tokenization of the standard input. Its definition starts
+as:</p>
+
+<div class="doc_code">
+<pre>
+(*===----------------------------------------------------------------------===
+ * Lexer
+ *===----------------------------------------------------------------------===*)
+
+let rec lex = parser
+ (* Skip any whitespace. *)
+ | [&lt; ' (' ' | '\n' | '\r' | '\t'); stream &gt;] -&gt; lex stream
+</pre>
+</div>
+
+<p>
+<tt>Lexer.lex</tt> works by recursing over a <tt>char Stream.t</tt> to read
+characters one at a time from the standard input. It eats them as it recognizes
+them and stores them in a <tt>Token.token</tt> variant. The first thing that
+it has to do is ignore whitespace between tokens. This is accomplished with the
+recursive call above.</p>
+
+<p>The next thing <tt>Lexer.lex</tt> needs to do is recognize identifiers and
+specific keywords like "def". Kaleidoscope does this with a pattern match
+and a helper function.</p>
+
+<div class="doc_code">
+<pre>
+ (* identifier: [a-zA-Z][a-zA-Z0-9] *)
+ | [&lt; ' ('A' .. 'Z' | 'a' .. 'z' as c); stream &gt;] -&gt;
+ let buffer = Buffer.create 1 in
+ Buffer.add_char buffer c;
+ lex_ident buffer stream
+
+...
+
+and lex_ident buffer = parser
+ | [&lt; ' ('A' .. 'Z' | 'a' .. 'z' | '0' .. '9' as c); stream &gt;] -&gt;
+ Buffer.add_char buffer c;
+ lex_ident buffer stream
+ | [&lt; stream=lex &gt;] -&gt;
+ match Buffer.contents buffer with
+ | "def" -&gt; [&lt; 'Token.Def; stream &gt;]
+ | "extern" -&gt; [&lt; 'Token.Extern; stream &gt;]
+ | id -&gt; [&lt; 'Token.Ident id; stream &gt;]
+</pre>
+</div>
+
+<p>Numeric values are similar:</p>
+
+<div class="doc_code">
+<pre>
+ (* number: [0-9.]+ *)
+ | [&lt; ' ('0' .. '9' as c); stream &gt;] -&gt;
+ let buffer = Buffer.create 1 in
+ Buffer.add_char buffer c;
+ lex_number buffer stream
+
+...
+
+and lex_number buffer = parser
+ | [&lt; ' ('0' .. '9' | '.' as c); stream &gt;] -&gt;
+ Buffer.add_char buffer c;
+ lex_number buffer stream
+ | [&lt; stream=lex &gt;] -&gt;
+ [&lt; 'Token.Number (float_of_string (Buffer.contents buffer)); stream &gt;]
+</pre>
+</div>
+
+<p>This is all pretty straight-forward code for processing input. When reading
+a numeric value from input, we use the OCaml <tt>float_of_string</tt> function
+to convert it to a numeric value that we store in <tt>Token.Number</tt>. Note
+that this isn't doing sufficient error checking: it will raise <tt>Failure</tt>
+if given a string like "1.23.45.67". Feel free to extend it :). Next we handle
+comments:
+</p>
+
+<div class="doc_code">
+<pre>
+ (* Comment until end of line. *)
+ | [&lt; ' ('#'); stream &gt;] -&gt;
+ lex_comment stream
+
+...
+
+and lex_comment = parser
+ | [&lt; ' ('\n'); stream=lex &gt;] -&gt; stream
+ | [&lt; 'c; e=lex_comment &gt;] -&gt; e
+ | [&lt; &gt;] -&gt; [&lt; &gt;]
+</pre>
+</div>
+
+<p>We handle comments by skipping to the end of the line and then returning the
+next token. Finally, if the input doesn't match one of the above cases, it is
+either an operator character like '+' or the end of the file. These are handled
+with this code:</p>
+
+<div class="doc_code">
+<pre>
+ (* Otherwise, just return the character as its ascii value. *)
+ | [&lt; 'c; stream &gt;] -&gt;
+ [&lt; 'Token.Kwd c; lex stream &gt;]
+
+ (* end of stream. *)
+ | [&lt; &gt;] -&gt; [&lt; &gt;]
+</pre>
+</div>
+
+<p>With this, we have the complete lexer for the basic Kaleidoscope language
+(the <a href="OCamlLangImpl2.html#code">full code listing</a> for the Lexer is
+available in the <a href="OCamlLangImpl2.html">next chapter</a> of the
+tutorial). Next we'll <a href="OCamlLangImpl2.html">build a simple parser that
+uses this to build an Abstract Syntax Tree</a>. When we have that, we'll
+include a driver so that you can use the lexer and parser together.
+</p>
+
+<a href="OCamlLangImpl2.html">Next: Implementing a Parser and AST</a>
+</div>
+
+<!-- *********************************************************************** -->
+<hr>
+<address>
+ <a href="http://jigsaw.w3.org/css-validator/check/referer"><img
+ src="http://jigsaw.w3.org/css-validator/images/vcss" alt="Valid CSS!"></a>
+ <a href="http://validator.w3.org/check/referer"><img
+ src="http://www.w3.org/Icons/valid-html401" alt="Valid HTML 4.01!"></a>
+
+ <a href="mailto:sabre@nondot.org">Chris Lattner</a><br>
+ <a href="mailto:idadesub@users.sourceforge.net">Erick Tryzelaar</a><br>
+ <a href="http://llvm.org/">The LLVM Compiler Infrastructure</a><br>
+ Last modified: $Date$
+</address>
+</body>
+</html>
diff --git a/docs/tutorial/OCamlLangImpl2.html b/docs/tutorial/OCamlLangImpl2.html
new file mode 100644
index 00000000000..dd7e07b4224
--- /dev/null
+++ b/docs/tutorial/OCamlLangImpl2.html
@@ -0,0 +1,1043 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
+ "http://www.w3.org/TR/html4/strict.dtd">
+
+<html>
+<head>
+ <title>Kaleidoscope: Implementing a Parser and AST</title>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
+ <meta name="author" content="Chris Lattner">
+ <meta name="author" content="Erick Tryzelaar">
+ <link rel="stylesheet" href="../_static/llvm.css" type="text/css">
+</head>
+
+<body>
+
+<h1>Kaleidoscope: Implementing a Parser and AST</h1>
+
+<ul>
+<li><a href="index.html">Up to Tutorial Index</a></li>
+<li>Chapter 2
+ <ol>
+ <li><a href="#intro">Chapter 2 Introduction</a></li>
+ <li><a href="#ast">The Abstract Syntax Tree (AST)</a></li>
+ <li><a href="#parserbasics">Parser Basics</a></li>
+ <li><a href="#parserprimexprs">Basic Expression Parsing</a></li>
+ <li><a href="#parserbinops">Binary Expression Parsing</a></li>
+ <li><a href="#parsertop">Parsing the Rest</a></li>
+ <li><a href="#driver">The Driver</a></li>
+ <li><a href="#conclusions">Conclusions</a></li>
+ <li><a href="#code">Full Code Listing</a></li>
+ </ol>
+</li>
+<li><a href="OCamlLangImpl3.html">Chapter 3</a>: Code generation to LLVM IR</li>
+</ul>
+
+<div class="doc_author">
+ <p>
+ Written by <a href="mailto:sabre@nondot.org">Chris Lattner</a>
+ and <a href="mailto:idadesub@users.sourceforge.net">Erick Tryzelaar</a>
+ </p>
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="intro">Chapter 2 Introduction</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>Welcome to Chapter 2 of the "<a href="index.html">Implementing a language
+with LLVM in Objective Caml</a>" tutorial. This chapter shows you how to use
+the lexer, built in <a href="OCamlLangImpl1.html">Chapter 1</a>, to build a
+full <a href="http://en.wikipedia.org/wiki/Parsing">parser</a> for our
+Kaleidoscope language. Once we have a parser, we'll define and build an <a
+href="http://en.wikipedia.org/wiki/Abstract_syntax_tree">Abstract Syntax
+Tree</a> (AST).</p>
+
+<p>The parser we will build uses a combination of <a
+href="http://en.wikipedia.org/wiki/Recursive_descent_parser">Recursive Descent
+Parsing</a> and <a href=
+"http://en.wikipedia.org/wiki/Operator-precedence_parser">Operator-Precedence
+Parsing</a> to parse the Kaleidoscope language (the latter for
+binary expressions and the former for everything else). Before we get to
+parsing, though, let's talk about the output of the parser: the Abstract Syntax
+Tree.</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="ast">The Abstract Syntax Tree (AST)</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>The AST for a program captures its behavior in such a way that it is easy for
+later stages of the compiler (e.g. code generation) to interpret. We basically
+want one object for each construct in the language, and the AST should closely
+model the language. In Kaleidoscope, we have expressions, a prototype, and a
+function object. We'll start with expressions first:</p>
+
+<div class="doc_code">
+<pre>
+(* expr - Base type for all expression nodes. *)
+type expr =
+ (* variant for numeric literals like "1.0". *)
+ | Number of float
+</pre>
+</div>
+
+<p>The code above shows the definition of the expr variant type and one
+constructor, Number, which we use for numeric literals. The important thing to
+note about this code is that the Number variant captures the numeric value of
+the literal as its argument. This allows later phases of the compiler to
+know what the stored numeric value is.</p>
+
+<p>Right now we only define the AST nodes, so there are no useful functions on
+them. It would be very easy to add a function to pretty-print the code,
+for example. Here are the other expression AST node definitions that we'll use
+in the basic form of the Kaleidoscope language:
+</p>
+
+<div class="doc_code">
+<pre>
+ (* variant for referencing a variable, like "a". *)
+ | Variable of string
+
+ (* variant for a binary operator. *)
+ | Binary of char * expr * expr
+
+ (* variant for function calls. *)
+ | Call of string * expr array
+</pre>
+</div>
+
+<p>This is all (intentionally) rather straight-forward: variables capture the
+variable name, binary operators capture their opcode (e.g. '+'), and calls
+capture a function name as well as a list of any argument expressions. One thing
+that is nice about our AST is that it captures the language features without
+talking about the syntax of the language. Note that there is no discussion about
+precedence of binary operators, lexical structure, etc.</p>
+
+<p>For our basic language, these are all of the expression nodes we'll define.
+Because it doesn't have conditional control flow, it isn't Turing-complete;
+we'll fix that in a later installment. The two things we need next are a way
+to talk about the interface to a function, and a way to talk about functions
+themselves:</p>
+
+<div class="doc_code">
+<pre>
+(* proto - This type represents the "prototype" for a function, which captures
+ * its name, and its argument names (thus implicitly the number of arguments the
+ * function takes). *)
+type proto = Prototype of string * string array
+
+(* func - This type represents a function definition itself. *)
+type func = Function of proto * expr
+</pre>
+</div>
+
+<p>In Kaleidoscope, functions are typed with just a count of their arguments.
+Since all values are double precision floating point, the type of each argument
+doesn't need to be stored anywhere. In a more aggressive and realistic
+language, the "expr" variants would probably have a type field.</p>
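+
+<p>For example, with the types above, a tiny function such as
+"def foo(x y) x+y" corresponds to the following AST value (a hypothetical
+sketch, not part of the tutorial's source files):</p>
+
+<div class="doc_code">
+<pre>
+(* Sketch: the AST for "def foo(x y) x+y" using the types defined above. *)
+let example_ast : func =
+  Function (Prototype ("foo", [|"x"; "y"|]),
+            Binary ('+', Variable "x", Variable "y"))
+</pre>
+</div>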
+
+<p>With this scaffolding, we can now talk about parsing expressions and function
+bodies in Kaleidoscope.</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="parserbasics">Parser Basics</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>Now that we have an AST to build, we need to define the parser code to build
+it. The idea here is that we want to parse something like "x+y" (which is
+returned as three tokens by the lexer) into an AST that could be generated with
+calls like this:</p>
+
+<div class="doc_code">
+<pre>
+ let x = Variable "x" in
+ let y = Variable "y" in
+ let result = Binary ('+', x, y) in
+ ...
+</pre>
+</div>
+
+<p>
+The error handling routines make use of the builtin <tt>Stream.Failure</tt> and
+<tt>Stream.Error</tt>s. <tt>Stream.Failure</tt> is raised when the parser is
+unable to find any matching token in the first position of a pattern.
+<tt>Stream.Error</tt> is raised when the first token matches, but the rest do
+not. The error recovery in our parser will not be the best and is not
+particularly user-friendly, but it will be enough for our tutorial. These
+exceptions make it easier to handle errors in routines that have various return
+types.</p>
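+
+<p>For instance, in a small (hypothetical) stream parser written with the same
+camlp4 syntax as ours, a pattern whose first token does not match raises
+<tt>Stream.Failure</tt>, while a pattern whose later tokens do not match raises
+<tt>Stream.Error</tt>:</p>
+
+<div class="doc_code">
+<pre>
+(* Sketch only: expects exactly the two tokens '(' and ')'. *)
+let parse_unit = parser
+  | [&lt; 'Token.Kwd '('; 'Token.Kwd ')' ?? "expected ')'" &gt;] -&gt; ()
+
+(* Applied to a stream starting with, say, Token.Number 1.0, this raises
+ * Stream.Failure; applied to a stream containing only '(' it raises
+ * Stream.Error "expected ')'". *)
+</pre>
+</div>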
+
+<p>With these basic types and exceptions, we can implement the first
+piece of our grammar: numeric literals.</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="parserprimexprs">Basic Expression Parsing</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>We start with numeric literals, because they are the simplest to process.
+For each production in our grammar, we'll define a function which parses that
+production. We call this class of expressions "primary" expressions, for
+reasons that will become more clear <a href="OCamlLangImpl6.html#unary">
+later in the tutorial</a>. In order to parse an arbitrary primary expression,
+we need to determine what sort of expression it is. For numeric literals, we
+have:</p>
+
+<div class="doc_code">
+<pre>
+(* primary
+ * ::= identifier
+ * ::= numberexpr
+ * ::= parenexpr *)
+parse_primary = parser
+ (* numberexpr ::= number *)
+ | [&lt; 'Token.Number n &gt;] -&gt; Ast.Number n
+</pre>
+</div>
+
+<p>This routine is very simple: it expects to be called when the current token
+is a <tt>Token.Number</tt> token. It takes the current number value, creates
+an <tt>Ast.Number</tt> node, advances the lexer to the next token, and finally
+returns.</p>
+
+<p>There are some interesting aspects to this. The most important one is that
+this routine eats all of the tokens that correspond to the production and
+returns the lexer buffer with the next token (which is not part of the grammar
+production) ready to go. This is a fairly standard way to go for recursive
+descent parsers. For a better example, the parenthesis operator is defined like
+this:</p>
+
+<div class="doc_code">
+<pre>
+ (* parenexpr ::= '(' expression ')' *)
+ | [&lt; 'Token.Kwd '('; e=parse_expr; 'Token.Kwd ')' ?? "expected ')'" &gt;] -&gt; e
+</pre>
+</div>
+
+<p>This function illustrates a number of interesting things about the
+parser:</p>
+
+<p>
+1) It shows how we use the <tt>Stream.Error</tt> exception. When called, this
+function expects that the current token is a '(' token, but after parsing the
+subexpression, it is possible that there is no ')' waiting. For example, if
+the user types in "(4 x" instead of "(4)", the parser should emit an error.
+Because errors can occur, the parser needs a way to indicate that they
+happened. In our parser, we use the camlp4 shortcut syntax <tt>token ?? "parse
+error"</tt>, where if the token before the <tt>??</tt> does not match, then
+<tt>Stream.Error "parse error"</tt> will be raised.</p>
+
+<p>2) Another interesting aspect of this function is that it uses recursion by
+calling <tt>Parser.parse_expr</tt> (we will soon see that
+<tt>Parser.parse_expr</tt> can call <tt>Parser.parse_primary</tt>). This is
+powerful because it allows us to handle recursive grammars, and keeps each
+production very simple. Note that parentheses do not cause construction of AST
+nodes themselves. While we could do it this way, the most important role of
+parentheses is to guide the parser and provide grouping. Once the parser
+constructs the AST, parentheses are not needed.</p>
+
+<p>The next simple production is for handling variable references and function
+calls:</p>
+
+<div class="doc_code">
+<pre>
+ (* identifierexpr
+ * ::= identifier
+ * ::= identifier '(' argumentexpr ')' *)
+ | [&lt; 'Token.Ident id; stream &gt;] -&gt;
+ let rec parse_args accumulator = parser
+ | [&lt; e=parse_expr; stream &gt;] -&gt;
+ begin parser
+ | [&lt; 'Token.Kwd ','; e=parse_args (e :: accumulator) &gt;] -&gt; e
+ | [&lt; &gt;] -&gt; e :: accumulator
+ end stream
+ | [&lt; &gt;] -&gt; accumulator
+ in
+ let rec parse_ident id = parser
+ (* Call. *)
+ | [&lt; 'Token.Kwd '(';
+ args=parse_args [];
+ 'Token.Kwd ')' ?? "expected ')'"&gt;] -&gt;
+ Ast.Call (id, Array.of_list (List.rev args))
+
+ (* Simple variable ref. *)
+ | [&lt; &gt;] -&gt; Ast.Variable id
+ in
+ parse_ident id stream
+</pre>
+</div>
+
+<p>This routine follows the same style as the other routines. (It expects to be
+called if the current token is a <tt>Token.Ident</tt> token). It also has
+recursion and error handling. One interesting aspect of this is that it uses
+<em>look-ahead</em> to determine if the current identifier is a stand-alone
+variable reference or if it is a function call expression. It handles this by
+checking to see if the token after the identifier is a '(' token, constructing
+either an <tt>Ast.Variable</tt> or an <tt>Ast.Call</tt> node as appropriate.
+</p>
+
+<p>We finish up by raising an exception if we received a token we didn't
+expect:</p>
+
+<div class="doc_code">
+<pre>
+ | [&lt; &gt;] -&gt; raise (Stream.Error "unknown token when expecting an expression.")
+</pre>
+</div>
+
+<p>Now that basic expressions are handled, we need to handle binary expressions.
+They are a bit more complex.</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="parserbinops">Binary Expression Parsing</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>Binary expressions are significantly harder to parse because they are often
+ambiguous. For example, when given the string "x+y*z", the parser can choose
+to parse it as either "(x+y)*z" or "x+(y*z)". With common definitions from
+mathematics, we expect the latter parse, because "*" (multiplication) has
+higher <em>precedence</em> than "+" (addition).</p>
+
+<p>There are many ways to handle this, but an elegant and efficient way is to
+use <a href=
+"http://en.wikipedia.org/wiki/Operator-precedence_parser">Operator-Precedence
+Parsing</a>. This parsing technique uses the precedence of binary operators to
+guide recursion. To start with, we need a table of precedences:</p>
+
+<div class="doc_code">
+<pre>
+(* binop_precedence - This holds the precedence for each binary operator that is
+ * defined *)
+let binop_precedence:(char, int) Hashtbl.t = Hashtbl.create 10
+
+(* precedence - Get the precedence of the pending binary operator token. *)
+let precedence c = try Hashtbl.find binop_precedence c with Not_found -&gt; -1
+
+...
+
+let main () =
+ (* Install standard binary operators.
+ * 1 is the lowest precedence. *)
+ Hashtbl.add Parser.binop_precedence '&lt;' 10;
+ Hashtbl.add Parser.binop_precedence '+' 20;
+ Hashtbl.add Parser.binop_precedence '-' 20;
+ Hashtbl.add Parser.binop_precedence '*' 40; (* highest. *)
+ ...
+</pre>
+</div>
+
+<p>For the basic form of Kaleidoscope, we will only support 4 binary operators
+(this can obviously be extended by you, our brave and intrepid reader). The
+<tt>Parser.precedence</tt> function returns the precedence for the current
+token, or -1 if the token is not a binary operator. Having a <tt>Hashtbl.t</tt>
+makes it easy to add new operators and makes it clear that the algorithm doesn't
+depend on the specific operators involved, but it would be easy enough to
+eliminate the <tt>Hashtbl.t</tt> and do the comparisons in the
+<tt>Parser.precedence</tt> function. (Or just use a fixed-size array).</p>
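+
+<p>As a (hypothetical) sketch of that alternative, the same helper could be
+written without the table by matching on the four operators directly:</p>
+
+<div class="doc_code">
+<pre>
+(* Sketch: precedence without the Hashtbl, hard-coding the four operators. *)
+let precedence = function
+  | '&lt;' -&gt; 10
+  | '+' | '-' -&gt; 20
+  | '*' -&gt; 40
+  | _ -&gt; -1
+</pre>
+</div>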
+
+<p>With the helper above defined, we can now start parsing binary expressions.
+The basic idea of operator precedence parsing is to break down an expression
+with potentially ambiguous binary operators into pieces. Consider, for example,
+the expression "a+b+(c+d)*e*f+g". Operator precedence parsing considers this
+as a stream of primary expressions separated by binary operators. As such,
+it will first parse the leading primary expression "a", then it will see the
+pairs [+, b] [+, (c+d)] [*, e] [*, f] and [+, g]. Note that because parentheses
+are primary expressions, the binary expression parser doesn't need to worry
+about nested subexpressions like (c+d) at all.
+</p>
+
+<p>
+To start, an expression is a primary expression potentially followed by a
+sequence of [binop,primaryexpr] pairs:</p>
+
+<div class="doc_code">
+<pre>
+(* expression
+ * ::= primary binoprhs *)
+and parse_expr = parser
+ | [&lt; lhs=parse_primary; stream &gt;] -&gt; parse_bin_rhs 0 lhs stream
+</pre>
+</div>
+
+<p><tt>Parser.parse_bin_rhs</tt> is the function that parses the sequence of
+pairs for us. It takes a precedence and the expression for the part
+that has been parsed so far. Note that "x" is a perfectly valid expression: as
+such, "binoprhs" is allowed to be empty, in which case it returns the expression
+that is passed into it. In our example above, the code passes the expression for
+"a" into <tt>Parser.parse_bin_rhs</tt> and the current token is "+".</p>
+
+<p>The precedence value passed into <tt>Parser.parse_bin_rhs</tt> indicates the
+<em>minimal operator precedence</em> that the function is allowed to eat. For
+example, if the current pair stream is [+, x] and <tt>Parser.parse_bin_rhs</tt>
+is passed in a precedence of 40, it will not consume any tokens (because the
+precedence of '+' is only 20). With this in mind, <tt>Parser.parse_bin_rhs</tt>
+starts with:</p>
+
+<div class="doc_code">
+<pre>
+(* binoprhs
+ * ::= ('+' primary)* *)
+and parse_bin_rhs expr_prec lhs stream =
+ match Stream.peek stream with
+ (* If this is a binop, find its precedence. *)
+ | Some (Token.Kwd c) when Hashtbl.mem binop_precedence c -&gt;
+ let token_prec = precedence c in
+
+ (* If this is a binop that binds at least as tightly as the current binop,
+ * consume it, otherwise we are done. *)
+ if token_prec &lt; expr_prec then lhs else begin
+</pre>
+</div>
+
+<p>This code gets the precedence of the current token and checks to see if it is
+too low. Because we defined invalid tokens to have a precedence of -1, this
+check implicitly knows that the pair-stream ends when the token stream runs out
+of binary operators. If this check succeeds, we know that the token is a binary
+operator and that it will be included in this expression:</p>
+
+<div class="doc_code">
+<pre>
+ (* Eat the binop. *)
+ Stream.junk stream;
+
+ (* Okay, we know this is a binop. *)
+ let rhs =
+ match Stream.peek stream with
+ | Some (Token.Kwd c2) -&gt;
+</pre>
+</div>
+
+<p>As such, this code eats (and remembers) the binary operator and then parses
+the primary expression that follows. This builds up the whole pair, the first of
+which is [+, b] for the running example.</p>
+
+<p>Now that we parsed the left-hand side of an expression and one pair of the
+RHS sequence, we have to decide which way the expression associates. In
+particular, we could have "(a+b) binop unparsed" or "a + (b binop unparsed)".
+To determine this, we look ahead at "binop" to determine its precedence and
+compare it to BinOp's precedence (which is '+' in this case):</p>
+
+<div class="doc_code">
+<pre>
+ (* If BinOp binds less tightly with rhs than the operator after
+ * rhs, let the pending operator take rhs as its lhs. *)
+ let next_prec = precedence c2 in
+ if token_prec &lt; next_prec
+</pre>
+</div>
+
+<p>If the precedence of the binop to the right of "RHS" is lower or equal to the
+precedence of our current operator, then we know that the parentheses associate
+as "(a+b) binop ...". In our example, the current operator is "+" and the next
+operator is also "+", so we know that they have the same precedence. In this
+case we'll create the AST node for "a+b", and then continue parsing:</p>
+
+<div class="doc_code">
+<pre>
+ ... if body omitted ...
+ in
+
+ (* Merge lhs/rhs. *)
+ let lhs = Ast.Binary (c, lhs, rhs) in
+ parse_bin_rhs expr_prec lhs stream
+ end
+</pre>
+</div>
+
+<p>In our example above, this will turn "a+b+" into "(a+b)" and execute the next
+iteration of the loop, with "+" as the current token. The code above will eat,
+remember, and parse "(c+d)" as the primary expression, which makes the current
+pair equal to [+, (c+d)]. It will then evaluate the 'if' conditional above with
+"*" as the binop to the right of the primary. In this case, the precedence of
+"*" is higher than the precedence of "+", so the if condition will be entered.</p>
+
+<p>The critical question left here is "how can the if condition parse the right
+hand side in full"? In particular, to build the AST correctly for our example,
+it needs to get all of "(c+d)*e*f" as the RHS expression variable. The code to
+do this is surprisingly simple (code from the above two blocks duplicated for
+context):</p>
+
+<div class="doc_code">
+<pre>
+ match Stream.peek stream with
+ | Some (Token.Kwd c2) -&gt;
+ (* If BinOp binds less tightly with rhs than the operator after
+ * rhs, let the pending operator take rhs as its lhs. *)
+ if token_prec &lt; precedence c2
+ then <b>parse_bin_rhs (token_prec + 1) rhs stream</b>
+ else rhs
+ | _ -&gt; rhs
+ in
+
+ (* Merge lhs/rhs. *)
+ let lhs = Ast.Binary (c, lhs, rhs) in
+ parse_bin_rhs expr_prec lhs stream
+ end
+</pre>
+</div>
+
+<p>At this point, we know that the binary operator to the RHS of our primary
+has higher precedence than the binop we are currently parsing. As such, we know
+that any sequence of pairs whose operators are all higher precedence than "+"
+should be parsed together and returned as "RHS". To do this, we recursively
+invoke the <tt>Parser.parse_bin_rhs</tt> function specifying "token_prec+1" as
+the minimum precedence required for it to continue. In our example above, this
+will cause it to return the AST node for "(c+d)*e*f" as RHS, which is then set
+as the RHS of the '+' expression.</p>
+
+<p>Finally, on the next recursive call to <tt>parse_bin_rhs</tt>, the "+g"
+piece is parsed and added to the AST. With this little bit of code (14
+non-trivial lines), we
+correctly handle fully general binary expression parsing in a very elegant way.
+This was a whirlwind tour of this code, and it is somewhat subtle. I recommend
+running through it with a few tough examples to see how it works.
+</p>
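+
+<p>As one such example, with the precedences installed in the driver ('+' at 20
+and '*' at 40), parsing "x+y*z" with this algorithm produces an AST equivalent
+to the following value (shown here purely as an illustration):</p>
+
+<div class="doc_code">
+<pre>
+(* The "*" binds tighter, so "y*z" becomes the right operand of "+". *)
+Ast.Binary ('+', Ast.Variable "x",
+            Ast.Binary ('*', Ast.Variable "y", Ast.Variable "z"))
+</pre>
+</div>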
+
+<p>This wraps up handling of expressions. At this point, we can point the
+parser at an arbitrary token stream and build an expression from it, stopping
+at the first token that is not part of the expression. Next up we need to
+handle function definitions, etc.</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="parsertop">Parsing the Rest</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>
+The next thing missing is handling of function prototypes. In Kaleidoscope,
+these are used both for 'extern' function declarations as well as function body
+definitions. The code to do this is straight-forward and not very interesting
+(once you've survived expressions):
+</p>
+
+<div class="doc_code">
+<pre>
+(* prototype
+ * ::= id '(' id* ')' *)
+let parse_prototype =
+ let rec parse_args accumulator = parser
+ | [&lt; 'Token.Ident id; e=parse_args (id::accumulator) &gt;] -&gt; e
+ | [&lt; &gt;] -&gt; accumulator
+ in
+
+ parser
+ | [&lt; 'Token.Ident id;
+ 'Token.Kwd '(' ?? "expected '(' in prototype";
+ args=parse_args [];
+ 'Token.Kwd ')' ?? "expected ')' in prototype" &gt;] -&gt;
+ (* success. *)
+ Ast.Prototype (id, Array.of_list (List.rev args))
+
+ | [&lt; &gt;] -&gt;
+ raise (Stream.Error "expected function name in prototype")
+</pre>
+</div>
+
+<p>Given this, a function definition is very simple, just a prototype plus
+an expression to implement the body:</p>
+
+<div class="doc_code">
+<pre>
+(* definition ::= 'def' prototype expression *)
+let parse_definition = parser
+ | [&lt; 'Token.Def; p=parse_prototype; e=parse_expr &gt;] -&gt;
+ Ast.Function (p, e)
+</pre>
+</div>
+
+<p>In addition, we support 'extern' to declare functions like 'sin' and 'cos' as
+well as to support forward declaration of user functions. These 'extern's are just
+prototypes with no body:</p>
+
+<div class="doc_code">
+<pre>
+(* external ::= 'extern' prototype *)
+let parse_extern = parser
+ | [&lt; 'Token.Extern; e=parse_prototype &gt;] -&gt; e
+</pre>
+</div>
+
+<p>Finally, we'll also let the user type in arbitrary top-level expressions and
+evaluate them on the fly. We will handle this by defining anonymous nullary
+(zero argument) functions for them:</p>
+
+<div class="doc_code">
+<pre>
+(* toplevelexpr ::= expression *)
+let parse_toplevel = parser
+ | [&lt; e=parse_expr &gt;] -&gt;
+ (* Make an anonymous proto. *)
+ Ast.Function (Ast.Prototype ("", [||]), e)
+</pre>
+</div>
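+
+<p>For example, typing "4+5" at the prompt is parsed as a top-level expression
+and wrapped into an anonymous function, conceptually equivalent to this value
+(an illustrative sketch):</p>
+
+<div class="doc_code">
+<pre>
+Ast.Function (Ast.Prototype ("", [||]),
+              Ast.Binary ('+', Ast.Number 4.0, Ast.Number 5.0))
+</pre>
+</div>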
+
+<p>Now that we have all the pieces, let's build a little driver that will let us
+actually <em>execute</em> this code we've built!</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="driver">The Driver</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>The driver for this simply invokes all of the parsing pieces with a top-level
+dispatch loop. There isn't much interesting here, so I'll just include the
+top-level loop. See <a href="#code">below</a> for full code in the "Top-Level
+Parsing" section.</p>
+
+<div class="doc_code">
+<pre>
+(* top ::= definition | external | expression | ';' *)
+let rec main_loop stream =
+ match Stream.peek stream with
+ | None -&gt; ()
+
+ (* ignore top-level semicolons. *)
+ | Some (Token.Kwd ';') -&gt;
+ Stream.junk stream;
+ main_loop stream
+
+ | Some token -&gt;
+ begin
+ try match token with
+ | Token.Def -&gt;
+ ignore(Parser.parse_definition stream);
+ print_endline "parsed a function definition.";
+ | Token.Extern -&gt;
+ ignore(Parser.parse_extern stream);
+ print_endline "parsed an extern.";
+ | _ -&gt;
+ (* Evaluate a top-level expression into an anonymous function. *)
+ ignore(Parser.parse_toplevel stream);
+ print_endline "parsed a top-level expr";
+ with Stream.Error s -&gt;
+ (* Skip token for error recovery. *)
+ Stream.junk stream;
+ print_endline s;
+ end;
+ print_string "ready&gt; "; flush stdout;
+ main_loop stream
+</pre>
+</div>
+
+<p>The most interesting part of this is that we ignore top-level semicolons.
+Why is this, you ask? The basic reason is that if you type "4 + 5" at the
+command line, the parser doesn't know whether that is the end of what you will type
+or not. For example, on the next line you could type "def foo..." in which case
+4+5 is the end of a top-level expression. Alternatively you could type "* 6",
+which would continue the expression. Having top-level semicolons allows you to
+type "4+5;", and the parser will know you are done.</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="conclusions">Conclusions</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>With just under 300 lines of commented code (240 lines of non-comment,
+non-blank code), we fully defined our minimal language, including a lexer,
+parser, and AST builder. With this done, the executable will validate
+Kaleidoscope code and tell us if it is grammatically invalid. For
+example, here is a sample interaction:</p>
+
+<div class="doc_code">
+<pre>
+$ <b>./toy.byte</b>
+ready&gt; <b>def foo(x y) x+foo(y, 4.0);</b>
+Parsed a function definition.
+ready&gt; <b>def foo(x y) x+y y;</b>
+Parsed a function definition.
+Parsed a top-level expr
+ready&gt; <b>def foo(x y) x+y );</b>
+Parsed a function definition.
+Error: unknown token when expecting an expression
+ready&gt; <b>extern sin(a);</b>
+ready&gt; Parsed an extern
+ready&gt; <b>^D</b>
+$
+</pre>
+</div>
+
+<p>There is a lot of room for extension here. You can define new AST nodes,
+extend the language in many ways, etc. In the <a href="OCamlLangImpl3.html">
+next installment</a>, we will describe how to generate LLVM Intermediate
+Representation (IR) from the AST.</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="code">Full Code Listing</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>
+Here is the complete code listing for this and the previous chapter.
+Note that it is fully self-contained: you don't need LLVM or any external
+libraries at all for this. (Besides the OCaml standard libraries, of
+course.) To build this, just compile with:</p>
+
+<div class="doc_code">
+<pre>
+# Compile
+ocamlbuild toy.byte
+# Run
+./toy.byte
+</pre>
+</div>
+
+<p>Here is the code:</p>
+
+<dl>
+<dt>_tags:</dt>
+<dd class="doc_code">
+<pre>
+&lt;{lexer,parser}.ml&gt;: use_camlp4, pp(camlp4of)
+</pre>
+</dd>
+
+<dt>token.ml:</dt>
+<dd class="doc_code">
+<pre>
+(*===----------------------------------------------------------------------===
+ * Lexer Tokens
+ *===----------------------------------------------------------------------===*)
+
+(* The lexer returns these 'Kwd' if it is an unknown character, otherwise one of
+ * these others for known things. *)
+type token =
+ (* commands *)
+ | Def | Extern
+
+ (* primary *)
+ | Ident of string | Number of float
+
+ (* unknown *)
+ | Kwd of char
+</pre>
+</dd>
+
+<dt>lexer.ml:</dt>
+<dd class="doc_code">
+<pre>
+(*===----------------------------------------------------------------------===
+ * Lexer
+ *===----------------------------------------------------------------------===*)
+
+let rec lex = parser
+ (* Skip any whitespace. *)
+ | [&lt; ' (' ' | '\n' | '\r' | '\t'); stream &gt;] -&gt; lex stream
+
+ (* identifier: [a-zA-Z][a-zA-Z0-9] *)
+ | [&lt; ' ('A' .. 'Z' | 'a' .. 'z' as c); stream &gt;] -&gt;
+ let buffer = Buffer.create 1 in
+ Buffer.add_char buffer c;
+ lex_ident buffer stream
+
+ (* number: [0-9.]+ *)
+ | [&lt; ' ('0' .. '9' as c); stream &gt;] -&gt;
+ let buffer = Buffer.create 1 in
+ Buffer.add_char buffer c;
+ lex_number buffer stream
+
+ (* Comment until end of line. *)
+ | [&lt; ' ('#'); stream &gt;] -&gt;
+ lex_comment stream
+
+ (* Otherwise, just return the character as its ascii value. *)
+ | [&lt; 'c; stream &gt;] -&gt;
+ [&lt; 'Token.Kwd c; lex stream &gt;]
+
+ (* end of stream. *)
+ | [&lt; &gt;] -&gt; [&lt; &gt;]
+
+and lex_number buffer = parser
+ | [&lt; ' ('0' .. '9' | '.' as c); stream &gt;] -&gt;
+ Buffer.add_char buffer c;
+ lex_number buffer stream
+ | [&lt; stream=lex &gt;] -&gt;
+ [&lt; 'Token.Number (float_of_string (Buffer.contents buffer)); stream &gt;]
+
+and lex_ident buffer = parser
+ | [&lt; ' ('A' .. 'Z' | 'a' .. 'z' | '0' .. '9' as c); stream &gt;] -&gt;
+ Buffer.add_char buffer c;
+ lex_ident buffer stream
+ | [&lt; stream=lex &gt;] -&gt;
+ match Buffer.contents buffer with
+ | "def" -&gt; [&lt; 'Token.Def; stream &gt;]
+ | "extern" -&gt; [&lt; 'Token.Extern; stream &gt;]
+ | id -&gt; [&lt; 'Token.Ident id; stream &gt;]
+
+and lex_comment = parser
+ | [&lt; ' ('\n'); stream=lex &gt;] -&gt; stream
+ | [&lt; 'c; e=lex_comment &gt;] -&gt; e
+ | [&lt; &gt;] -&gt; [&lt; &gt;]
+</pre>
+</dd>
+
+<dt>ast.ml:</dt>
+<dd class="doc_code">
+<pre>
+(*===----------------------------------------------------------------------===
+ * Abstract Syntax Tree (aka Parse Tree)
+ *===----------------------------------------------------------------------===*)
+
+(* expr - Base type for all expression nodes. *)
+type expr =
+ (* variant for numeric literals like "1.0". *)
+ | Number of float
+
+ (* variant for referencing a variable, like "a". *)
+ | Variable of string
+
+ (* variant for a binary operator. *)
+ | Binary of char * expr * expr
+
+ (* variant for function calls. *)
+ | Call of string * expr array
+
+(* proto - This type represents the "prototype" for a function, which captures
+ * its name, and its argument names (thus implicitly the number of arguments the
+ * function takes). *)
+type proto = Prototype of string * string array
+
+(* func - This type represents a function definition itself. *)
+type func = Function of proto * expr
+</pre>
+</dd>
+
+<dt>parser.ml:</dt>
+<dd class="doc_code">
+<pre>
+(*===---------------------------------------------------------------------===
+ * Parser
+ *===---------------------------------------------------------------------===*)
+
+(* binop_precedence - This holds the precedence for each binary operator that is
+ * defined *)
+let binop_precedence:(char, int) Hashtbl.t = Hashtbl.create 10
+
+(* precedence - Get the precedence of the pending binary operator token. *)
+let precedence c = try Hashtbl.find binop_precedence c with Not_found -&gt; -1
+
+(* primary
+ * ::= identifier
+ * ::= numberexpr
+ * ::= parenexpr *)
+let rec parse_primary = parser
+ (* numberexpr ::= number *)
+ | [&lt; 'Token.Number n &gt;] -&gt; Ast.Number n
+
+ (* parenexpr ::= '(' expression ')' *)
+ | [&lt; 'Token.Kwd '('; e=parse_expr; 'Token.Kwd ')' ?? "expected ')'" &gt;] -&gt; e
+
+ (* identifierexpr
+ * ::= identifier
+ * ::= identifier '(' argumentexpr ')' *)
+ | [&lt; 'Token.Ident id; stream &gt;] -&gt;
+ let rec parse_args accumulator = parser
+ | [&lt; e=parse_expr; stream &gt;] -&gt;
+ begin parser
+ | [&lt; 'Token.Kwd ','; e=parse_args (e :: accumulator) &gt;] -&gt; e
+ | [&lt; &gt;] -&gt; e :: accumulator
+ end stream
+ | [&lt; &gt;] -&gt; accumulator
+ in
+ let rec parse_ident id = parser
+ (* Call. *)
+ | [&lt; 'Token.Kwd '(';
+ args=parse_args [];
+ 'Token.Kwd ')' ?? "expected ')'"&gt;] -&gt;
+ Ast.Call (id, Array.of_list (List.rev args))
+
+ (* Simple variable ref. *)
+ | [&lt; &gt;] -&gt; Ast.Variable id
+ in
+ parse_ident id stream
+
+ | [&lt; &gt;] -&gt; raise (Stream.Error "unknown token when expecting an expression.")
+
+(* binoprhs
+ * ::= ('+' primary)* *)
+and parse_bin_rhs expr_prec lhs stream =
+ match Stream.peek stream with
+ (* If this is a binop, find its precedence. *)
+ | Some (Token.Kwd c) when Hashtbl.mem binop_precedence c -&gt;
+ let token_prec = precedence c in
+
+ (* If this is a binop that binds at least as tightly as the current binop,
+ * consume it, otherwise we are done. *)
+ if token_prec &lt; expr_prec then lhs else begin
+ (* Eat the binop. *)
+ Stream.junk stream;
+
+ (* Parse the primary expression after the binary operator. *)
+ let rhs = parse_primary stream in
+
+ (* Okay, we know this is a binop. *)
+ let rhs =
+ match Stream.peek stream with
+ | Some (Token.Kwd c2) -&gt;
+ (* If BinOp binds less tightly with rhs than the operator after
+ * rhs, let the pending operator take rhs as its lhs. *)
+ let next_prec = precedence c2 in
+ if token_prec &lt; next_prec
+ then parse_bin_rhs (token_prec + 1) rhs stream
+ else rhs
+ | _ -&gt; rhs
+ in
+
+ (* Merge lhs/rhs. *)
+ let lhs = Ast.Binary (c, lhs, rhs) in
+ parse_bin_rhs expr_prec lhs stream
+ end
+ | _ -&gt; lhs
+
+(* expression
+ * ::= primary binoprhs *)
+and parse_expr = parser
+ | [&lt; lhs=parse_primary; stream &gt;] -&gt; parse_bin_rhs 0 lhs stream
+
+(* prototype
+ * ::= id '(' id* ')' *)
+let parse_prototype =
+ let rec parse_args accumulator = parser
+ | [&lt; 'Token.Ident id; e=parse_args (id::accumulator) &gt;] -&gt; e
+ | [&lt; &gt;] -&gt; accumulator
+ in
+
+ parser
+ | [&lt; 'Token.Ident id;
+ 'Token.Kwd '(' ?? "expected '(' in prototype";
+ args=parse_args [];
+ 'Token.Kwd ')' ?? "expected ')' in prototype" &gt;] -&gt;
+ (* success. *)
+ Ast.Prototype (id, Array.of_list (List.rev args))
+
+ | [&lt; &gt;] -&gt;
+ raise (Stream.Error "expected function name in prototype")
+
+(* definition ::= 'def' prototype expression *)
+let parse_definition = parser
+ | [&lt; 'Token.Def; p=parse_prototype; e=parse_expr &gt;] -&gt;
+ Ast.Function (p, e)
+
+(* toplevelexpr ::= expression *)
+let parse_toplevel = parser
+ | [&lt; e=parse_expr &gt;] -&gt;
+ (* Make an anonymous proto. *)
+ Ast.Function (Ast.Prototype ("", [||]), e)
+
+(* external ::= 'extern' prototype *)
+let parse_extern = parser
+ | [&lt; 'Token.Extern; e=parse_prototype &gt;] -&gt; e
+</pre>
+</dd>
+
+<dt>toplevel.ml:</dt>
+<dd class="doc_code">
+<pre>
+(*===----------------------------------------------------------------------===
+ * Top-Level parsing and JIT Driver
+ *===----------------------------------------------------------------------===*)
+
+(* top ::= definition | external | expression | ';' *)
+let rec main_loop stream =
+ match Stream.peek stream with
+ | None -&gt; ()
+
+ (* ignore top-level semicolons. *)
+ | Some (Token.Kwd ';') -&gt;
+ Stream.junk stream;
+ main_loop stream
+
+ | Some token -&gt;
+ begin
+ try match token with
+ | Token.Def -&gt;
+ ignore(Parser.parse_definition stream);
+ print_endline "parsed a function definition.";
+ | Token.Extern -&gt;
+ ignore(Parser.parse_extern stream);
+ print_endline "parsed an extern.";
+ | _ -&gt;
+ (* Evaluate a top-level expression into an anonymous function. *)
+ ignore(Parser.parse_toplevel stream);
+ print_endline "parsed a top-level expr";
+ with Stream.Error s -&gt;
+ (* Skip token for error recovery. *)
+ Stream.junk stream;
+ print_endline s;
+ end;
+ print_string "ready&gt; "; flush stdout;
+ main_loop stream
+</pre>
+</dd>
+
+<dt>toy.ml:</dt>
+<dd class="doc_code">
+<pre>
+(*===----------------------------------------------------------------------===
+ * Main driver code.
+ *===----------------------------------------------------------------------===*)
+
+let main () =
+ (* Install standard binary operators.
+ * 1 is the lowest precedence. *)
+ Hashtbl.add Parser.binop_precedence '&lt;' 10;
+ Hashtbl.add Parser.binop_precedence '+' 20;
+ Hashtbl.add Parser.binop_precedence '-' 20;
+ Hashtbl.add Parser.binop_precedence '*' 40; (* highest. *)
+
+ (* Prime the first token. *)
+ print_string "ready&gt; "; flush stdout;
+ let stream = Lexer.lex (Stream.of_channel stdin) in
+
+ (* Run the main "interpreter loop" now. *)
+ Toplevel.main_loop stream;
+;;
+
+main ()
+</pre>
+</dd>
+</dl>
+
+<a href="OCamlLangImpl3.html">Next: Implementing Code Generation to LLVM IR</a>
+</div>
+
+<!-- *********************************************************************** -->
+<hr>
+<address>
+ <a href="http://jigsaw.w3.org/css-validator/check/referer"><img
+ src="http://jigsaw.w3.org/css-validator/images/vcss" alt="Valid CSS!"></a>
+ <a href="http://validator.w3.org/check/referer"><img
+ src="http://www.w3.org/Icons/valid-html401" alt="Valid HTML 4.01!"></a>
+
+ <a href="mailto:sabre@nondot.org">Chris Lattner</a>
+ <a href="mailto:erickt@users.sourceforge.net">Erick Tryzelaar</a><br>
+ <a href="http://llvm.org/">The LLVM Compiler Infrastructure</a><br>
+ Last modified: $Date$
+</address>
+</body>
+</html>
diff --git a/docs/tutorial/OCamlLangImpl3.html b/docs/tutorial/OCamlLangImpl3.html
new file mode 100644
index 00000000000..a49a0b5d9c6
--- /dev/null
+++ b/docs/tutorial/OCamlLangImpl3.html
@@ -0,0 +1,1093 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
+ "http://www.w3.org/TR/html4/strict.dtd">
+
+<html>
+<head>
+ <title>Kaleidoscope: Implementing code generation to LLVM IR</title>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
+ <meta name="author" content="Chris Lattner">
+ <meta name="author" content="Erick Tryzelaar">
+ <link rel="stylesheet" href="../_static/llvm.css" type="text/css">
+</head>
+
+<body>
+
+<h1>Kaleidoscope: Code generation to LLVM IR</h1>
+
+<ul>
+<li><a href="index.html">Up to Tutorial Index</a></li>
+<li>Chapter 3
+ <ol>
+ <li><a href="#intro">Chapter 3 Introduction</a></li>
+ <li><a href="#basics">Code Generation Setup</a></li>
+ <li><a href="#exprs">Expression Code Generation</a></li>
+ <li><a href="#funcs">Function Code Generation</a></li>
+ <li><a href="#driver">Driver Changes and Closing Thoughts</a></li>
+ <li><a href="#code">Full Code Listing</a></li>
+ </ol>
+</li>
+<li><a href="OCamlLangImpl4.html">Chapter 4</a>: Adding JIT and Optimizer
+Support</li>
+</ul>
+
+<div class="doc_author">
+ <p>
+ Written by <a href="mailto:sabre@nondot.org">Chris Lattner</a>
+ and <a href="mailto:idadesub@users.sourceforge.net">Erick Tryzelaar</a>
+ </p>
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="intro">Chapter 3 Introduction</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>Welcome to Chapter 3 of the "<a href="index.html">Implementing a language
+with LLVM</a>" tutorial. This chapter shows you how to transform the <a
+href="OCamlLangImpl2.html">Abstract Syntax Tree</a>, built in Chapter 2, into
+LLVM IR. This will teach you a little bit about how LLVM does things, as well
+as demonstrate how easy it is to use. It's much more work to build a lexer and
+parser than it is to generate LLVM IR code. :)
+</p>
+
+<p><b>Please note</b>: the code in this chapter and later requires LLVM 2.3 or
+LLVM SVN to work. LLVM 2.2 and before will not work with it.</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="basics">Code Generation Setup</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>
+In order to generate LLVM IR, we want some simple setup to get started. First
+we define a code generation (codegen) function that pattern matches on each AST
+variant:</p>
+
+<div class="doc_code">
+<pre>
+let rec codegen_expr = function
+ | Ast.Number n -&gt; ...
+ | Ast.Variable name -&gt; ...
+</pre>
+</div>
+
+<p>The <tt>Codegen.codegen_expr</tt> function emits IR for the given AST node
+along with all the things it depends on, and returns an LLVM Value
+object. "Value" is the class used to represent a "<a
+href="http://en.wikipedia.org/wiki/Static_single_assignment_form">Static Single
+Assignment (SSA)</a> register" or "SSA value" in LLVM. The most distinct aspect
+of SSA values is that their value is computed as the related instruction
+executes, and it does not get a new value until (and if) the instruction
+re-executes. In other words, there is no way to "change" an SSA value. For
+more information, please read up on <a
+href="http://en.wikipedia.org/wiki/Static_single_assignment_form">Static Single
+Assignment</a> - the concepts are really quite natural once you grok them.</p>
+
+<p>The
+second thing we want is an "Error" exception like we used for the parser, which
+will be used to report errors found during code generation (for example, use of
+an undeclared parameter):</p>
+
+<div class="doc_code">
+<pre>
+exception Error of string
+
+let context = global_context ()
+let the_module = create_module context "my cool jit"
+let builder = builder context
+let named_values:(string, llvalue) Hashtbl.t = Hashtbl.create 10
+let double_type = double_type context
+</pre>
+</div>
+
+<p>These module-level values will be used during code generation.
+<tt>Codegen.the_module</tt> is the LLVM construct that contains all of the
+functions and global variables in a chunk of code. In many ways, it is the
+top-level structure that the LLVM IR uses to contain code.</p>
+
+<p>The <tt>Codegen.builder</tt> object is a helper object that makes it easy to
+generate LLVM instructions. Instances of the <a
+href="http://llvm.org/doxygen/IRBuilder_8h-source.html"><tt>IRBuilder</tt></a>
+class keep track of the current place to insert instructions and has methods to
+create new instructions.</p>
+
+<p>The <tt>Codegen.named_values</tt> map keeps track of which values are defined
+in the current scope and what their LLVM representation is. (In other words, it
+is a symbol table for the code). In this form of Kaleidoscope, the only things
+that can be referenced are function parameters. As such, function parameters
+will be in this map when generating code for their function body.</p>
+
+<p>
+With these basics in place, we can start talking about how to generate code for
+each expression. Note that this assumes that the <tt>Codegen.builder</tt> has
+been set up to generate code <em>into</em> something. For now, we'll assume
+that this has already been done, and we'll just use it to emit code.</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="exprs">Expression Code Generation</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>Generating LLVM code for expression nodes is very straightforward: less
+than 30 lines of commented code for all four of our expression nodes. First
+we'll do numeric literals:</p>
+
+<div class="doc_code">
+<pre>
+ | Ast.Number n -&gt; const_float double_type n
+</pre>
+</div>
+
+<p>In the LLVM IR, numeric constants are represented with the
+<tt>ConstantFP</tt> class, which holds the numeric value in an <tt>APFloat</tt>
+internally (<tt>APFloat</tt> has the capability of holding floating point
+constants of <em>A</em>rbitrary <em>P</em>recision). This code basically just
+creates and returns a <tt>ConstantFP</tt>. Note that in the LLVM IR,
+constants are all uniqued together and shared. For this reason, the API
+uses "the foo::get(..)" idiom instead of "new foo(..)" or "foo::Create(..)".</p>
+
+<div class="doc_code">
+<pre>
+ | Ast.Variable name -&gt;
+ (try Hashtbl.find named_values name with
+ | Not_found -&gt; raise (Error "unknown variable name"))
+</pre>
+</div>
+
+<p>References to variables are also quite simple using LLVM. In the simple
+version of Kaleidoscope, we assume that the variable has already been emitted
+somewhere and its value is available. In practice, the only values that can be
+in the <tt>Codegen.named_values</tt> map are function arguments. This code
+simply checks to see that the specified name is in the map (if not, an unknown
+variable is being referenced) and returns the value for it. In future chapters,
+we'll add support for <a href="OCamlLangImpl5.html#for">loop induction variables</a>
+in the symbol table, and for <a href="OCamlLangImpl7.html#localvars">local
+variables</a>.</p>
+
+<div class="doc_code">
+<pre>
+ | Ast.Binary (op, lhs, rhs) -&gt;
+ let lhs_val = codegen_expr lhs in
+ let rhs_val = codegen_expr rhs in
+ begin
+ match op with
+ | '+' -&gt; build_fadd lhs_val rhs_val "addtmp" builder
+ | '-' -&gt; build_fsub lhs_val rhs_val "subtmp" builder
+ | '*' -&gt; build_fmul lhs_val rhs_val "multmp" builder
+ | '&lt;' -&gt;
+ (* Convert bool 0/1 to double 0.0 or 1.0 *)
+ let i = build_fcmp Fcmp.Ult lhs_val rhs_val "cmptmp" builder in
+ build_uitofp i double_type "booltmp" builder
+ | _ -&gt; raise (Error "invalid binary operator")
+ end
+</pre>
+</div>
+
+<p>Binary operators start to get more interesting. The basic idea here is that
+we recursively emit code for the left-hand side of the expression, then the
+right-hand side, then we compute the result of the binary expression. In this
+code, we do a simple switch on the opcode to create the right LLVM instruction.
+</p>
+
+<p>In the example above, the LLVM builder class is starting to show its value.
+IRBuilder knows where to insert the newly created instruction; all you have to
+do is specify what instruction to create (e.g. with <tt>Llvm.build_fadd</tt>),
+which operands to use (<tt>lhs</tt> and <tt>rhs</tt> here) and optionally
+provide a name for the generated instruction.</p>
+
+<p>One nice thing about LLVM is that the name is just a hint. For instance, if
+the code above emits multiple "addtmp" variables, LLVM will automatically
+provide each one with an increasing, unique numeric suffix. Local value names
+for instructions are purely optional, but it makes it much easier to read the
+IR dumps.</p>
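+
+<p>For instance, an expression like "x+x+x" would emit something along these
+lines (a rough sketch of the output), with LLVM numbering the repeated
+"addtmp" names for us:</p>
+
+<div class="doc_code">
+<pre>
+%addtmp = fadd double %x, %x
+%addtmp1 = fadd double %addtmp, %x
+</pre>
+</div>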
+
+<p><a href="../LangRef.html#instref">LLVM instructions</a> are constrained by
+strict rules: for example, the Left and Right operands of
+an <a href="../LangRef.html#i_add">add instruction</a> must have the same
+type, and the result type of the add must match the operand types. Because
+all values in Kaleidoscope are doubles, this makes for very simple code for add,
+sub and mul.</p>
+
+<p>On the other hand, LLVM specifies that the <a
+href="../LangRef.html#i_fcmp">fcmp instruction</a> always returns an 'i1' value
+(a one bit integer). The problem with this is that Kaleidoscope wants the value
+to be a 0.0 or 1.0 value. In order to get these semantics, we combine the fcmp
+instruction with
+a <a href="../LangRef.html#i_uitofp">uitofp instruction</a>. This instruction
+converts its input integer into a floating point value by treating the input
+as an unsigned value. In contrast, if we used the <a
+href="../LangRef.html#i_sitofp">sitofp instruction</a>, the Kaleidoscope '&lt;'
+operator would return 0.0 and -1.0, depending on the input value.</p>
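+
+<p>Concretely, codegen for an expression like "x &lt; y" therefore emits roughly
+this pair of instructions (sketch only):</p>
+
+<div class="doc_code">
+<pre>
+%cmptmp = fcmp ult double %x, %y
+%booltmp = uitofp i1 %cmptmp to double
+</pre>
+</div>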
+
+<div class="doc_code">
+<pre>
+ | Ast.Call (callee, args) -&gt;
+ (* Look up the name in the module table. *)
+ let callee =
+ match lookup_function callee the_module with
+ | Some callee -&gt; callee
+ | None -&gt; raise (Error "unknown function referenced")
+ in
+ let params = params callee in
+
+ (* If argument mismatch error. *)
+ if Array.length params == Array.length args then () else
+ raise (Error "incorrect # arguments passed");
+ let args = Array.map codegen_expr args in
+ build_call callee args "calltmp" builder
+</pre>
+</div>
+
+<p>Code generation for function calls is quite straightforward with LLVM. The
+code above initially does a function name lookup in the LLVM Module's symbol
+table. Recall that the LLVM Module is the container that holds all of the
+functions we are JIT'ing. By giving each function the same name as what the
+user specifies, we can use the LLVM symbol table to resolve function names for
+us.</p>
+
+<p>Once we have the function to call, we recursively codegen each argument that
+is to be passed in, and create an LLVM <a href="../LangRef.html#i_call">call
+instruction</a>. Note that LLVM uses the native C calling conventions by
+default, allowing these calls to also call into standard library functions like
+"sin" and "cos", with no additional effort.</p>
+
+<p>This wraps up our handling of the four basic expressions that we have so far
+in Kaleidoscope. Feel free to go in and add some more. For example, by
+browsing the <a href="../LangRef.html">LLVM language reference</a> you'll find
+several other interesting instructions that are really easy to plug into our
+basic framework.</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="funcs">Function Code Generation</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>Code generation for prototypes and functions must handle a number of
+details, which make their code less beautiful than expression code
+generation, but allow us to illustrate some important points. First, let's
+talk about code generation for prototypes: they are used both for function
+bodies and external function declarations. The code starts with:</p>
+
+<div class="doc_code">
+<pre>
+let codegen_proto = function
+ | Ast.Prototype (name, args) -&gt;
+ (* Make the function type: double(double,double) etc. *)
+ let doubles = Array.make (Array.length args) double_type in
+ let ft = function_type double_type doubles in
+ let f =
+ match lookup_function name the_module with
+</pre>
+</div>
+
+<p>This code packs a lot of power into a few lines. Note first that this
+function returns a "Function*" instead of a "Value*" (although at the moment
+they both are modeled by <tt>llvalue</tt> in OCaml). Because a "prototype"
+really talks about the external interface for a function (not the value computed
+by an expression), it makes sense for it to return the LLVM Function it
+corresponds to when codegen'd.</p>
+
+<p>The call to <tt>Llvm.function_type</tt> creates the <tt>Llvm.lltype</tt>
+that should be used for a given Prototype. Since all function arguments in
+Kaleidoscope are of type double, the first line creates an array of "N" LLVM
+double types. It then uses the <tt>Llvm.function_type</tt> function to create a
+function type that takes "N" doubles as arguments, returns one double as a
+result, and that is not vararg (varargs function types are created with
+<tt>Llvm.var_arg_function_type</tt> instead). Note that Types in LLVM are
+uniqued just like <tt>Constant</tt>s are, so you don't "new" a type, you "get"
+it.</p>
+
+<p>The final line above checks if the function has already been defined in
+<tt>Codegen.the_module</tt>. If not, we will create it.</p>
+
+<div class="doc_code">
+<pre>
+ | None -&gt; declare_function name ft the_module
+</pre>
+</div>
+
+<p>This indicates the type and name to use, as well as which module to insert
+into. By default we assume a function has
+<tt>Llvm.Linkage.ExternalLinkage</tt>. "<a href="../LangRef.html#linkage">external
+linkage</a>" means that the function may be defined outside the current module
+and/or that it is callable by functions outside the module. The "<tt>name</tt>"
+passed in is the name the user specified: this name is registered in
+<tt>Codegen.the_module</tt>'s symbol table, which is used by the function call
+code above.</p>
+
+<p>In Kaleidoscope, I choose to allow redefinitions of functions in two cases:
+first, we want to allow 'extern'ing a function more than once, as long as the
+prototypes for the externs match (since all arguments have the same type, we
+just have to check that the number of arguments match). Second, we want to
+allow 'extern'ing a function and then defining a body for it. This is useful
+when defining mutually recursive functions.</p>
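+
+<p>For example, both of the following (hypothetical) interactions are accepted
+by this logic: re-'extern'ing a matching prototype, and 'extern'ing a function
+before defining its body:</p>
+
+<div class="doc_code">
+<pre>
+extern foo(a b);   # creates a declaration for foo.
+extern foo(a b);   # ok: matching prototype, no body yet.
+def foo(a b) a+b;  # ok: provides the body for the earlier declaration.
+</pre>
+</div>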
+
+<div class="doc_code">
+<pre>
+ (* If 'f' conflicted, there was already something named 'name'. If it
+ * has a body, don't allow redefinition or reextern. *)
+ | Some f -&gt;
+ (* If 'f' already has a body, reject this. *)
+ if Array.length (basic_blocks f) == 0 then () else
+ raise (Error "redefinition of function");
+
+ (* If 'f' took a different number of arguments, reject. *)
+ if Array.length (params f) == Array.length args then () else
+ raise (Error "redefinition of function with different # args");
+ f
+ in
+</pre>
+</div>
+
+<p>In order to verify the logic above, we first check to see if the pre-existing
+function is "empty". In this case, empty means that it has no basic blocks in
+it, which means it has no body. If it has no body, it is a forward
+declaration. Since we don't allow anything after a full definition of the
+function, the code rejects this case. If the previous reference to a function
+was an 'extern', we simply verify that the number of arguments for that
+definition and this one match up. If not, we emit an error.</p>
+
+<div class="doc_code">
+<pre>
+ (* Set names for all arguments. *)
+ Array.iteri (fun i a -&gt;
+ let n = args.(i) in
+ set_value_name n a;
+ Hashtbl.add named_values n a;
+ ) (params f);
+ f
+</pre>
+</div>
+
+<p>The last bit of code for prototypes loops over all of the arguments in the
+function, setting the name of the LLVM Argument objects to match, and registering
+the arguments in the <tt>Codegen.named_values</tt> map for future use by the
+<tt>Ast.Variable</tt> variant. Once this is set up, it returns the Function
+object to the caller. Note that we don't check for conflicting
+argument names here (e.g. "extern foo(a b a)"). Doing so would be very
+straight-forward with the mechanics we have already used above.</p>
+
+<div class="doc_code">
+<pre>
+let codegen_func = function
+ | Ast.Function (proto, body) -&gt;
+ Hashtbl.clear named_values;
+ let the_function = codegen_proto proto in
+</pre>
+</div>
+
+<p>Code generation for function definitions starts out simply enough: we just
+codegen the prototype (<tt>proto</tt>) and verify that it is ok. We then clear out the
+<tt>Codegen.named_values</tt> map to make sure that there isn't anything in it
+from the last function we compiled. Code generation of the prototype ensures
+that there is an LLVM Function object that is ready to go for us.</p>
+
+<div class="doc_code">
+<pre>
+ (* Create a new basic block to start insertion into. *)
+ let bb = append_block context "entry" the_function in
+ position_at_end bb builder;
+
+ try
+ let ret_val = codegen_expr body in
+</pre>
+</div>
+
+<p>Now we get to the point where the <tt>Codegen.builder</tt> is set up. The
+first line creates a new
+<a href="http://en.wikipedia.org/wiki/Basic_block">basic block</a> (named
+"entry"), which is inserted into <tt>the_function</tt>. The second line then
+tells the builder that new instructions should be inserted into the end of the
+new basic block. Basic blocks in LLVM are an important part of functions that
+define the <a
+href="http://en.wikipedia.org/wiki/Control_flow_graph">Control Flow Graph</a>.
+Since we don't have any control flow, our functions will only contain one
+block at this point. We'll fix this in <a href="OCamlLangImpl5.html">Chapter
+5</a> :).</p>
+
+<div class="doc_code">
+<pre>
+ let ret_val = codegen_expr body in
+
+ (* Finish off the function. *)
+ let _ = build_ret ret_val builder in
+
+ (* Validate the generated code, checking for consistency. *)
+ Llvm_analysis.assert_valid_function the_function;
+
+ the_function
+</pre>
+</div>
+
+<p>Once the insertion point is set up, we call the <tt>Codegen.codegen_expr</tt>
+function for the root expression of the function. If no error happens, this emits
+code to compute the expression into the entry block and returns the value that
+was computed. Assuming no error, we then create an LLVM <a
+href="../LangRef.html#i_ret">ret instruction</a>, which completes the function.
+Once the function is built, we call
+<tt>Llvm_analysis.assert_valid_function</tt>, which is provided by LLVM. This
+function does a variety of consistency checks on the generated code, to
+determine if our compiler is doing everything right. Using this is important:
+it can catch a lot of bugs. Once the function is finished and validated, we
+return it.</p>
+
+<div class="doc_code">
+<pre>
+ with e -&gt;
+ delete_function the_function;
+ raise e
+</pre>
+</div>
+
+<p>The only piece left here is handling of the error case. For simplicity, we
+handle this by merely deleting the function we produced with the
+<tt>Llvm.delete_function</tt> method. This allows the user to redefine a
+function that they incorrectly typed in before: if we didn't delete it, it
+would live in the symbol table, with a body, preventing future redefinition.</p>
+
+<p>This code does have a bug, though. Since <tt>Codegen.codegen_proto</tt>
+can return a previously defined forward declaration, our code can actually delete
+a forward declaration. There are a number of ways to fix this bug, see what you
+can come up with! Here is a testcase:</p>
+
+<div class="doc_code">
+<pre>
+extern foo(a b); # ok, defines foo.
+def foo(a b) c; # error, 'c' is invalid.
+def bar() foo(1, 2); # error, unknown function "foo"
+</pre>
+</div>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="driver">Driver Changes and Closing Thoughts</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>
+For now, code generation to LLVM doesn't really get us much, except that we can
+look at the pretty IR calls. The sample code inserts calls to Codegen into the
+"<tt>Toplevel.main_loop</tt>", and then dumps out the LLVM IR. This gives a
+nice way to look at the LLVM IR for simple functions. For example:
+</p>
+
+<div class="doc_code">
+<pre>
+ready&gt; <b>4+5</b>;
+Read top-level expression:
+define double @""() {
+entry:
+ %addtmp = fadd double 4.000000e+00, 5.000000e+00
+ ret double %addtmp
+}
+</pre>
+</div>
+
+<p>Note how the parser turns the top-level expression into an anonymous function
+for us. This will be handy when we add <a href="OCamlLangImpl4.html#jit">JIT
+support</a> in the next chapter. Also note that the code is very literally
+transcribed; no optimizations are being performed. We will
+<a href="OCamlLangImpl4.html#trivialconstfold">add optimizations</a> explicitly
+in the next chapter.</p>
+
+<div class="doc_code">
+<pre>
+ready&gt; <b>def foo(a b) a*a + 2*a*b + b*b;</b>
+Read function definition:
+define double @foo(double %a, double %b) {
+entry:
+ %multmp = fmul double %a, %a
+ %multmp1 = fmul double 2.000000e+00, %a
+ %multmp2 = fmul double %multmp1, %b
+ %addtmp = fadd double %multmp, %multmp2
+ %multmp3 = fmul double %b, %b
+ %addtmp4 = fadd double %addtmp, %multmp3
+ ret double %addtmp4
+}
+</pre>
+</div>
+
+<p>This shows some simple arithmetic. Notice the striking similarity to the
+LLVM builder calls that we use to create the instructions.</p>
+
+<div class="doc_code">
+<pre>
+ready&gt; <b>def bar(a) foo(a, 4.0) + bar(31337);</b>
+Read function definition:
+define double @bar(double %a) {
+entry:
+ %calltmp = call double @foo(double %a, double 4.000000e+00)
+ %calltmp1 = call double @bar(double 3.133700e+04)
+ %addtmp = fadd double %calltmp, %calltmp1
+ ret double %addtmp
+}
+</pre>
+</div>
+
+<p>This shows some function calls. Note that this function will take a long
+time to execute if you call it. In the future we'll add conditional control
+flow to actually make recursion useful :).</p>
+
+<div class="doc_code">
+<pre>
+ready&gt; <b>extern cos(x);</b>
+Read extern:
+declare double @cos(double)
+
+ready&gt; <b>cos(1.234);</b>
+Read top-level expression:
+define double @""() {
+entry:
+ %calltmp = call double @cos(double 1.234000e+00)
+ ret double %calltmp
+}
+</pre>
+</div>
+
+<p>This shows an extern for the libm "cos" function, and a call to it.</p>
+
+
+<div class="doc_code">
+<pre>
+ready&gt; <b>^D</b>
+; ModuleID = 'my cool jit'
+
+define double @""() {
+entry:
+ %addtmp = fadd double 4.000000e+00, 5.000000e+00
+ ret double %addtmp
+}
+
+define double @foo(double %a, double %b) {
+entry:
+ %multmp = fmul double %a, %a
+ %multmp1 = fmul double 2.000000e+00, %a
+ %multmp2 = fmul double %multmp1, %b
+ %addtmp = fadd double %multmp, %multmp2
+ %multmp3 = fmul double %b, %b
+ %addtmp4 = fadd double %addtmp, %multmp3
+ ret double %addtmp4
+}
+
+define double @bar(double %a) {
+entry:
+ %calltmp = call double @foo(double %a, double 4.000000e+00)
+ %calltmp1 = call double @bar(double 3.133700e+04)
+ %addtmp = fadd double %calltmp, %calltmp1
+ ret double %addtmp
+}
+
+declare double @cos(double)
+
+define double @""() {
+entry:
+ %calltmp = call double @cos(double 1.234000e+00)
+ ret double %calltmp
+}
+</pre>
+</div>
+
+<p>When you quit the current demo, it dumps out the IR for the entire module
+generated. Here you can see the big picture with all the functions referencing
+each other.</p>
+
+<p>This wraps up the third chapter of the Kaleidoscope tutorial. Up next, we'll
+describe how to <a href="OCamlLangImpl4.html">add JIT codegen and optimizer
+support</a> to this so we can actually start running code!</p>
+
+</div>
+
+
+<!-- *********************************************************************** -->
+<h2><a name="code">Full Code Listing</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>
+Here is the complete code listing for our running example, enhanced with the
+LLVM code generator. Because this uses the LLVM libraries, we need to link
+them in. Here that is handled by ocamlbuild: the <tt>_tags</tt> and
+<tt>myocamlbuild.ml</tt> files below pull in the <tt>llvm</tt> and
+<tt>llvm_analysis</tt> bindings, so building and running is simply:</p>
+
+<div class="doc_code">
+<pre>
+# Compile
+ocamlbuild toy.byte
+# Run
+./toy.byte
+</pre>
+</div>
+
+<p>Here is the code:</p>
+
+<dl>
+<dt>_tags:</dt>
+<dd class="doc_code">
+<pre>
+&lt;{lexer,parser}.ml&gt;: use_camlp4, pp(camlp4of)
+&lt;*.{byte,native}&gt;: g++, use_llvm, use_llvm_analysis
+</pre>
+</dd>
+
+<dt>myocamlbuild.ml:</dt>
+<dd class="doc_code">
+<pre>
+open Ocamlbuild_plugin;;
+
+ocaml_lib ~extern:true "llvm";;
+ocaml_lib ~extern:true "llvm_analysis";;
+
+flag ["link"; "ocaml"; "g++"] (S[A"-cc"; A"g++"]);;
+</pre>
+</dd>
+
+<dt>token.ml:</dt>
+<dd class="doc_code">
+<pre>
+(*===----------------------------------------------------------------------===
+ * Lexer Tokens
+ *===----------------------------------------------------------------------===*)
+
+(* The lexer returns these 'Kwd' if it is an unknown character, otherwise one of
+ * these others for known things. *)
+type token =
+ (* commands *)
+ | Def | Extern
+
+ (* primary *)
+ | Ident of string | Number of float
+
+ (* unknown *)
+ | Kwd of char
+</pre>
+</dd>
+
+<dt>lexer.ml:</dt>
+<dd class="doc_code">
+<pre>
+(*===----------------------------------------------------------------------===
+ * Lexer
+ *===----------------------------------------------------------------------===*)
+
+let rec lex = parser
+ (* Skip any whitespace. *)
+ | [&lt; ' (' ' | '\n' | '\r' | '\t'); stream &gt;] -&gt; lex stream
+
+ (* identifier: [a-zA-Z][a-zA-Z0-9] *)
+ | [&lt; ' ('A' .. 'Z' | 'a' .. 'z' as c); stream &gt;] -&gt;
+ let buffer = Buffer.create 1 in
+ Buffer.add_char buffer c;
+ lex_ident buffer stream
+
+ (* number: [0-9.]+ *)
+ | [&lt; ' ('0' .. '9' as c); stream &gt;] -&gt;
+ let buffer = Buffer.create 1 in
+ Buffer.add_char buffer c;
+ lex_number buffer stream
+
+ (* Comment until end of line. *)
+ | [&lt; ' ('#'); stream &gt;] -&gt;
+ lex_comment stream
+
+ (* Otherwise, just return the character as its ascii value. *)
+ | [&lt; 'c; stream &gt;] -&gt;
+ [&lt; 'Token.Kwd c; lex stream &gt;]
+
+ (* end of stream. *)
+ | [&lt; &gt;] -&gt; [&lt; &gt;]
+
+and lex_number buffer = parser
+ | [&lt; ' ('0' .. '9' | '.' as c); stream &gt;] -&gt;
+ Buffer.add_char buffer c;
+ lex_number buffer stream
+ | [&lt; stream=lex &gt;] -&gt;
+ [&lt; 'Token.Number (float_of_string (Buffer.contents buffer)); stream &gt;]
+
+and lex_ident buffer = parser
+ | [&lt; ' ('A' .. 'Z' | 'a' .. 'z' | '0' .. '9' as c); stream &gt;] -&gt;
+ Buffer.add_char buffer c;
+ lex_ident buffer stream
+ | [&lt; stream=lex &gt;] -&gt;
+ match Buffer.contents buffer with
+ | "def" -&gt; [&lt; 'Token.Def; stream &gt;]
+ | "extern" -&gt; [&lt; 'Token.Extern; stream &gt;]
+ | id -&gt; [&lt; 'Token.Ident id; stream &gt;]
+
+and lex_comment = parser
+ | [&lt; ' ('\n'); stream=lex &gt;] -&gt; stream
+ | [&lt; 'c; e=lex_comment &gt;] -&gt; e
+ | [&lt; &gt;] -&gt; [&lt; &gt;]
+</pre>
+</dd>
+
+<dt>ast.ml:</dt>
+<dd class="doc_code">
+<pre>
+(*===----------------------------------------------------------------------===
+ * Abstract Syntax Tree (aka Parse Tree)
+ *===----------------------------------------------------------------------===*)
+
+(* expr - Base type for all expression nodes. *)
+type expr =
+ (* variant for numeric literals like "1.0". *)
+ | Number of float
+
+ (* variant for referencing a variable, like "a". *)
+ | Variable of string
+
+ (* variant for a binary operator. *)
+ | Binary of char * expr * expr
+
+ (* variant for function calls. *)
+ | Call of string * expr array
+
+(* proto - This type represents the "prototype" for a function, which captures
+ * its name, and its argument names (thus implicitly the number of arguments the
+ * function takes). *)
+type proto = Prototype of string * string array
+
+(* func - This type represents a function definition itself. *)
+type func = Function of proto * expr
+</pre>
+</dd>
+
+<dt>parser.ml:</dt>
+<dd class="doc_code">
+<pre>
+(*===---------------------------------------------------------------------===
+ * Parser
+ *===---------------------------------------------------------------------===*)
+
+(* binop_precedence - This holds the precedence for each binary operator that is
+ * defined *)
+let binop_precedence:(char, int) Hashtbl.t = Hashtbl.create 10
+
+(* precedence - Get the precedence of the pending binary operator token. *)
+let precedence c = try Hashtbl.find binop_precedence c with Not_found -&gt; -1
+
+(* primary
+ * ::= identifier
+ * ::= numberexpr
+ * ::= parenexpr *)
+let rec parse_primary = parser
+ (* numberexpr ::= number *)
+ | [&lt; 'Token.Number n &gt;] -&gt; Ast.Number n
+
+ (* parenexpr ::= '(' expression ')' *)
+ | [&lt; 'Token.Kwd '('; e=parse_expr; 'Token.Kwd ')' ?? "expected ')'" &gt;] -&gt; e
+
+ (* identifierexpr
+ * ::= identifier
+ * ::= identifier '(' argumentexpr ')' *)
+ | [&lt; 'Token.Ident id; stream &gt;] -&gt;
+ let rec parse_args accumulator = parser
+ | [&lt; e=parse_expr; stream &gt;] -&gt;
+ begin parser
+ | [&lt; 'Token.Kwd ','; e=parse_args (e :: accumulator) &gt;] -&gt; e
+ | [&lt; &gt;] -&gt; e :: accumulator
+ end stream
+ | [&lt; &gt;] -&gt; accumulator
+ in
+ let rec parse_ident id = parser
+ (* Call. *)
+ | [&lt; 'Token.Kwd '(';
+ args=parse_args [];
+ 'Token.Kwd ')' ?? "expected ')'"&gt;] -&gt;
+ Ast.Call (id, Array.of_list (List.rev args))
+
+ (* Simple variable ref. *)
+ | [&lt; &gt;] -&gt; Ast.Variable id
+ in
+ parse_ident id stream
+
+ | [&lt; &gt;] -&gt; raise (Stream.Error "unknown token when expecting an expression.")
+
+(* binoprhs
+ * ::= ('+' primary)* *)
+and parse_bin_rhs expr_prec lhs stream =
+ match Stream.peek stream with
+ (* If this is a binop, find its precedence. *)
+ | Some (Token.Kwd c) when Hashtbl.mem binop_precedence c -&gt;
+ let token_prec = precedence c in
+
+ (* If this is a binop that binds at least as tightly as the current binop,
+ * consume it, otherwise we are done. *)
+ if token_prec &lt; expr_prec then lhs else begin
+ (* Eat the binop. *)
+ Stream.junk stream;
+
+ (* Parse the primary expression after the binary operator. *)
+ let rhs = parse_primary stream in
+
+ (* Okay, we know this is a binop. *)
+ let rhs =
+ match Stream.peek stream with
+ | Some (Token.Kwd c2) -&gt;
+ (* If BinOp binds less tightly with rhs than the operator after
+ * rhs, let the pending operator take rhs as its lhs. *)
+ let next_prec = precedence c2 in
+ if token_prec &lt; next_prec
+ then parse_bin_rhs (token_prec + 1) rhs stream
+ else rhs
+ | _ -&gt; rhs
+ in
+
+ (* Merge lhs/rhs. *)
+ let lhs = Ast.Binary (c, lhs, rhs) in
+ parse_bin_rhs expr_prec lhs stream
+ end
+ | _ -&gt; lhs
+
+(* expression
+ * ::= primary binoprhs *)
+and parse_expr = parser
+ | [&lt; lhs=parse_primary; stream &gt;] -&gt; parse_bin_rhs 0 lhs stream
+
+(* prototype
+ * ::= id '(' id* ')' *)
+let parse_prototype =
+ let rec parse_args accumulator = parser
+ | [&lt; 'Token.Ident id; e=parse_args (id::accumulator) &gt;] -&gt; e
+ | [&lt; &gt;] -&gt; accumulator
+ in
+
+ parser
+ | [&lt; 'Token.Ident id;
+ 'Token.Kwd '(' ?? "expected '(' in prototype";
+ args=parse_args [];
+ 'Token.Kwd ')' ?? "expected ')' in prototype" &gt;] -&gt;
+ (* success. *)
+ Ast.Prototype (id, Array.of_list (List.rev args))
+
+ | [&lt; &gt;] -&gt;
+ raise (Stream.Error "expected function name in prototype")
+
+(* definition ::= 'def' prototype expression *)
+let parse_definition = parser
+ | [&lt; 'Token.Def; p=parse_prototype; e=parse_expr &gt;] -&gt;
+ Ast.Function (p, e)
+
+(* toplevelexpr ::= expression *)
+let parse_toplevel = parser
+ | [&lt; e=parse_expr &gt;] -&gt;
+ (* Make an anonymous proto. *)
+ Ast.Function (Ast.Prototype ("", [||]), e)
+
+(* external ::= 'extern' prototype *)
+let parse_extern = parser
+ | [&lt; 'Token.Extern; e=parse_prototype &gt;] -&gt; e
+</pre>
+</dd>
+
+<dt>codegen.ml:</dt>
+<dd class="doc_code">
+<pre>
+(*===----------------------------------------------------------------------===
+ * Code Generation
+ *===----------------------------------------------------------------------===*)
+
+open Llvm
+
+exception Error of string
+
+let context = global_context ()
+let the_module = create_module context "my cool jit"
+let builder = builder context
+let named_values:(string, llvalue) Hashtbl.t = Hashtbl.create 10
+let double_type = double_type context
+
+let rec codegen_expr = function
+ | Ast.Number n -&gt; const_float double_type n
+ | Ast.Variable name -&gt;
+ (try Hashtbl.find named_values name with
+ | Not_found -&gt; raise (Error "unknown variable name"))
+ | Ast.Binary (op, lhs, rhs) -&gt;
+ let lhs_val = codegen_expr lhs in
+ let rhs_val = codegen_expr rhs in
+ begin
+ match op with
+ | '+' -&gt; build_add lhs_val rhs_val "addtmp" builder
+ | '-' -&gt; build_sub lhs_val rhs_val "subtmp" builder
+ | '*' -&gt; build_mul lhs_val rhs_val "multmp" builder
+ | '&lt;' -&gt;
+ (* Convert bool 0/1 to double 0.0 or 1.0 *)
+ let i = build_fcmp Fcmp.Ult lhs_val rhs_val "cmptmp" builder in
+ build_uitofp i double_type "booltmp" builder
+ | _ -&gt; raise (Error "invalid binary operator")
+ end
+ | Ast.Call (callee, args) -&gt;
+ (* Look up the name in the module table. *)
+ let callee =
+ match lookup_function callee the_module with
+ | Some callee -&gt; callee
+ | None -&gt; raise (Error "unknown function referenced")
+ in
+ let params = params callee in
+
+ (* If argument mismatch error. *)
+ if Array.length params == Array.length args then () else
+ raise (Error "incorrect # arguments passed");
+ let args = Array.map codegen_expr args in
+ build_call callee args "calltmp" builder
+
+let codegen_proto = function
+ | Ast.Prototype (name, args) -&gt;
+ (* Make the function type: double(double,double) etc. *)
+ let doubles = Array.make (Array.length args) double_type in
+ let ft = function_type double_type doubles in
+ let f =
+ match lookup_function name the_module with
+ | None -&gt; declare_function name ft the_module
+
+ (* If 'f' conflicted, there was already something named 'name'. If it
+ * has a body, don't allow redefinition or reextern. *)
+ | Some f -&gt;
+ (* If 'f' already has a body, reject this. *)
+ if block_begin f &lt;&gt; At_end f then
+ raise (Error "redefinition of function");
+
+ (* If 'f' took a different number of arguments, reject. *)
+ if element_type (type_of f) &lt;&gt; ft then
+ raise (Error "redefinition of function with different # args");
+ f
+ in
+
+ (* Set names for all arguments. *)
+ Array.iteri (fun i a -&gt;
+ let n = args.(i) in
+ set_value_name n a;
+ Hashtbl.add named_values n a;
+ ) (params f);
+ f
+
+let codegen_func = function
+ | Ast.Function (proto, body) -&gt;
+ Hashtbl.clear named_values;
+ let the_function = codegen_proto proto in
+
+ (* Create a new basic block to start insertion into. *)
+ let bb = append_block context "entry" the_function in
+ position_at_end bb builder;
+
+ try
+ let ret_val = codegen_expr body in
+
+ (* Finish off the function. *)
+ let _ = build_ret ret_val builder in
+
+ (* Validate the generated code, checking for consistency. *)
+ Llvm_analysis.assert_valid_function the_function;
+
+ the_function
+ with e -&gt;
+ delete_function the_function;
+ raise e
+</pre>
+</dd>
+
+<dt>toplevel.ml:</dt>
+<dd class="doc_code">
+<pre>
+(*===----------------------------------------------------------------------===
+ * Top-Level parsing and JIT Driver
+ *===----------------------------------------------------------------------===*)
+
+open Llvm
+
+(* top ::= definition | external | expression | ';' *)
+let rec main_loop stream =
+ match Stream.peek stream with
+ | None -&gt; ()
+
+ (* ignore top-level semicolons. *)
+ | Some (Token.Kwd ';') -&gt;
+ Stream.junk stream;
+ main_loop stream
+
+ | Some token -&gt;
+ begin
+ try match token with
+ | Token.Def -&gt;
+ let e = Parser.parse_definition stream in
+ print_endline "parsed a function definition.";
+ dump_value (Codegen.codegen_func e);
+ | Token.Extern -&gt;
+ let e = Parser.parse_extern stream in
+ print_endline "parsed an extern.";
+ dump_value (Codegen.codegen_proto e);
+ | _ -&gt;
+ (* Evaluate a top-level expression into an anonymous function. *)
+ let e = Parser.parse_toplevel stream in
+ print_endline "parsed a top-level expr";
+ dump_value (Codegen.codegen_func e);
+ with Stream.Error s | Codegen.Error s -&gt;
+ (* Skip token for error recovery. *)
+ Stream.junk stream;
+ print_endline s;
+ end;
+ print_string "ready&gt; "; flush stdout;
+ main_loop stream
+</pre>
+</dd>
+
+<dt>toy.ml:</dt>
+<dd class="doc_code">
+<pre>
+(*===----------------------------------------------------------------------===
+ * Main driver code.
+ *===----------------------------------------------------------------------===*)
+
+open Llvm
+
+let main () =
+ (* Install standard binary operators.
+ * 1 is the lowest precedence. *)
+ Hashtbl.add Parser.binop_precedence '&lt;' 10;
+ Hashtbl.add Parser.binop_precedence '+' 20;
+ Hashtbl.add Parser.binop_precedence '-' 20;
+ Hashtbl.add Parser.binop_precedence '*' 40; (* highest. *)
+
+ (* Prime the first token. *)
+ print_string "ready&gt; "; flush stdout;
+ let stream = Lexer.lex (Stream.of_channel stdin) in
+
+ (* Run the main "interpreter loop" now. *)
+ Toplevel.main_loop stream;
+
+ (* Print out all the generated code. *)
+ dump_module Codegen.the_module
+;;
+
+main ()
+</pre>
+</dd>
+</dl>
+
+<a href="OCamlLangImpl4.html">Next: Adding JIT and Optimizer Support</a>
+</div>
+
+<!-- *********************************************************************** -->
+<hr>
+<address>
+ <a href="http://jigsaw.w3.org/css-validator/check/referer"><img
+ src="http://jigsaw.w3.org/css-validator/images/vcss" alt="Valid CSS!"></a>
+ <a href="http://validator.w3.org/check/referer"><img
+ src="http://www.w3.org/Icons/valid-html401" alt="Valid HTML 4.01!"></a>
+
+ <a href="mailto:sabre@nondot.org">Chris Lattner</a><br>
+ <a href="mailto:idadesub@users.sourceforge.net">Erick Tryzelaar</a><br>
+ <a href="http://llvm.org/">The LLVM Compiler Infrastructure</a><br>
+ Last modified: $Date$
+</address>
+</body>
+</html>
diff --git a/docs/tutorial/OCamlLangImpl4.html b/docs/tutorial/OCamlLangImpl4.html
new file mode 100644
index 00000000000..ca427eb0e08
--- /dev/null
+++ b/docs/tutorial/OCamlLangImpl4.html
@@ -0,0 +1,1026 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
+ "http://www.w3.org/TR/html4/strict.dtd">
+
+<html>
+<head>
+ <title>Kaleidoscope: Adding JIT and Optimizer Support</title>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
+ <meta name="author" content="Chris Lattner">
+ <meta name="author" content="Erick Tryzelaar">
+ <link rel="stylesheet" href="../_static/llvm.css" type="text/css">
+</head>
+
+<body>
+
+<h1>Kaleidoscope: Adding JIT and Optimizer Support</h1>
+
+<ul>
+<li><a href="index.html">Up to Tutorial Index</a></li>
+<li>Chapter 4
+ <ol>
+ <li><a href="#intro">Chapter 4 Introduction</a></li>
+ <li><a href="#trivialconstfold">Trivial Constant Folding</a></li>
+ <li><a href="#optimizerpasses">LLVM Optimization Passes</a></li>
+ <li><a href="#jit">Adding a JIT Compiler</a></li>
+ <li><a href="#code">Full Code Listing</a></li>
+ </ol>
+</li>
+<li><a href="OCamlLangImpl5.html">Chapter 5</a>: Extending the Language: Control
+Flow</li>
+</ul>
+
+<div class="doc_author">
+ <p>
+ Written by <a href="mailto:sabre@nondot.org">Chris Lattner</a>
+ and <a href="mailto:idadesub@users.sourceforge.net">Erick Tryzelaar</a>
+ </p>
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="intro">Chapter 4 Introduction</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>Welcome to Chapter 4 of the "<a href="index.html">Implementing a language
+with LLVM</a>" tutorial. Chapters 1-3 described the implementation of a simple
+language and added support for generating LLVM IR. This chapter describes
+two new techniques: adding optimizer support to your language, and adding JIT
+compiler support. These additions will demonstrate how to get nice, efficient code
+for the Kaleidoscope language.</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="trivialconstfold">Trivial Constant Folding</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p><b>Note:</b> the default <tt>IRBuilder</tt> now always includes the constant
+folding optimizations described below.</p>
+
+<p>
+Our demonstration for Chapter 3 is elegant and easy to extend. Unfortunately,
+it does not produce wonderful code. For example, when compiling simple code,
+we don't get obvious optimizations:</p>
+
+<div class="doc_code">
+<pre>
+ready&gt; <b>def test(x) 1+2+x;</b>
+Read function definition:
+define double @test(double %x) {
+entry:
+ %addtmp = fadd double 1.000000e+00, 2.000000e+00
+ %addtmp1 = fadd double %addtmp, %x
+ ret double %addtmp1
+}
+</pre>
+</div>
+
+<p>This code is a very, very literal transcription of the AST built by parsing
+the input. As such, this transcription lacks optimizations like constant folding
+(we'd like to get "<tt>add x, 3.0</tt>" in the example above) as well as other
+more important optimizations. Constant folding, in particular, is a very common
+and very important optimization: so much so that many language implementors
+implement constant folding support in their AST representation.</p>
+
+<p>With LLVM, you don't need this support in the AST. Since all calls to build
+LLVM IR go through the LLVM builder, it would be nice if the builder itself
+checked to see if there was a constant folding opportunity when you call it.
+If so, it could just do the constant fold and return the constant instead of
+creating an instruction. This is exactly what the <tt>LLVMFoldingBuilder</tt>
+class does.</p>
+
+<p>All we did was switch from <tt>LLVMBuilder</tt> to
+<tt>LLVMFoldingBuilder</tt>. With no other code changes, all of our
+instructions are now implicitly constant folded without us having to do anything
+about it. For example, the input above now compiles to:</p>
+
+<div class="doc_code">
+<pre>
+ready&gt; <b>def test(x) 1+2+x;</b>
+Read function definition:
+define double @test(double %x) {
+entry:
+ %addtmp = fadd double 3.000000e+00, %x
+ ret double %addtmp
+}
+</pre>
+</div>
+
+<p>Well, that was easy :). In practice, we recommend always using
+<tt>LLVMFoldingBuilder</tt> when generating code like this. It has no
+"syntactic overhead" for its use (you don't have to uglify your compiler with
+constant checks everywhere) and it can dramatically reduce the amount of
+LLVM IR that is generated in some cases (particularly for languages with a macro
+preprocessor or that use a lot of constants).</p>
+
+<p>On the other hand, the <tt>LLVMFoldingBuilder</tt> is limited by the fact
+that it does all of its analysis inline with the code as it is built. If you
+take a slightly more complex example:</p>
+
+<div class="doc_code">
+<pre>
+ready&gt; <b>def test(x) (1+2+x)*(x+(1+2));</b>
+ready&gt; Read function definition:
+define double @test(double %x) {
+entry:
+ %addtmp = fadd double 3.000000e+00, %x
+ %addtmp1 = fadd double %x, 3.000000e+00
+ %multmp = fmul double %addtmp, %addtmp1
+ ret double %multmp
+}
+</pre>
+</div>
+
+<p>In this case, the LHS and RHS of the multiplication are the same value. We'd
+really like to see this generate "<tt>tmp = x+3; result = tmp*tmp;</tt>" instead
+of computing "<tt>x*3</tt>" twice.</p>
+
+<p>Unfortunately, no amount of local analysis will be able to detect and correct
+this. This requires two transformations: reassociation of expressions (to
+make the add's lexically identical) and Common Subexpression Elimination (CSE)
+to delete the redundant add instruction. Fortunately, LLVM provides a broad
+range of optimizations that you can use, in the form of "passes".</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="optimizerpasses">LLVM Optimization Passes</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>LLVM provides many optimization passes, which do many different sorts of
+things and have different tradeoffs. Unlike other systems, LLVM doesn't hold
+to the mistaken notion that one set of optimizations is right for all languages
+and for all situations. LLVM allows a compiler implementor to make complete
+decisions about what optimizations to use, in which order, and in what
+situation.</p>
+
+<p>As a concrete example, LLVM supports both "whole module" passes, which look
+across as large a body of code as they can (often a whole file, but if run
+at link time, this can be a substantial portion of the whole program). It also
+supports and includes "per-function" passes which just operate on a single
+function at a time, without looking at other functions. For more information
+on passes and how they are run, see the <a href="../WritingAnLLVMPass.html">How
+to Write a Pass</a> document and the <a href="../Passes.html">List of LLVM
+Passes</a>.</p>
+
+<p>For Kaleidoscope, we are currently generating functions on the fly, one at
+a time, as the user types them in. We aren't shooting for the ultimate
+optimization experience in this setting, but we also want to catch the easy and
+quick stuff where possible. As such, we will choose to run a few per-function
+optimizations as the user types the function in. If we wanted to make a "static
+Kaleidoscope compiler", we would use exactly the code we have now, except that
+we would defer running the optimizer until the entire file has been parsed.</p>
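+
+<p>That "static compiler" variant might look roughly like the following sketch
+(hypothetical code, not part of the tutorial; <tt>optimize_whole_module</tt>
+and <tt>the_mpm</tt> are names made up for illustration). It uses a
+whole-module <tt>PassManager</tt> and runs it once at the end:</p>
+
+<div class="doc_code">
+<pre>
+(* Run whole-module optimizations once, after the entire input is parsed. *)
+let optimize_whole_module () =
+  let the_mpm = PassManager.create () in
+  (* ... add whichever passes you want to the_mpm here ... *)
+  ignore (PassManager.run_module Codegen.the_module the_mpm);
+  PassManager.dispose the_mpm
+</pre>
+</div>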
+
+<p>In order to get per-function optimizations going, we need to set up a
+<a href="../WritingAnLLVMPass.html#passmanager">Llvm.PassManager</a> to hold and
+organize the LLVM optimizations that we want to run. Once we have that, we can
+add a set of optimizations to run. The code looks like this:</p>
+
+<div class="doc_code">
+<pre>
+ (* Create the JIT. *)
+ let the_execution_engine = ExecutionEngine.create Codegen.the_module in
+ let the_fpm = PassManager.create_function Codegen.the_module in
+
+ (* Set up the optimizer pipeline. Start with registering info about how the
+ * target lays out data structures. *)
+ TargetData.add (ExecutionEngine.target_data the_execution_engine) the_fpm;
+
+ (* Do simple "peephole" optimizations and bit-twiddling optzn. *)
+  add_instruction_combination the_fpm;
+
+ (* reassociate expressions. *)
+ add_reassociation the_fpm;
+
+ (* Eliminate Common SubExpressions. *)
+ add_gvn the_fpm;
+
+ (* Simplify the control flow graph (deleting unreachable blocks, etc). *)
+ add_cfg_simplification the_fpm;
+
+ ignore (PassManager.initialize the_fpm);
+
+ (* Run the main "interpreter loop" now. *)
+ Toplevel.main_loop the_fpm the_execution_engine stream;
+</pre>
+</div>
+
+<p>The meat of the matter here is the definition of "<tt>the_fpm</tt>". It
+requires <tt>the_module</tt> to construct itself. Once it is
+set up, we use a series of "add" calls to add a bunch of LLVM passes. The
+first pass is basically boilerplate, it adds a pass so that later optimizations
+know how the data structures in the program are laid out. The
+"<tt>the_execution_engine</tt>" variable is related to the JIT, which we will
+get to in the next section.</p>
+
+<p>In this case, we choose to add 4 optimization passes. The passes we chose
+here are a pretty standard set of "cleanup" optimizations that are useful for
+a wide variety of code. I won't delve into what they do but, believe me,
+they are a good starting place :).</p>
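+
+<p>If you want to experiment, other per-function passes exported by
+<tt>Llvm_scalar_opts</tt> can be added in exactly the same way. For instance
+(an optional extra, not part of the tutorial's pipeline):</p>
+
+<div class="doc_code">
+<pre>
+  (* Promote allocas to registers ("mem2reg"); this becomes important once the
+   * language gains mutable variables in a later chapter. *)
+  add_memory_to_register_promotion the_fpm;
+</pre>
+</div>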
+
+<p>Once the <tt>Llvm.PassManager</tt> is set up, we need to make use of it.
+We do this by running it after our newly created function is constructed (in
+<tt>Codegen.codegen_func</tt>), but before it is returned to the client:</p>
+
+<div class="doc_code">
+<pre>
+let codegen_func the_fpm = function
+ ...
+ try
+ let ret_val = codegen_expr body in
+
+ (* Finish off the function. *)
+ let _ = build_ret ret_val builder in
+
+ (* Validate the generated code, checking for consistency. *)
+ Llvm_analysis.assert_valid_function the_function;
+
+ (* Optimize the function. *)
+ let _ = PassManager.run_function the_function the_fpm in
+
+ the_function
+</pre>
+</div>
+
+<p>As you can see, this is pretty straightforward. The pass manager
+(<tt>the_fpm</tt>) optimizes and updates the LLVM function in place, improving
+(hopefully) its body. With this in place, we can try our test above again:</p>
+
+<div class="doc_code">
+<pre>
+ready&gt; <b>def test(x) (1+2+x)*(x+(1+2));</b>
+ready&gt; Read function definition:
+define double @test(double %x) {
+entry:
+ %addtmp = fadd double %x, 3.000000e+00
+ %multmp = fmul double %addtmp, %addtmp
+ ret double %multmp
+}
+</pre>
+</div>
+
+<p>As expected, we now get our nicely optimized code, saving a floating point
+add instruction from every execution of this function.</p>
+
+<p>LLVM provides a wide variety of optimizations that can be used in certain
+circumstances. Some <a href="../Passes.html">documentation about the various
+passes</a> is available, but it isn't very complete. Another good source of
+ideas is to look at the passes that <tt>Clang</tt> runs to get
+started. The "<tt>opt</tt>" tool allows you to experiment with passes from the
+command line, so you can see if they do anything.</p>
+
+<p>Now that we have reasonable code coming out of our front-end, let's talk about
+executing it!</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="jit">Adding a JIT Compiler</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>Code that is available in LLVM IR can have a wide variety of tools
+applied to it. For example, you can run optimizations on it (as we did above),
+you can dump it out in textual or binary forms, you can compile the code to an
+assembly file (.s) for some target, or you can JIT compile it. The nice thing
+about the LLVM IR representation is that it is the "common currency" between
+many different parts of the compiler.
+</p>
+
+<p>In this section, we'll add JIT compiler support to our interpreter. The
+basic idea that we want for Kaleidoscope is to have the user enter function
+bodies as they do now, but immediately evaluate the top-level expressions they
+type in. For example, if they type in "1 + 2;", we should evaluate and print
+out 3. If they define a function, they should be able to call it from the
+command line.</p>
+
+<p>In order to do this, we first declare and initialize the JIT. This is done
+by adding a global variable and a call in <tt>main</tt>:</p>
+
+<div class="doc_code">
+<pre>
+...
+let main () =
+ ...
+ <b>(* Create the JIT. *)
+ let the_execution_engine = ExecutionEngine.create Codegen.the_module in</b>
+ ...
+</pre>
+</div>
+
+<p>This creates an abstract "Execution Engine" which can be either a JIT
+compiler or the LLVM interpreter. LLVM will automatically pick a JIT compiler
+for you if one is available for your platform, otherwise it will fall back to
+the interpreter.</p>
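+
+<p>One practical note, visible in the <tt>toy.ml</tt> listing below: the native
+code generator must be registered before the execution engine is created,
+otherwise no JIT is available and you are likely to fall back to the (much
+slower) interpreter:</p>
+
+<div class="doc_code">
+<pre>
+let main () =
+  <b>(* Make the native code generator available to ExecutionEngine.create. *)
+  ignore (initialize_native_target ());</b>
+  ...
+</pre>
+</div>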
+
+<p>Once the <tt>Llvm_executionengine.ExecutionEngine.t</tt> is created, the JIT
+is ready to be used. There are a variety of APIs that are useful, but the
+simplest one is the "<tt>Llvm_executionengine.ExecutionEngine.run_function</tt>"
+function. It JIT compiles the specified LLVM function, runs it with the given
+arguments, and returns the result as a <tt>GenericValue.t</tt>. In our case,
+this means that we can change the code that parses a top-level expression to
+look like this:</p>
+
+<div class="doc_code">
+<pre>
+ (* Evaluate a top-level expression into an anonymous function. *)
+ let e = Parser.parse_toplevel stream in
+ print_endline "parsed a top-level expr";
+ let the_function = Codegen.codegen_func the_fpm e in
+ dump_value the_function;
+
+ (* JIT the function, returning a function pointer. *)
+ let result = ExecutionEngine.run_function the_function [||]
+ the_execution_engine in
+
+ print_string "Evaluated to ";
+ print_float (GenericValue.as_float Codegen.double_type result);
+ print_newline ();
+</pre>
+</div>
+
+<p>Recall that we compile top-level expressions into a self-contained LLVM
+function that takes no arguments and returns the computed double. Because the
+LLVM JIT compiler matches the native platform ABI, the generated code can be
+called directly: there is no difference between JIT compiled code and native
+machine code that is statically linked into your application.
+<tt>run_function</tt> simply runs the function for us and hands back the
+computed double wrapped in a <tt>GenericValue.t</tt>.</p>
+
+<p>With just these two changes, let's see how Kaleidoscope works now!</p>
+
+<div class="doc_code">
+<pre>
+ready&gt; <b>4+5;</b>
+define double @""() {
+entry:
+ ret double 9.000000e+00
+}
+
+<em>Evaluated to 9.000000</em>
+</pre>
+</div>
+
+<p>Well, this looks like it is basically working. The dump of the function
+shows the "no argument function that always returns double" that we synthesize
+for each top level expression that is typed in. This demonstrates very basic
+functionality, but can we do more?</p>
+
+<div class="doc_code">
+<pre>
+ready&gt; <b>def testfunc(x y) x + y*2; </b>
+Read function definition:
+define double @testfunc(double %x, double %y) {
+entry:
+ %multmp = fmul double %y, 2.000000e+00
+ %addtmp = fadd double %multmp, %x
+ ret double %addtmp
+}
+
+ready&gt; <b>testfunc(4, 10);</b>
+define double @""() {
+entry:
+ %calltmp = call double @testfunc(double 4.000000e+00, double 1.000000e+01)
+ ret double %calltmp
+}
+
+<em>Evaluated to 24.000000</em>
+</pre>
+</div>
+
+<p>This illustrates that we can now call user code, but there is something a bit
+subtle going on here. Note that we only invoke the JIT on the anonymous
+functions that <em>call testfunc</em>, but we never invoke it
+on <em>testfunc</em> itself. What actually happened here is that the JIT
+scanned for all non-JIT'd functions transitively called from the anonymous
+function and compiled all of them before returning
+from <tt>run_function</tt>.</p>
+
+<p>The JIT provides a number of other more advanced interfaces for things like
+freeing allocated machine code, rejit'ing functions to update them, etc.
+However, even with this simple code, we get some surprisingly powerful
+capabilities - check this out (I removed the dump of the anonymous functions,
+you should get the idea by now :) :</p>
+
+<div class="doc_code">
+<pre>
+ready&gt; <b>extern sin(x);</b>
+Read extern:
+declare double @sin(double)
+
+ready&gt; <b>extern cos(x);</b>
+Read extern:
+declare double @cos(double)
+
+ready&gt; <b>sin(1.0);</b>
+<em>Evaluated to 0.841471</em>
+
+ready&gt; <b>def foo(x) sin(x)*sin(x) + cos(x)*cos(x);</b>
+Read function definition:
+define double @foo(double %x) {
+entry:
+ %calltmp = call double @sin(double %x)
+ %multmp = fmul double %calltmp, %calltmp
+ %calltmp2 = call double @cos(double %x)
+ %multmp4 = fmul double %calltmp2, %calltmp2
+ %addtmp = fadd double %multmp, %multmp4
+ ret double %addtmp
+}
+
+ready&gt; <b>foo(4.0);</b>
+<em>Evaluated to 1.000000</em>
+</pre>
+</div>
+
+<p>Whoa, how does the JIT know about sin and cos? The answer is surprisingly
+simple: in this example, the JIT started execution of a function and got to a
+function call. It realized that the function was not yet JIT compiled and
+invoked the standard set of routines to resolve the function. In this case,
+there is no body defined for the function, so the JIT ended up calling
+"<tt>dlsym("sin")</tt>" on the Kaleidoscope process itself. Since
+"<tt>sin</tt>" is defined within the JIT's address space, it simply patches up
+calls in the module to call the libm version of <tt>sin</tt> directly.</p>
+
+<p>The LLVM JIT provides a number of interfaces (look in the
+<tt>llvm_executionengine.mli</tt> file) for controlling how unknown functions
+get resolved. It allows you to establish explicit mappings between IR objects
+and addresses (useful for LLVM global variables that you want to map to static
+tables, for example), allows you to dynamically decide on the fly based on the
+function name, and even allows you to have the JIT compile functions lazily the
+first time they're called.</p>
+
+<p>One interesting application of this is that we can now extend the language
+by writing arbitrary C code to implement operations. For example, if we add:
+</p>
+
+<div class="doc_code">
+<pre>
+/* putchard - putchar that takes a double and returns 0. */
+extern "C"
+double putchard(double X) {
+ putchar((char)X);
+ return 0;
+}
+</pre>
+</div>
+
+<p>Now we can produce simple output to the console by using things like:
+"<tt>extern putchard(x); putchard(120);</tt>", which prints a lowercase 'x' on
+the console (120 is the ASCII code for 'x'). Similar code could be used to
+implement file I/O, console input, and many other capabilities in
+Kaleidoscope.</p>
+
+<p>This completes the JIT and optimizer chapter of the Kaleidoscope tutorial. At
+this point, we can compile a non-Turing-complete programming language, optimize
+and JIT compile it in a user-driven way. Next up we'll look into <a
+href="OCamlLangImpl5.html">extending the language with control flow
+constructs</a>, tackling some interesting LLVM IR issues along the way.</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="code">Full Code Listing</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>
+Here is the complete code listing for our running example, enhanced with the
+LLVM JIT and optimizer. To build this example, use:
+</p>
+
+<div class="doc_code">
+<pre>
+# Compile
+ocamlbuild toy.byte
+# Run
+./toy.byte
+</pre>
+</div>
+
+<p>Here is the code:</p>
+
+<dl>
+<dt>_tags:</dt>
+<dd class="doc_code">
+<pre>
+&lt;{lexer,parser}.ml&gt;: use_camlp4, pp(camlp4of)
+&lt;*.{byte,native}&gt;: g++, use_llvm, use_llvm_analysis
+&lt;*.{byte,native}&gt;: use_llvm_executionengine, use_llvm_target
+&lt;*.{byte,native}&gt;: use_llvm_scalar_opts, use_bindings
+</pre>
+</dd>
+
+<dt>myocamlbuild.ml:</dt>
+<dd class="doc_code">
+<pre>
+open Ocamlbuild_plugin;;
+
+ocaml_lib ~extern:true "llvm";;
+ocaml_lib ~extern:true "llvm_analysis";;
+ocaml_lib ~extern:true "llvm_executionengine";;
+ocaml_lib ~extern:true "llvm_target";;
+ocaml_lib ~extern:true "llvm_scalar_opts";;
+
+flag ["link"; "ocaml"; "g++"] (S[A"-cc"; A"g++"]);;
+dep ["link"; "ocaml"; "use_bindings"] ["bindings.o"];;
+</pre>
+</dd>
+
+<dt>token.ml:</dt>
+<dd class="doc_code">
+<pre>
+(*===----------------------------------------------------------------------===
+ * Lexer Tokens
+ *===----------------------------------------------------------------------===*)
+
+(* The lexer returns these 'Kwd' if it is an unknown character, otherwise one of
+ * these others for known things. *)
+type token =
+ (* commands *)
+ | Def | Extern
+
+ (* primary *)
+ | Ident of string | Number of float
+
+ (* unknown *)
+ | Kwd of char
+</pre>
+</dd>
+
+<dt>lexer.ml:</dt>
+<dd class="doc_code">
+<pre>
+(*===----------------------------------------------------------------------===
+ * Lexer
+ *===----------------------------------------------------------------------===*)
+
+let rec lex = parser
+ (* Skip any whitespace. *)
+ | [&lt; ' (' ' | '\n' | '\r' | '\t'); stream &gt;] -&gt; lex stream
+
+ (* identifier: [a-zA-Z][a-zA-Z0-9] *)
+ | [&lt; ' ('A' .. 'Z' | 'a' .. 'z' as c); stream &gt;] -&gt;
+ let buffer = Buffer.create 1 in
+ Buffer.add_char buffer c;
+ lex_ident buffer stream
+
+ (* number: [0-9.]+ *)
+ | [&lt; ' ('0' .. '9' as c); stream &gt;] -&gt;
+ let buffer = Buffer.create 1 in
+ Buffer.add_char buffer c;
+ lex_number buffer stream
+
+ (* Comment until end of line. *)
+ | [&lt; ' ('#'); stream &gt;] -&gt;
+ lex_comment stream
+
+ (* Otherwise, just return the character as its ascii value. *)
+ | [&lt; 'c; stream &gt;] -&gt;
+ [&lt; 'Token.Kwd c; lex stream &gt;]
+
+ (* end of stream. *)
+ | [&lt; &gt;] -&gt; [&lt; &gt;]
+
+and lex_number buffer = parser
+ | [&lt; ' ('0' .. '9' | '.' as c); stream &gt;] -&gt;
+ Buffer.add_char buffer c;
+ lex_number buffer stream
+ | [&lt; stream=lex &gt;] -&gt;
+ [&lt; 'Token.Number (float_of_string (Buffer.contents buffer)); stream &gt;]
+
+and lex_ident buffer = parser
+ | [&lt; ' ('A' .. 'Z' | 'a' .. 'z' | '0' .. '9' as c); stream &gt;] -&gt;
+ Buffer.add_char buffer c;
+ lex_ident buffer stream
+ | [&lt; stream=lex &gt;] -&gt;
+ match Buffer.contents buffer with
+ | "def" -&gt; [&lt; 'Token.Def; stream &gt;]
+ | "extern" -&gt; [&lt; 'Token.Extern; stream &gt;]
+ | id -&gt; [&lt; 'Token.Ident id; stream &gt;]
+
+and lex_comment = parser
+ | [&lt; ' ('\n'); stream=lex &gt;] -&gt; stream
+ | [&lt; 'c; e=lex_comment &gt;] -&gt; e
+ | [&lt; &gt;] -&gt; [&lt; &gt;]
+</pre>
+</dd>
+
+<dt>ast.ml:</dt>
+<dd class="doc_code">
+<pre>
+(*===----------------------------------------------------------------------===
+ * Abstract Syntax Tree (aka Parse Tree)
+ *===----------------------------------------------------------------------===*)
+
+(* expr - Base type for all expression nodes. *)
+type expr =
+ (* variant for numeric literals like "1.0". *)
+ | Number of float
+
+ (* variant for referencing a variable, like "a". *)
+ | Variable of string
+
+ (* variant for a binary operator. *)
+ | Binary of char * expr * expr
+
+ (* variant for function calls. *)
+ | Call of string * expr array
+
+(* proto - This type represents the "prototype" for a function, which captures
+ * its name, and its argument names (thus implicitly the number of arguments the
+ * function takes). *)
+type proto = Prototype of string * string array
+
+(* func - This type represents a function definition itself. *)
+type func = Function of proto * expr
+</pre>
+</dd>
+
+<dt>parser.ml:</dt>
+<dd class="doc_code">
+<pre>
+(*===---------------------------------------------------------------------===
+ * Parser
+ *===---------------------------------------------------------------------===*)
+
+(* binop_precedence - This holds the precedence for each binary operator that is
+ * defined *)
+let binop_precedence:(char, int) Hashtbl.t = Hashtbl.create 10
+
+(* precedence - Get the precedence of the pending binary operator token. *)
+let precedence c = try Hashtbl.find binop_precedence c with Not_found -&gt; -1
+
+(* primary
+ * ::= identifier
+ * ::= numberexpr
+ * ::= parenexpr *)
+let rec parse_primary = parser
+ (* numberexpr ::= number *)
+ | [&lt; 'Token.Number n &gt;] -&gt; Ast.Number n
+
+ (* parenexpr ::= '(' expression ')' *)
+ | [&lt; 'Token.Kwd '('; e=parse_expr; 'Token.Kwd ')' ?? "expected ')'" &gt;] -&gt; e
+
+ (* identifierexpr
+ * ::= identifier
+ * ::= identifier '(' argumentexpr ')' *)
+ | [&lt; 'Token.Ident id; stream &gt;] -&gt;
+ let rec parse_args accumulator = parser
+ | [&lt; e=parse_expr; stream &gt;] -&gt;
+ begin parser
+ | [&lt; 'Token.Kwd ','; e=parse_args (e :: accumulator) &gt;] -&gt; e
+ | [&lt; &gt;] -&gt; e :: accumulator
+ end stream
+ | [&lt; &gt;] -&gt; accumulator
+ in
+ let rec parse_ident id = parser
+ (* Call. *)
+ | [&lt; 'Token.Kwd '(';
+ args=parse_args [];
+ 'Token.Kwd ')' ?? "expected ')'"&gt;] -&gt;
+ Ast.Call (id, Array.of_list (List.rev args))
+
+ (* Simple variable ref. *)
+ | [&lt; &gt;] -&gt; Ast.Variable id
+ in
+ parse_ident id stream
+
+ | [&lt; &gt;] -&gt; raise (Stream.Error "unknown token when expecting an expression.")
+
+(* binoprhs
+ * ::= ('+' primary)* *)
+and parse_bin_rhs expr_prec lhs stream =
+ match Stream.peek stream with
+ (* If this is a binop, find its precedence. *)
+ | Some (Token.Kwd c) when Hashtbl.mem binop_precedence c -&gt;
+ let token_prec = precedence c in
+
+ (* If this is a binop that binds at least as tightly as the current binop,
+ * consume it, otherwise we are done. *)
+ if token_prec &lt; expr_prec then lhs else begin
+ (* Eat the binop. *)
+ Stream.junk stream;
+
+ (* Parse the primary expression after the binary operator. *)
+ let rhs = parse_primary stream in
+
+ (* Okay, we know this is a binop. *)
+ let rhs =
+ match Stream.peek stream with
+ | Some (Token.Kwd c2) -&gt;
+ (* If BinOp binds less tightly with rhs than the operator after
+ * rhs, let the pending operator take rhs as its lhs. *)
+ let next_prec = precedence c2 in
+ if token_prec &lt; next_prec
+ then parse_bin_rhs (token_prec + 1) rhs stream
+ else rhs
+ | _ -&gt; rhs
+ in
+
+ (* Merge lhs/rhs. *)
+ let lhs = Ast.Binary (c, lhs, rhs) in
+ parse_bin_rhs expr_prec lhs stream
+ end
+ | _ -&gt; lhs
+
+(* expression
+ * ::= primary binoprhs *)
+and parse_expr = parser
+ | [&lt; lhs=parse_primary; stream &gt;] -&gt; parse_bin_rhs 0 lhs stream
+
+(* prototype
+ * ::= id '(' id* ')' *)
+let parse_prototype =
+ let rec parse_args accumulator = parser
+ | [&lt; 'Token.Ident id; e=parse_args (id::accumulator) &gt;] -&gt; e
+ | [&lt; &gt;] -&gt; accumulator
+ in
+
+ parser
+ | [&lt; 'Token.Ident id;
+ 'Token.Kwd '(' ?? "expected '(' in prototype";
+ args=parse_args [];
+ 'Token.Kwd ')' ?? "expected ')' in prototype" &gt;] -&gt;
+ (* success. *)
+ Ast.Prototype (id, Array.of_list (List.rev args))
+
+ | [&lt; &gt;] -&gt;
+ raise (Stream.Error "expected function name in prototype")
+
+(* definition ::= 'def' prototype expression *)
+let parse_definition = parser
+ | [&lt; 'Token.Def; p=parse_prototype; e=parse_expr &gt;] -&gt;
+ Ast.Function (p, e)
+
+(* toplevelexpr ::= expression *)
+let parse_toplevel = parser
+ | [&lt; e=parse_expr &gt;] -&gt;
+ (* Make an anonymous proto. *)
+ Ast.Function (Ast.Prototype ("", [||]), e)
+
+(* external ::= 'extern' prototype *)
+let parse_extern = parser
+ | [&lt; 'Token.Extern; e=parse_prototype &gt;] -&gt; e
+</pre>
+</dd>
+
+<dt>codegen.ml:</dt>
+<dd class="doc_code">
+<pre>
+(*===----------------------------------------------------------------------===
+ * Code Generation
+ *===----------------------------------------------------------------------===*)
+
+open Llvm
+
+exception Error of string
+
+let context = global_context ()
+let the_module = create_module context "my cool jit"
+let builder = builder context
+let named_values:(string, llvalue) Hashtbl.t = Hashtbl.create 10
+let double_type = double_type context
+
+let rec codegen_expr = function
+ | Ast.Number n -&gt; const_float double_type n
+ | Ast.Variable name -&gt;
+ (try Hashtbl.find named_values name with
+ | Not_found -&gt; raise (Error "unknown variable name"))
+ | Ast.Binary (op, lhs, rhs) -&gt;
+ let lhs_val = codegen_expr lhs in
+ let rhs_val = codegen_expr rhs in
+ begin
+ match op with
+ | '+' -&gt; build_add lhs_val rhs_val "addtmp" builder
+ | '-' -&gt; build_sub lhs_val rhs_val "subtmp" builder
+ | '*' -&gt; build_mul lhs_val rhs_val "multmp" builder
+ | '&lt;' -&gt;
+ (* Convert bool 0/1 to double 0.0 or 1.0 *)
+ let i = build_fcmp Fcmp.Ult lhs_val rhs_val "cmptmp" builder in
+ build_uitofp i double_type "booltmp" builder
+ | _ -&gt; raise (Error "invalid binary operator")
+ end
+ | Ast.Call (callee, args) -&gt;
+ (* Look up the name in the module table. *)
+ let callee =
+ match lookup_function callee the_module with
+ | Some callee -&gt; callee
+ | None -&gt; raise (Error "unknown function referenced")
+ in
+ let params = params callee in
+
+ (* If argument mismatch error. *)
+ if Array.length params == Array.length args then () else
+ raise (Error "incorrect # arguments passed");
+ let args = Array.map codegen_expr args in
+ build_call callee args "calltmp" builder
+
+let codegen_proto = function
+ | Ast.Prototype (name, args) -&gt;
+ (* Make the function type: double(double,double) etc. *)
+ let doubles = Array.make (Array.length args) double_type in
+ let ft = function_type double_type doubles in
+ let f =
+ match lookup_function name the_module with
+ | None -&gt; declare_function name ft the_module
+
+ (* If 'f' conflicted, there was already something named 'name'. If it
+ * has a body, don't allow redefinition or reextern. *)
+ | Some f -&gt;
+ (* If 'f' already has a body, reject this. *)
+ if block_begin f &lt;&gt; At_end f then
+ raise (Error "redefinition of function");
+
+ (* If 'f' took a different number of arguments, reject. *)
+ if element_type (type_of f) &lt;&gt; ft then
+ raise (Error "redefinition of function with different # args");
+ f
+ in
+
+ (* Set names for all arguments. *)
+ Array.iteri (fun i a -&gt;
+ let n = args.(i) in
+ set_value_name n a;
+ Hashtbl.add named_values n a;
+ ) (params f);
+ f
+
+let codegen_func the_fpm = function
+ | Ast.Function (proto, body) -&gt;
+ Hashtbl.clear named_values;
+ let the_function = codegen_proto proto in
+
+ (* Create a new basic block to start insertion into. *)
+ let bb = append_block context "entry" the_function in
+ position_at_end bb builder;
+
+ try
+ let ret_val = codegen_expr body in
+
+ (* Finish off the function. *)
+ let _ = build_ret ret_val builder in
+
+ (* Validate the generated code, checking for consistency. *)
+ Llvm_analysis.assert_valid_function the_function;
+
+ (* Optimize the function. *)
+ let _ = PassManager.run_function the_function the_fpm in
+
+ the_function
+ with e -&gt;
+ delete_function the_function;
+ raise e
+</pre>
+</dd>
+
+<dt>toplevel.ml:</dt>
+<dd class="doc_code">
+<pre>
+(*===----------------------------------------------------------------------===
+ * Top-Level parsing and JIT Driver
+ *===----------------------------------------------------------------------===*)
+
+open Llvm
+open Llvm_executionengine
+
+(* top ::= definition | external | expression | ';' *)
+let rec main_loop the_fpm the_execution_engine stream =
+ match Stream.peek stream with
+ | None -&gt; ()
+
+ (* ignore top-level semicolons. *)
+ | Some (Token.Kwd ';') -&gt;
+ Stream.junk stream;
+ main_loop the_fpm the_execution_engine stream
+
+ | Some token -&gt;
+ begin
+ try match token with
+ | Token.Def -&gt;
+ let e = Parser.parse_definition stream in
+ print_endline "parsed a function definition.";
+ dump_value (Codegen.codegen_func the_fpm e);
+ | Token.Extern -&gt;
+ let e = Parser.parse_extern stream in
+ print_endline "parsed an extern.";
+ dump_value (Codegen.codegen_proto e);
+ | _ -&gt;
+ (* Evaluate a top-level expression into an anonymous function. *)
+ let e = Parser.parse_toplevel stream in
+ print_endline "parsed a top-level expr";
+ let the_function = Codegen.codegen_func the_fpm e in
+ dump_value the_function;
+
+ (* JIT the function, returning a function pointer. *)
+ let result = ExecutionEngine.run_function the_function [||]
+ the_execution_engine in
+
+ print_string "Evaluated to ";
+ print_float (GenericValue.as_float Codegen.double_type result);
+ print_newline ();
+ with Stream.Error s | Codegen.Error s -&gt;
+ (* Skip token for error recovery. *)
+ Stream.junk stream;
+ print_endline s;
+ end;
+ print_string "ready&gt; "; flush stdout;
+ main_loop the_fpm the_execution_engine stream
+</pre>
+</dd>
+
+<dt>toy.ml:</dt>
+<dd class="doc_code">
+<pre>
+(*===----------------------------------------------------------------------===
+ * Main driver code.
+ *===----------------------------------------------------------------------===*)
+
+open Llvm
+open Llvm_executionengine
+open Llvm_target
+open Llvm_scalar_opts
+
+let main () =
+ ignore (initialize_native_target ());
+
+ (* Install standard binary operators.
+ * 1 is the lowest precedence. *)
+ Hashtbl.add Parser.binop_precedence '&lt;' 10;
+ Hashtbl.add Parser.binop_precedence '+' 20;
+ Hashtbl.add Parser.binop_precedence '-' 20;
+ Hashtbl.add Parser.binop_precedence '*' 40; (* highest. *)
+
+ (* Prime the first token. *)
+ print_string "ready&gt; "; flush stdout;
+ let stream = Lexer.lex (Stream.of_channel stdin) in
+
+ (* Create the JIT. *)
+ let the_execution_engine = ExecutionEngine.create Codegen.the_module in
+ let the_fpm = PassManager.create_function Codegen.the_module in
+
+ (* Set up the optimizer pipeline. Start with registering info about how the
+ * target lays out data structures. *)
+ TargetData.add (ExecutionEngine.target_data the_execution_engine) the_fpm;
+
+ (* Do simple "peephole" optimizations and bit-twiddling optzn. *)
+ add_instruction_combination the_fpm;
+
+ (* reassociate expressions. *)
+ add_reassociation the_fpm;
+
+ (* Eliminate Common SubExpressions. *)
+ add_gvn the_fpm;
+
+ (* Simplify the control flow graph (deleting unreachable blocks, etc). *)
+ add_cfg_simplification the_fpm;
+
+ ignore (PassManager.initialize the_fpm);
+
+ (* Run the main "interpreter loop" now. *)
+ Toplevel.main_loop the_fpm the_execution_engine stream;
+
+ (* Print out all the generated code. *)
+ dump_module Codegen.the_module
+;;
+
+main ()
+</pre>
+</dd>
+
+<dt>bindings.c</dt>
+<dd class="doc_code">
+<pre>
+#include &lt;stdio.h&gt;
+
+/* putchard - putchar that takes a double and returns 0. */
+extern double putchard(double X) {
+ putchar((char)X);
+ return 0;
+}
+</pre>
+</dd>
+</dl>
+
+<a href="OCamlLangImpl5.html">Next: Extending the language: control flow</a>
+</div>
+
+<!-- *********************************************************************** -->
+<hr>
+<address>
+ <a href="http://jigsaw.w3.org/css-validator/check/referer"><img
+ src="http://jigsaw.w3.org/css-validator/images/vcss" alt="Valid CSS!"></a>
+ <a href="http://validator.w3.org/check/referer"><img
+ src="http://www.w3.org/Icons/valid-html401" alt="Valid HTML 4.01!"></a>
+
+ <a href="mailto:sabre@nondot.org">Chris Lattner</a><br>
+ <a href="mailto:idadesub@users.sourceforge.net">Erick Tryzelaar</a><br>
+ <a href="http://llvm.org/">The LLVM Compiler Infrastructure</a><br>
+ Last modified: $Date$
+</address>
+</body>
+</html>
diff --git a/docs/tutorial/OCamlLangImpl5.html b/docs/tutorial/OCamlLangImpl5.html
new file mode 100644
index 00000000000..feeed6a5337
--- /dev/null
+++ b/docs/tutorial/OCamlLangImpl5.html
@@ -0,0 +1,1560 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
+ "http://www.w3.org/TR/html4/strict.dtd">
+
+<html>
+<head>
+ <title>Kaleidoscope: Extending the Language: Control Flow</title>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
+ <meta name="author" content="Chris Lattner">
+ <meta name="author" content="Erick Tryzelaar">
+ <link rel="stylesheet" href="../_static/llvm.css" type="text/css">
+</head>
+
+<body>
+
+<h1>Kaleidoscope: Extending the Language: Control Flow</h1>
+
+<ul>
+<li><a href="index.html">Up to Tutorial Index</a></li>
+<li>Chapter 5
+ <ol>
+ <li><a href="#intro">Chapter 5 Introduction</a></li>
+ <li><a href="#ifthen">If/Then/Else</a>
+ <ol>
+ <li><a href="#iflexer">Lexer Extensions</a></li>
+ <li><a href="#ifast">AST Extensions</a></li>
+ <li><a href="#ifparser">Parser Extensions</a></li>
+ <li><a href="#ifir">LLVM IR</a></li>
+ <li><a href="#ifcodegen">Code Generation</a></li>
+ </ol>
+ </li>
+ <li><a href="#for">'for' Loop Expression</a>
+ <ol>
+ <li><a href="#forlexer">Lexer Extensions</a></li>
+ <li><a href="#forast">AST Extensions</a></li>
+ <li><a href="#forparser">Parser Extensions</a></li>
+ <li><a href="#forir">LLVM IR</a></li>
+ <li><a href="#forcodegen">Code Generation</a></li>
+ </ol>
+ </li>
+ <li><a href="#code">Full Code Listing</a></li>
+ </ol>
+</li>
+<li><a href="OCamlLangImpl6.html">Chapter 6</a>: Extending the Language:
+User-defined Operators</li>
+</ul>
+
+<div class="doc_author">
+ <p>
+ Written by <a href="mailto:sabre@nondot.org">Chris Lattner</a>
+ and <a href="mailto:idadesub@users.sourceforge.net">Erick Tryzelaar</a>
+ </p>
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="intro">Chapter 5 Introduction</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>Welcome to Chapter 5 of the "<a href="index.html">Implementing a language
+with LLVM</a>" tutorial. Parts 1-4 described the implementation of the simple
+Kaleidoscope language and included support for generating LLVM IR, followed by
+optimizations and a JIT compiler. Unfortunately, as presented, Kaleidoscope is
+mostly useless: it has no control flow other than call and return. This means
+that you can't have conditional branches in the code, significantly limiting its
+power. In this episode of "build that compiler", we'll extend Kaleidoscope to
+have an if/then/else expression plus a simple 'for' loop.</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="ifthen">If/Then/Else</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>
+Extending Kaleidoscope to support if/then/else is quite straightforward. It
+basically requires adding support for this "new" concept to the lexer,
+parser, AST, and LLVM code emitter. This example is nice, because it shows how
+easy it is to "grow" a language over time, incrementally extending it as new
+ideas are discovered.</p>
+
+<p>Before we get going on "how" we add this extension, let's talk about "what" we
+want. The basic idea is that we want to be able to write this sort of thing:
+</p>
+
+<div class="doc_code">
+<pre>
+def fib(x)
+ if x &lt; 3 then
+ 1
+ else
+ fib(x-1)+fib(x-2);
+</pre>
+</div>
+
+<p>In Kaleidoscope, every construct is an expression: there are no statements.
+As such, the if/then/else expression needs to return a value like any other.
+Since we're using a mostly functional form, we'll have it evaluate its
+conditional, then return the 'then' or 'else' value based on how the condition
+was resolved. This is very similar to the C "?:" expression.</p>
+
+<p>The semantics of the if/then/else expression is that it evaluates the
+condition to a boolean equality value: 0.0 is considered to be false and
+everything else is considered to be true.
+If the condition is true, the first subexpression is evaluated and returned, if
+the condition is false, the second subexpression is evaluated and returned.
+Since Kaleidoscope allows side-effects, this behavior is important to nail down.
+</p>
+
+<p>Now that we know what we "want", let's break this down into its constituent
+pieces.</p>
+
+<!-- ======================================================================= -->
+<h4><a name="iflexer">Lexer Extensions for If/Then/Else</a></h4>
+<!-- ======================================================================= -->
+
+
+<div>
+
+<p>The lexer extensions are straightforward. First we add new variants
+for the relevant tokens:</p>
+
+<div class="doc_code">
+<pre>
+ (* control *)
+ | If | Then | Else | For | In
+</pre>
+</div>
+
+<p>Once we have that, we recognize the new keywords in the lexer. This is pretty simple
+stuff:</p>
+
+<div class="doc_code">
+<pre>
+ ...
+ match Buffer.contents buffer with
+ | "def" -&gt; [&lt; 'Token.Def; stream &gt;]
+ | "extern" -&gt; [&lt; 'Token.Extern; stream &gt;]
+ | "if" -&gt; [&lt; 'Token.If; stream &gt;]
+ | "then" -&gt; [&lt; 'Token.Then; stream &gt;]
+ | "else" -&gt; [&lt; 'Token.Else; stream &gt;]
+ | "for" -&gt; [&lt; 'Token.For; stream &gt;]
+ | "in" -&gt; [&lt; 'Token.In; stream &gt;]
+ | id -&gt; [&lt; 'Token.Ident id; stream &gt;]
+</pre>
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h4><a name="ifast">AST Extensions for If/Then/Else</a></h4>
+<!-- ======================================================================= -->
+
+<div>
+
+<p>To represent the new expression we add a new AST variant for it:</p>
+
+<div class="doc_code">
+<pre>
+type expr =
+ ...
+ (* variant for if/then/else. *)
+ | If of expr * expr * expr
+</pre>
+</div>
+
+<p>The AST variant simply holds the various subexpressions.</p>
+
+</div>
+
+<!-- ======================================================================= -->
+<h4><a name="ifparser">Parser Extensions for If/Then/Else</a></h4>
+<!-- ======================================================================= -->
+
+<div>
+
+<p>Now that we have the relevant tokens coming from the lexer and we have the
+AST node to build, our parsing logic is relatively straightforward.  Since
+if/then/else is a primary expression, we add it directly as a new case in
+<tt>parse_primary</tt>:</p>
+
+<div class="doc_code">
+<pre>
+let rec parse_primary = parser
+ ...
+ (* ifexpr ::= 'if' expr 'then' expr 'else' expr *)
+ | [&lt; 'Token.If; c=parse_expr;
+ 'Token.Then ?? "expected 'then'"; t=parse_expr;
+ 'Token.Else ?? "expected 'else'"; e=parse_expr &gt;] -&gt;
+ Ast.If (c, t, e)
+</pre>
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h4><a name="ifir">LLVM IR for If/Then/Else</a></h4>
+<!-- ======================================================================= -->
+
+<div>
+
+<p>Now that we have it parsing and building the AST, the final piece is adding
+LLVM code generation support. This is the most interesting part of the
+if/then/else example, because this is where it starts to introduce new concepts.
+All of the code above has been thoroughly described in previous chapters.
+</p>
+
+<p>To motivate the code we want to produce, let's take a look at a simple
+example. Consider:</p>
+
+<div class="doc_code">
+<pre>
+extern foo();
+extern bar();
+def baz(x) if x then foo() else bar();
+</pre>
+</div>
+
+<p>If you disable optimizations, the code you'll (soon) get from Kaleidoscope
+looks like this:</p>
+
+<div class="doc_code">
+<pre>
+declare double @foo()
+
+declare double @bar()
+
+define double @baz(double %x) {
+entry:
+ %ifcond = fcmp one double %x, 0.000000e+00
+ br i1 %ifcond, label %then, label %else
+
+then: ; preds = %entry
+ %calltmp = call double @foo()
+ br label %ifcont
+
+else: ; preds = %entry
+ %calltmp1 = call double @bar()
+ br label %ifcont
+
+ifcont: ; preds = %else, %then
+ %iftmp = phi double [ %calltmp, %then ], [ %calltmp1, %else ]
+ ret double %iftmp
+}
+</pre>
+</div>
+
+<p>To visualize the control flow graph, you can use a nifty feature of the LLVM
+'<a href="http://llvm.org/cmds/opt.html">opt</a>' tool. If you put this LLVM IR
+into "t.ll" and run "<tt>llvm-as &lt; t.ll | opt -analyze -view-cfg</tt>", <a
+href="../ProgrammersManual.html#ViewGraph">a window will pop up</a> and you'll
+see this graph:</p>
+
+<div style="text-align: center"><img src="LangImpl5-cfg.png" alt="Example CFG" width="423"
+height="315"></div>
+
+<p>Another way to get this is to call "<tt>Llvm_analysis.view_function_cfg
+f</tt>" or "<tt>Llvm_analysis.view_function_cfg_only f</tt>" (where <tt>f</tt>
+is a "<tt>Function</tt>") either by inserting actual calls into the code and
+recompiling or by calling these in the debugger. LLVM has many nice features
+for visualizing various graphs.</p>
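+
+<p>As a sketch of that second approach (an illustrative tweak to the driver,
+not part of the code in this chapter), one could pop up the CFG of each
+function right after it is generated; this assumes a Graphviz-style viewer is
+installed so the window can actually appear:</p>
+
+<div class="doc_code">
+<pre>
+    (* In Toplevel.main_loop, after code generation of a definition: *)
+    | Token.Def -&gt;
+        let e = Parser.parse_definition stream in
+        print_endline "parsed a function definition.";
+        let f = Codegen.codegen_func the_fpm e in
+        dump_value f;
+
+        (* View the CFG of the freshly generated function. *)
+        Llvm_analysis.view_function_cfg f
+</pre>
+</div>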
+
+<p>Getting back to the generated code, it is fairly simple: the entry block
+evaluates the conditional expression ("x" in our case here) and compares the
+result to 0.0 with the "<tt><a href="../LangRef.html#i_fcmp">fcmp</a> one</tt>"
+instruction ('one' is "Ordered and Not Equal"). Based on the result of this
+expression, the code jumps to either the "then" or "else" blocks, which contain
+the expressions for the true/false cases.</p>
+
+<p>Once the then/else blocks are finished executing, they both branch back to the
+'ifcont' block to execute the code that happens after the if/then/else. In this
+case the only thing left to do is to return to the caller of the function. The
+question then becomes: how does the code know which expression to return?</p>
+
+<p>The answer to this question involves an important SSA operation: the
+<a href="http://en.wikipedia.org/wiki/Static_single_assignment_form">Phi
+operation</a>. If you're not familiar with SSA, <a
+href="http://en.wikipedia.org/wiki/Static_single_assignment_form">the wikipedia
+article</a> is a good introduction and there are various other introductions to
+it available on your favorite search engine. The short version is that
+"execution" of the Phi operation requires "remembering" which block control came
+from. The Phi operation takes on the value corresponding to the input control
+block. In this case, if control comes in from the "then" block, it gets the
+value of "calltmp". If control comes from the "else" block, it gets the value
+of "calltmp1".</p>
+
+<p>At this point, you are probably starting to think "Oh no! This means my
+simple and elegant front-end will have to start generating SSA form in order to
+use LLVM!". Fortunately, this is not the case, and we strongly advise
+<em>not</em> implementing an SSA construction algorithm in your front-end
+unless there is an amazingly good reason to do so. In practice, there are two
+sorts of values that float around in code written for your average imperative
+programming language that might need Phi nodes:</p>
+
+<ol>
+<li>Code that involves user variables: <tt>x = 1; x = x + 1; </tt></li>
+<li>Values that are implicit in the structure of your AST, such as the Phi node
+in this case.</li>
+</ol>
+
+<p>In <a href="OCamlLangImpl7.html">Chapter 7</a> of this tutorial ("mutable
+variables"), we'll talk about #1
+in depth. For now, just believe me that you don't need SSA construction to
+handle this case. For #2, you have the choice of using the techniques that we will
+describe for #1, or you can insert Phi nodes directly, if convenient. In this
+case, it is really really easy to generate the Phi node, so we choose to do it
+directly.</p>
+
+<p>Okay, enough of the motivation and overview, let's generate code!</p>
+
+</div>
+
+<!-- ======================================================================= -->
+<h4><a name="ifcodegen">Code Generation for If/Then/Else</a></h4>
+<!-- ======================================================================= -->
+
+<div>
+
+<p>In order to generate code for this, we add a new case to
+<tt>Codegen.codegen_expr</tt> for <tt>Ast.If</tt>:</p>
+
+<div class="doc_code">
+<pre>
+let rec codegen_expr = function
+ ...
+ | Ast.If (cond, then_, else_) -&gt;
+ let cond = codegen_expr cond in
+
+ (* Convert condition to a bool by comparing equal to 0.0 *)
+ let zero = const_float double_type 0.0 in
+ let cond_val = build_fcmp Fcmp.One cond zero "ifcond" builder in
+</pre>
+</div>
+
+<p>This code is straightforward and similar to what we saw before. We emit the
+expression for the condition, then compare that value to zero to get a truth
+value as a 1-bit (bool) value.</p>
+
+<div class="doc_code">
+<pre>
+ (* Grab the first block so that we might later add the conditional branch
+ * to it at the end of the function. *)
+ let start_bb = insertion_block builder in
+ let the_function = block_parent start_bb in
+
+ let then_bb = append_block context "then" the_function in
+ position_at_end then_bb builder;
+</pre>
+</div>
+
+<p>
+As opposed to the <a href="LangImpl5.html">C++ tutorial</a>, we have to build
+our basic blocks bottom up since we can't have dangling BasicBlocks.  We start
+off by saving a pointer to the current block (which might not be the entry
+block), which we'll need in order to build the conditional branch later.  We do
+this by asking the <tt>builder</tt> for the current BasicBlock.  The fourth line
+gets the current Function object that is being built; it does so by asking
+<tt>start_bb</tt> for its "parent" (the function it is currently embedded
+into).</p>
+
+<p>Once it has that, it creates the "then" block.  The block is automatically
+appended into the function's list of blocks.</p>
+
+<div class="doc_code">
+<pre>
+ (* Emit 'then' value. *)
+ position_at_end then_bb builder;
+ let then_val = codegen_expr then_ in
+
+ (* Codegen of 'then' can change the current block, update then_bb for the
+ * phi. We create a new name because one is used for the phi node, and the
+ * other is used for the conditional branch. *)
+ let new_then_bb = insertion_block builder in
+</pre>
+</div>
+
+<p>We move the builder to start inserting into the "then" block. Strictly
+speaking, this call moves the insertion point to be at the end of the specified
+block. However, since the "then" block is empty, it also starts out by
+inserting at the beginning of the block. :)</p>
+
+<p>Once the insertion point is set, we recursively codegen the "then" expression
+from the AST.</p>
+
+<p>The final line here is quite subtle, but is very important. The basic issue
+is that when we create the Phi node in the merge block, we need to set up the
+block/value pairs that indicate how the Phi will work. Importantly, the Phi
+node expects to have an entry for each predecessor of the block in the CFG. Why
+then, are we getting the current block when we just set it to <tt>then_bb</tt>
+5 lines above?  The problem is that the "then" expression may actually itself
+change the block that the builder is emitting into if, for example, it contains
+a nested "if/then/else" expression.  Because calling <tt>codegen_expr</tt>
+recursively could
+arbitrarily change the notion of the current block, we are required to get an
+up-to-date value for code that will set up the Phi node.</p>
+
+<div class="doc_code">
+<pre>
+ (* Emit 'else' value. *)
+ let else_bb = append_block context "else" the_function in
+ position_at_end else_bb builder;
+ let else_val = codegen_expr else_ in
+
+ (* Codegen of 'else' can change the current block, update else_bb for the
+ * phi. *)
+ let new_else_bb = insertion_block builder in
+</pre>
+</div>
+
+<p>Code generation for the 'else' block is basically identical to codegen for
+the 'then' block.</p>
+
+<div class="doc_code">
+<pre>
+ (* Emit merge block. *)
+ let merge_bb = append_block context "ifcont" the_function in
+ position_at_end merge_bb builder;
+ let incoming = [(then_val, new_then_bb); (else_val, new_else_bb)] in
+ let phi = build_phi incoming "iftmp" builder in
+</pre>
+</div>
+
+<p>The first two lines here are now familiar: the first adds the "merge" block
+to the Function object.  The second line changes the insertion point so that
+newly created code will go into the "merge" block. Once that is done, we need
+to create the PHI node and set up the block/value pairs for the PHI.</p>
+
+<div class="doc_code">
+<pre>
+ (* Return to the start block to add the conditional branch. *)
+ position_at_end start_bb builder;
+ ignore (build_cond_br cond_val then_bb else_bb builder);
+</pre>
+</div>
+
+<p>Once the blocks are created, we can emit the conditional branch that chooses
+between them. Note that creating new blocks does not implicitly affect the
+IRBuilder, so it is still inserting into the block where the condition code
+was emitted.  This is why we needed to save the "start" block.</p>
+
+<div class="doc_code">
+<pre>
+    (* Set an unconditional branch at the end of the 'then' block and the
+ * 'else' block to the 'merge' block. *)
+ position_at_end new_then_bb builder; ignore (build_br merge_bb builder);
+ position_at_end new_else_bb builder; ignore (build_br merge_bb builder);
+
+ (* Finally, set the builder to the end of the merge block. *)
+ position_at_end merge_bb builder;
+
+ phi
+</pre>
+</div>
+
+<p>To finish off the blocks, we create an unconditional branch
+to the merge block. One interesting (and very important) aspect of the LLVM IR
+is that it <a href="../LangRef.html#functionstructure">requires all basic blocks
+to be "terminated"</a> with a <a href="../LangRef.html#terminators">control flow
+instruction</a> such as return or branch. This means that all control flow,
+<em>including fall throughs</em>, must be made explicit in the LLVM IR.  If you
+violate this rule, the verifier will emit an error.</p>
+
+<p>Finally, the CodeGen function returns the phi node as the value computed by
+the if/then/else expression. In our example above, this returned value will
+feed into the code for the top-level function, which will create the return
+instruction.</p>
+
+<p>Overall, we now have the ability to execute conditional code in
+Kaleidoscope. With this extension, Kaleidoscope is a fairly complete language
+that can calculate a wide variety of numeric functions. Next up we'll add
+another useful expression that is familiar from non-functional languages...</p>
+
+</div>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="for">'for' Loop Expression</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>Now that we know how to add basic control flow constructs to the language,
+we have the tools to add more powerful things.  Let's add something more
+aggressive, a 'for' expression:</p>
+
+<div class="doc_code">
+<pre>
+ extern putchard(char);
+ def printstar(n)
+ for i = 1, i &lt; n, 1.0 in
+ putchard(42); # ascii 42 = '*'
+
+ # print 100 '*' characters
+ printstar(100);
+</pre>
+</div>
+
+<p>This expression defines a new variable ("i" in this case) which iterates from
+a starting value, while the condition ("i &lt; n" in this case) is true,
+incrementing by an optional step value ("1.0" in this case). If the step value
+is omitted, it defaults to 1.0.  While the condition is true, the loop executes
+its body expression.  Because we don't have anything better to return, we'll just
+define the loop as always returning 0.0. In the future when we have mutable
+variables, it will get more useful.</p>
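+
+<p>For example (illustrative only), the step value can simply be left off, in
+which case the implicit 1.0 gives the same behavior as <tt>printstar</tt>
+above:</p>
+
+<div class="doc_code">
+<pre>
+  def printstar2(n)
+    for i = 1, i &lt; n in
+      putchard(42);  # ascii 42 = '*'
+</pre>
+</div>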
+
+<p>As before, let's talk about the changes that we need to make to Kaleidoscope to
+support this.</p>
+
+<!-- ======================================================================= -->
+<h4><a name="forlexer">Lexer Extensions for the 'for' Loop</a></h4>
+<!-- ======================================================================= -->
+
+<div>
+
+<p>The lexer extensions are the same sort of thing as for if/then/else:</p>
+
+<div class="doc_code">
+<pre>
+ ... in Token.token ...
+ (* control *)
+ | If | Then | Else
+ <b>| For | In</b>
+
+ ... in Lexer.lex_ident...
+ match Buffer.contents buffer with
+ | "def" -&gt; [&lt; 'Token.Def; stream &gt;]
+ | "extern" -&gt; [&lt; 'Token.Extern; stream &gt;]
+ | "if" -&gt; [&lt; 'Token.If; stream &gt;]
+ | "then" -&gt; [&lt; 'Token.Then; stream &gt;]
+ | "else" -&gt; [&lt; 'Token.Else; stream &gt;]
+ <b>| "for" -&gt; [&lt; 'Token.For; stream &gt;]
+ | "in" -&gt; [&lt; 'Token.In; stream &gt;]</b>
+ | id -&gt; [&lt; 'Token.Ident id; stream &gt;]
+</pre>
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h4><a name="forast">AST Extensions for the 'for' Loop</a></h4>
+<!-- ======================================================================= -->
+
+<div>
+
+<p>The AST variant is just as simple. It basically boils down to capturing
+the variable name and the constituent expressions in the node.</p>
+
+<div class="doc_code">
+<pre>
+type expr =
+ ...
+ (* variant for for/in. *)
+ | For of string * expr * expr * expr option * expr
+</pre>
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h4><a name="forparser">Parser Extensions for the 'for' Loop</a></h4>
+<!-- ======================================================================= -->
+
+<div>
+
+<p>The parser code is also fairly standard. The only interesting thing here is
+handling of the optional step value. The parser code handles it by checking to
+see if the second comma is present.  If not, it sets the step value to
+<tt>None</tt> in the AST node:</p>
+
+<div class="doc_code">
+<pre>
+let rec parse_primary = parser
+ ...
+ (* forexpr
+ ::= 'for' identifier '=' expr ',' expr (',' expr)? 'in' expression *)
+ | [&lt; 'Token.For;
+ 'Token.Ident id ?? "expected identifier after for";
+ 'Token.Kwd '=' ?? "expected '=' after for";
+ stream &gt;] -&gt;
+ begin parser
+ | [&lt;
+ start=parse_expr;
+ 'Token.Kwd ',' ?? "expected ',' after for";
+ end_=parse_expr;
+ stream &gt;] -&gt;
+ let step =
+ begin parser
+ | [&lt; 'Token.Kwd ','; step=parse_expr &gt;] -&gt; Some step
+ | [&lt; &gt;] -&gt; None
+ end stream
+ in
+ begin parser
+ | [&lt; 'Token.In; body=parse_expr &gt;] -&gt;
+ Ast.For (id, start, end_, step, body)
+ | [&lt; &gt;] -&gt;
+ raise (Stream.Error "expected 'in' after for")
+ end stream
+ | [&lt; &gt;] -&gt;
+ raise (Stream.Error "expected '=' after for")
+ end stream
+</pre>
+</div>
+
+</div>
+
+<!-- ======================================================================= -->
+<h4><a name="forir">LLVM IR for the 'for' Loop</a></h4>
+<!-- ======================================================================= -->
+
+<div>
+
+<p>Now we get to the good part: the LLVM IR we want to generate for this thing.
+With the simple example above, we get this LLVM IR (note that this dump is
+generated with optimizations disabled for clarity):
+</p>
+
+<div class="doc_code">
+<pre>
+declare double @putchard(double)
+
+define double @printstar(double %n) {
+entry:
+ ; initial value = 1.0 (inlined into phi)
+ br label %loop
+
+loop: ; preds = %loop, %entry
+ %i = phi double [ 1.000000e+00, %entry ], [ %nextvar, %loop ]
+ ; body
+ %calltmp = call double @putchard(double 4.200000e+01)
+ ; increment
+ %nextvar = fadd double %i, 1.000000e+00
+
+ ; termination test
+ %cmptmp = fcmp ult double %i, %n
+ %booltmp = uitofp i1 %cmptmp to double
+ %loopcond = fcmp one double %booltmp, 0.000000e+00
+ br i1 %loopcond, label %loop, label %afterloop
+
+afterloop: ; preds = %loop
+ ; loop always returns 0.0
+ ret double 0.000000e+00
+}
+</pre>
+</div>
+
+<p>This loop contains all the same constructs we saw before: a phi node, several
+expressions, and some basic blocks.  Let's see how this fits together.</p>
+
+</div>
+
+<!-- ======================================================================= -->
+<h4><a name="forcodegen">Code Generation for the 'for' Loop</a></h4>
+<!-- ======================================================================= -->
+
+<div>
+
+<p>The first part of codegen is very simple: we just emit the start expression
+for the loop value:</p>
+
+<div class="doc_code">
+<pre>
+let rec codegen_expr = function
+ ...
+ | Ast.For (var_name, start, end_, step, body) -&gt;
+ (* Emit the start code first, without 'variable' in scope. *)
+ let start_val = codegen_expr start in
+</pre>
+</div>
+
+<p>With this out of the way, the next step is to set up the LLVM basic block
+for the start of the loop body. In the case above, the whole loop body is one
+block, but remember that the body code itself could consist of multiple blocks
+(e.g. if it contains an if/then/else or a for/in expression).</p>
+
+<div class="doc_code">
+<pre>
+ (* Make the new basic block for the loop header, inserting after current
+ * block. *)
+ let preheader_bb = insertion_block builder in
+ let the_function = block_parent preheader_bb in
+ let loop_bb = append_block context "loop" the_function in
+
+ (* Insert an explicit fall through from the current block to the
+ * loop_bb. *)
+ ignore (build_br loop_bb builder);
+</pre>
+</div>
+
+<p>This code is similar to what we saw for if/then/else. Because we will need
+it to create the Phi node, we remember the block that falls through into the
+loop. Once we have that, we create the actual block that starts the loop and
+create an unconditional branch for the fall-through between the two blocks.</p>
+
+<div class="doc_code">
+<pre>
+ (* Start insertion in loop_bb. *)
+ position_at_end loop_bb builder;
+
+ (* Start the PHI node with an entry for start. *)
+ let variable = build_phi [(start_val, preheader_bb)] var_name builder in
+</pre>
+</div>
+
+<p>Now that the "preheader" for the loop is set up, we switch to emitting code
+for the loop body. To begin with, we move the insertion point and create the
+PHI node for the loop induction variable. Since we already know the incoming
+value for the starting value, we add it to the Phi node. Note that the Phi will
+eventually get a second value for the backedge, but we can't set it up yet
+(because it doesn't exist!).</p>
+
+<div class="doc_code">
+<pre>
+ (* Within the loop, the variable is defined equal to the PHI node. If it
+ * shadows an existing variable, we have to restore it, so save it
+ * now. *)
+ let old_val =
+ try Some (Hashtbl.find named_values var_name) with Not_found -&gt; None
+ in
+ Hashtbl.add named_values var_name variable;
+
+ (* Emit the body of the loop. This, like any other expr, can change the
+ * current BB. Note that we ignore the value computed by the body, but
+ * don't allow an error *)
+ ignore (codegen_expr body);
+</pre>
+</div>
+
+<p>Now the code starts to get more interesting. Our 'for' loop introduces a new
+variable to the symbol table. This means that our symbol table can now contain
+either function arguments or loop variables. To handle this, before we codegen
+the body of the loop, we add the loop variable as the current value for its
+name. Note that it is possible that there is a variable of the same name in the
+outer scope.  It would be easy to make this an error (raise an error if there
+is already an entry for <tt>var_name</tt>), but we choose to allow shadowing
+of variables.  In order to handle this correctly, we remember the value that
+we are potentially shadowing in <tt>old_val</tt> (which will be None if there is
+no shadowed variable).</p>
+
+<p>Once the loop variable is set into the symbol table, the code recursively
+codegen's the body. This allows the body to use the loop variable: any
+references to it will naturally find it in the symbol table.</p>
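+
+<p>As a small illustration of the shadowing behavior (not part of the code
+below), a loop variable may reuse the name of a function argument; inside the
+body the name refers to the induction variable, and the shadowed argument is
+restored in the symbol table once the loop's code has been generated:</p>
+
+<div class="doc_code">
+<pre>
+  # Inside the loop body, 'n' is the induction variable, shadowing the
+  # argument 'n'.
+  def stars(n)
+    for n = 1, n &lt; 10 in
+      putchard(42);
+</pre>
+</div>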
+
+<div class="doc_code">
+<pre>
+ (* Emit the step value. *)
+ let step_val =
+ match step with
+ | Some step -&gt; codegen_expr step
+ (* If not specified, use 1.0. *)
+ | None -&gt; const_float double_type 1.0
+ in
+
+ let next_var = build_add variable step_val "nextvar" builder in
+</pre>
+</div>
+
+<p>Now that the body is emitted, we compute the next value of the iteration
+variable by adding the step value, or 1.0 if it isn't present.
+'<tt>next_var</tt>' will be the value of the loop variable on the next iteration
+of the loop.</p>
+
+<div class="doc_code">
+<pre>
+ (* Compute the end condition. *)
+ let end_cond = codegen_expr end_ in
+
+ (* Convert condition to a bool by comparing equal to 0.0. *)
+ let zero = const_float double_type 0.0 in
+ let end_cond = build_fcmp Fcmp.One end_cond zero "loopcond" builder in
+</pre>
+</div>
+
+<p>Finally, we evaluate the exit condition of the loop, to determine whether the
+loop should exit.  This mirrors the condition evaluation for the if/then/else
+expression.</p>
+
+<div class="doc_code">
+<pre>
+ (* Create the "after loop" block and insert it. *)
+ let loop_end_bb = insertion_block builder in
+ let after_bb = append_block context "afterloop" the_function in
+
+ (* Insert the conditional branch into the end of loop_end_bb. *)
+ ignore (build_cond_br end_cond loop_bb after_bb builder);
+
+ (* Any new code will be inserted in after_bb. *)
+ position_at_end after_bb builder;
+</pre>
+</div>
+
+<p>With the code for the body of the loop complete, we just need to finish up
+the control flow for it.  This code remembers the end block (for the phi node),
+then creates the block for the loop exit ("afterloop").  Based on the value of
+the exit condition, it creates a conditional branch that chooses between
+executing the loop again and exiting the loop.  Any future code is emitted in
+the "afterloop" block, so it sets the insertion position to it.</p>
+
+<div class="doc_code">
+<pre>
+ (* Add a new entry to the PHI node for the backedge. *)
+ add_incoming (next_var, loop_end_bb) variable;
+
+ (* Restore the unshadowed variable. *)
+ begin match old_val with
+ | Some old_val -&gt; Hashtbl.add named_values var_name old_val
+ | None -&gt; ()
+ end;
+
+ (* for expr always returns 0.0. *)
+ const_null double_type
+</pre>
+</div>
+
+<p>The final code handles various cleanups: now that we have the
+"<tt>next_var</tt>" value, we can add the incoming value to the loop PHI node.
+After that, we restore any variable that the loop variable was shadowing in the
+symbol table.  Finally, code generation of the for loop always
+returns 0.0, so that is what we return from <tt>Codegen.codegen_expr</tt>.</p>
+
+<p>With this, we conclude the "adding control flow to Kaleidoscope" chapter of
+the tutorial. In this chapter we added two control flow constructs, and used
+them to motivate a couple of aspects of the LLVM IR that are important for
+front-end implementors to know. In the next chapter of our saga, we will get
+a bit crazier and add <a href="OCamlLangImpl6.html">user-defined operators</a>
+to our poor innocent language.</p>
+
+</div>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="code">Full Code Listing</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>
+Here is the complete code listing for our running example, enhanced with the
+if/then/else and for expressions.  To build this example, use:
+</p>
+
+<div class="doc_code">
+<pre>
+# Compile
+ocamlbuild toy.byte
+# Run
+./toy.byte
+</pre>
+</div>
+
+<p>Here is the code:</p>
+
+<dl>
+<dt>_tags:</dt>
+<dd class="doc_code">
+<pre>
+&lt;{lexer,parser}.ml&gt;: use_camlp4, pp(camlp4of)
+&lt;*.{byte,native}&gt;: g++, use_llvm, use_llvm_analysis
+&lt;*.{byte,native}&gt;: use_llvm_executionengine, use_llvm_target
+&lt;*.{byte,native}&gt;: use_llvm_scalar_opts, use_bindings
+</pre>
+</dd>
+
+<dt>myocamlbuild.ml:</dt>
+<dd class="doc_code">
+<pre>
+open Ocamlbuild_plugin;;
+
+ocaml_lib ~extern:true "llvm";;
+ocaml_lib ~extern:true "llvm_analysis";;
+ocaml_lib ~extern:true "llvm_executionengine";;
+ocaml_lib ~extern:true "llvm_target";;
+ocaml_lib ~extern:true "llvm_scalar_opts";;
+
+flag ["link"; "ocaml"; "g++"] (S[A"-cc"; A"g++"]);;
+dep ["link"; "ocaml"; "use_bindings"] ["bindings.o"];;
+</pre>
+</dd>
+
+<dt>token.ml:</dt>
+<dd class="doc_code">
+<pre>
+(*===----------------------------------------------------------------------===
+ * Lexer Tokens
+ *===----------------------------------------------------------------------===*)
+
+(* The lexer returns these 'Kwd' if it is an unknown character, otherwise one of
+ * these others for known things. *)
+type token =
+ (* commands *)
+ | Def | Extern
+
+ (* primary *)
+ | Ident of string | Number of float
+
+ (* unknown *)
+ | Kwd of char
+
+ (* control *)
+ | If | Then | Else
+ | For | In
+</pre>
+</dd>
+
+<dt>lexer.ml:</dt>
+<dd class="doc_code">
+<pre>
+(*===----------------------------------------------------------------------===
+ * Lexer
+ *===----------------------------------------------------------------------===*)
+
+let rec lex = parser
+ (* Skip any whitespace. *)
+ | [&lt; ' (' ' | '\n' | '\r' | '\t'); stream &gt;] -&gt; lex stream
+
+ (* identifier: [a-zA-Z][a-zA-Z0-9] *)
+ | [&lt; ' ('A' .. 'Z' | 'a' .. 'z' as c); stream &gt;] -&gt;
+ let buffer = Buffer.create 1 in
+ Buffer.add_char buffer c;
+ lex_ident buffer stream
+
+ (* number: [0-9.]+ *)
+ | [&lt; ' ('0' .. '9' as c); stream &gt;] -&gt;
+ let buffer = Buffer.create 1 in
+ Buffer.add_char buffer c;
+ lex_number buffer stream
+
+ (* Comment until end of line. *)
+ | [&lt; ' ('#'); stream &gt;] -&gt;
+ lex_comment stream
+
+ (* Otherwise, just return the character as its ascii value. *)
+ | [&lt; 'c; stream &gt;] -&gt;
+ [&lt; 'Token.Kwd c; lex stream &gt;]
+
+ (* end of stream. *)
+ | [&lt; &gt;] -&gt; [&lt; &gt;]
+
+and lex_number buffer = parser
+ | [&lt; ' ('0' .. '9' | '.' as c); stream &gt;] -&gt;
+ Buffer.add_char buffer c;
+ lex_number buffer stream
+ | [&lt; stream=lex &gt;] -&gt;
+ [&lt; 'Token.Number (float_of_string (Buffer.contents buffer)); stream &gt;]
+
+and lex_ident buffer = parser
+ | [&lt; ' ('A' .. 'Z' | 'a' .. 'z' | '0' .. '9' as c); stream &gt;] -&gt;
+ Buffer.add_char buffer c;
+ lex_ident buffer stream
+ | [&lt; stream=lex &gt;] -&gt;
+ match Buffer.contents buffer with
+ | "def" -&gt; [&lt; 'Token.Def; stream &gt;]
+ | "extern" -&gt; [&lt; 'Token.Extern; stream &gt;]
+ | "if" -&gt; [&lt; 'Token.If; stream &gt;]
+ | "then" -&gt; [&lt; 'Token.Then; stream &gt;]
+ | "else" -&gt; [&lt; 'Token.Else; stream &gt;]
+ | "for" -&gt; [&lt; 'Token.For; stream &gt;]
+ | "in" -&gt; [&lt; 'Token.In; stream &gt;]
+ | id -&gt; [&lt; 'Token.Ident id; stream &gt;]
+
+and lex_comment = parser
+ | [&lt; ' ('\n'); stream=lex &gt;] -&gt; stream
+ | [&lt; 'c; e=lex_comment &gt;] -&gt; e
+ | [&lt; &gt;] -&gt; [&lt; &gt;]
+</pre>
+</dd>
+
+<dt>ast.ml:</dt>
+<dd class="doc_code">
+<pre>
+(*===----------------------------------------------------------------------===
+ * Abstract Syntax Tree (aka Parse Tree)
+ *===----------------------------------------------------------------------===*)
+
+(* expr - Base type for all expression nodes. *)
+type expr =
+ (* variant for numeric literals like "1.0". *)
+ | Number of float
+
+ (* variant for referencing a variable, like "a". *)
+ | Variable of string
+
+ (* variant for a binary operator. *)
+ | Binary of char * expr * expr
+
+ (* variant for function calls. *)
+ | Call of string * expr array
+
+ (* variant for if/then/else. *)
+ | If of expr * expr * expr
+
+ (* variant for for/in. *)
+ | For of string * expr * expr * expr option * expr
+
+(* proto - This type represents the "prototype" for a function, which captures
+ * its name, and its argument names (thus implicitly the number of arguments the
+ * function takes). *)
+type proto = Prototype of string * string array
+
+(* func - This type represents a function definition itself. *)
+type func = Function of proto * expr
+</pre>
+</dd>
+
+<dt>parser.ml:</dt>
+<dd class="doc_code">
+<pre>
+(*===---------------------------------------------------------------------===
+ * Parser
+ *===---------------------------------------------------------------------===*)
+
+(* binop_precedence - This holds the precedence for each binary operator that is
+ * defined *)
+let binop_precedence:(char, int) Hashtbl.t = Hashtbl.create 10
+
+(* precedence - Get the precedence of the pending binary operator token. *)
+let precedence c = try Hashtbl.find binop_precedence c with Not_found -&gt; -1
+
+(* primary
+ * ::= identifier
+ * ::= numberexpr
+ * ::= parenexpr
+ * ::= ifexpr
+ * ::= forexpr *)
+let rec parse_primary = parser
+ (* numberexpr ::= number *)
+ | [&lt; 'Token.Number n &gt;] -&gt; Ast.Number n
+
+ (* parenexpr ::= '(' expression ')' *)
+ | [&lt; 'Token.Kwd '('; e=parse_expr; 'Token.Kwd ')' ?? "expected ')'" &gt;] -&gt; e
+
+ (* identifierexpr
+ * ::= identifier
+ * ::= identifier '(' argumentexpr ')' *)
+ | [&lt; 'Token.Ident id; stream &gt;] -&gt;
+ let rec parse_args accumulator = parser
+ | [&lt; e=parse_expr; stream &gt;] -&gt;
+ begin parser
+ | [&lt; 'Token.Kwd ','; e=parse_args (e :: accumulator) &gt;] -&gt; e
+ | [&lt; &gt;] -&gt; e :: accumulator
+ end stream
+ | [&lt; &gt;] -&gt; accumulator
+ in
+ let rec parse_ident id = parser
+ (* Call. *)
+ | [&lt; 'Token.Kwd '(';
+ args=parse_args [];
+ 'Token.Kwd ')' ?? "expected ')'"&gt;] -&gt;
+ Ast.Call (id, Array.of_list (List.rev args))
+
+ (* Simple variable ref. *)
+ | [&lt; &gt;] -&gt; Ast.Variable id
+ in
+ parse_ident id stream
+
+ (* ifexpr ::= 'if' expr 'then' expr 'else' expr *)
+ | [&lt; 'Token.If; c=parse_expr;
+ 'Token.Then ?? "expected 'then'"; t=parse_expr;
+ 'Token.Else ?? "expected 'else'"; e=parse_expr &gt;] -&gt;
+ Ast.If (c, t, e)
+
+ (* forexpr
+ ::= 'for' identifier '=' expr ',' expr (',' expr)? 'in' expression *)
+ | [&lt; 'Token.For;
+ 'Token.Ident id ?? "expected identifier after for";
+ 'Token.Kwd '=' ?? "expected '=' after for";
+ stream &gt;] -&gt;
+ begin parser
+ | [&lt;
+ start=parse_expr;
+ 'Token.Kwd ',' ?? "expected ',' after for";
+ end_=parse_expr;
+ stream &gt;] -&gt;
+ let step =
+ begin parser
+ | [&lt; 'Token.Kwd ','; step=parse_expr &gt;] -&gt; Some step
+ | [&lt; &gt;] -&gt; None
+ end stream
+ in
+ begin parser
+ | [&lt; 'Token.In; body=parse_expr &gt;] -&gt;
+ Ast.For (id, start, end_, step, body)
+ | [&lt; &gt;] -&gt;
+ raise (Stream.Error "expected 'in' after for")
+ end stream
+ | [&lt; &gt;] -&gt;
+ raise (Stream.Error "expected '=' after for")
+ end stream
+
+ | [&lt; &gt;] -&gt; raise (Stream.Error "unknown token when expecting an expression.")
+
+(* binoprhs
+ * ::= ('+' primary)* *)
+and parse_bin_rhs expr_prec lhs stream =
+ match Stream.peek stream with
+ (* If this is a binop, find its precedence. *)
+ | Some (Token.Kwd c) when Hashtbl.mem binop_precedence c -&gt;
+ let token_prec = precedence c in
+
+ (* If this is a binop that binds at least as tightly as the current binop,
+ * consume it, otherwise we are done. *)
+ if token_prec &lt; expr_prec then lhs else begin
+ (* Eat the binop. *)
+ Stream.junk stream;
+
+ (* Parse the primary expression after the binary operator. *)
+ let rhs = parse_primary stream in
+
+ (* Okay, we know this is a binop. *)
+ let rhs =
+ match Stream.peek stream with
+ | Some (Token.Kwd c2) -&gt;
+ (* If BinOp binds less tightly with rhs than the operator after
+ * rhs, let the pending operator take rhs as its lhs. *)
+ let next_prec = precedence c2 in
+ if token_prec &lt; next_prec
+ then parse_bin_rhs (token_prec + 1) rhs stream
+ else rhs
+ | _ -&gt; rhs
+ in
+
+ (* Merge lhs/rhs. *)
+ let lhs = Ast.Binary (c, lhs, rhs) in
+ parse_bin_rhs expr_prec lhs stream
+ end
+ | _ -&gt; lhs
+
+(* expression
+ * ::= primary binoprhs *)
+and parse_expr = parser
+ | [&lt; lhs=parse_primary; stream &gt;] -&gt; parse_bin_rhs 0 lhs stream
+
+(* prototype
+ * ::= id '(' id* ')' *)
+let parse_prototype =
+ let rec parse_args accumulator = parser
+ | [&lt; 'Token.Ident id; e=parse_args (id::accumulator) &gt;] -&gt; e
+ | [&lt; &gt;] -&gt; accumulator
+ in
+
+ parser
+ | [&lt; 'Token.Ident id;
+ 'Token.Kwd '(' ?? "expected '(' in prototype";
+ args=parse_args [];
+ 'Token.Kwd ')' ?? "expected ')' in prototype" &gt;] -&gt;
+ (* success. *)
+ Ast.Prototype (id, Array.of_list (List.rev args))
+
+ | [&lt; &gt;] -&gt;
+ raise (Stream.Error "expected function name in prototype")
+
+(* definition ::= 'def' prototype expression *)
+let parse_definition = parser
+ | [&lt; 'Token.Def; p=parse_prototype; e=parse_expr &gt;] -&gt;
+ Ast.Function (p, e)
+
+(* toplevelexpr ::= expression *)
+let parse_toplevel = parser
+ | [&lt; e=parse_expr &gt;] -&gt;
+ (* Make an anonymous proto. *)
+ Ast.Function (Ast.Prototype ("", [||]), e)
+
+(* external ::= 'extern' prototype *)
+let parse_extern = parser
+ | [&lt; 'Token.Extern; e=parse_prototype &gt;] -&gt; e
+</pre>
+</dd>
+
+<dt>codegen.ml:</dt>
+<dd class="doc_code">
+<pre>
+(*===----------------------------------------------------------------------===
+ * Code Generation
+ *===----------------------------------------------------------------------===*)
+
+open Llvm
+
+exception Error of string
+
+let context = global_context ()
+let the_module = create_module context "my cool jit"
+let builder = builder context
+let named_values:(string, llvalue) Hashtbl.t = Hashtbl.create 10
+let double_type = double_type context
+
+let rec codegen_expr = function
+ | Ast.Number n -&gt; const_float double_type n
+ | Ast.Variable name -&gt;
+ (try Hashtbl.find named_values name with
+ | Not_found -&gt; raise (Error "unknown variable name"))
+ | Ast.Binary (op, lhs, rhs) -&gt;
+ let lhs_val = codegen_expr lhs in
+ let rhs_val = codegen_expr rhs in
+ begin
+ match op with
+ | '+' -&gt; build_add lhs_val rhs_val "addtmp" builder
+ | '-' -&gt; build_sub lhs_val rhs_val "subtmp" builder
+ | '*' -&gt; build_mul lhs_val rhs_val "multmp" builder
+ | '&lt;' -&gt;
+ (* Convert bool 0/1 to double 0.0 or 1.0 *)
+ let i = build_fcmp Fcmp.Ult lhs_val rhs_val "cmptmp" builder in
+ build_uitofp i double_type "booltmp" builder
+ | _ -&gt; raise (Error "invalid binary operator")
+ end
+ | Ast.Call (callee, args) -&gt;
+ (* Look up the name in the module table. *)
+ let callee =
+ match lookup_function callee the_module with
+ | Some callee -&gt; callee
+ | None -&gt; raise (Error "unknown function referenced")
+ in
+ let params = params callee in
+
+ (* If argument mismatch error. *)
+ if Array.length params == Array.length args then () else
+ raise (Error "incorrect # arguments passed");
+ let args = Array.map codegen_expr args in
+ build_call callee args "calltmp" builder
+ | Ast.If (cond, then_, else_) -&gt;
+ let cond = codegen_expr cond in
+
+ (* Convert condition to a bool by comparing equal to 0.0 *)
+ let zero = const_float double_type 0.0 in
+ let cond_val = build_fcmp Fcmp.One cond zero "ifcond" builder in
+
+ (* Grab the first block so that we might later add the conditional branch
+ * to it at the end of the function. *)
+ let start_bb = insertion_block builder in
+ let the_function = block_parent start_bb in
+
+ let then_bb = append_block context "then" the_function in
+
+ (* Emit 'then' value. *)
+ position_at_end then_bb builder;
+ let then_val = codegen_expr then_ in
+
+ (* Codegen of 'then' can change the current block, update then_bb for the
+ * phi. We create a new name because one is used for the phi node, and the
+ * other is used for the conditional branch. *)
+ let new_then_bb = insertion_block builder in
+
+ (* Emit 'else' value. *)
+ let else_bb = append_block context "else" the_function in
+ position_at_end else_bb builder;
+ let else_val = codegen_expr else_ in
+
+ (* Codegen of 'else' can change the current block, update else_bb for the
+ * phi. *)
+ let new_else_bb = insertion_block builder in
+
+ (* Emit merge block. *)
+ let merge_bb = append_block context "ifcont" the_function in
+ position_at_end merge_bb builder;
+ let incoming = [(then_val, new_then_bb); (else_val, new_else_bb)] in
+ let phi = build_phi incoming "iftmp" builder in
+
+ (* Return to the start block to add the conditional branch. *)
+ position_at_end start_bb builder;
+ ignore (build_cond_br cond_val then_bb else_bb builder);
+
+    (* Set an unconditional branch at the end of the 'then' block and the
+ * 'else' block to the 'merge' block. *)
+ position_at_end new_then_bb builder; ignore (build_br merge_bb builder);
+ position_at_end new_else_bb builder; ignore (build_br merge_bb builder);
+
+ (* Finally, set the builder to the end of the merge block. *)
+ position_at_end merge_bb builder;
+
+ phi
+ | Ast.For (var_name, start, end_, step, body) -&gt;
+ (* Emit the start code first, without 'variable' in scope. *)
+ let start_val = codegen_expr start in
+
+ (* Make the new basic block for the loop header, inserting after current
+ * block. *)
+ let preheader_bb = insertion_block builder in
+ let the_function = block_parent preheader_bb in
+ let loop_bb = append_block context "loop" the_function in
+
+ (* Insert an explicit fall through from the current block to the
+ * loop_bb. *)
+ ignore (build_br loop_bb builder);
+
+ (* Start insertion in loop_bb. *)
+ position_at_end loop_bb builder;
+
+ (* Start the PHI node with an entry for start. *)
+ let variable = build_phi [(start_val, preheader_bb)] var_name builder in
+
+ (* Within the loop, the variable is defined equal to the PHI node. If it
+ * shadows an existing variable, we have to restore it, so save it
+ * now. *)
+ let old_val =
+ try Some (Hashtbl.find named_values var_name) with Not_found -&gt; None
+ in
+ Hashtbl.add named_values var_name variable;
+
+ (* Emit the body of the loop. This, like any other expr, can change the
+ * current BB. Note that we ignore the value computed by the body, but
+ * don't allow an error *)
+ ignore (codegen_expr body);
+
+ (* Emit the step value. *)
+ let step_val =
+ match step with
+ | Some step -&gt; codegen_expr step
+ (* If not specified, use 1.0. *)
+ | None -&gt; const_float double_type 1.0
+ in
+
+ let next_var = build_add variable step_val "nextvar" builder in
+
+ (* Compute the end condition. *)
+ let end_cond = codegen_expr end_ in
+
+ (* Convert condition to a bool by comparing equal to 0.0. *)
+ let zero = const_float double_type 0.0 in
+ let end_cond = build_fcmp Fcmp.One end_cond zero "loopcond" builder in
+
+ (* Create the "after loop" block and insert it. *)
+ let loop_end_bb = insertion_block builder in
+ let after_bb = append_block context "afterloop" the_function in
+
+ (* Insert the conditional branch into the end of loop_end_bb. *)
+ ignore (build_cond_br end_cond loop_bb after_bb builder);
+
+ (* Any new code will be inserted in after_bb. *)
+ position_at_end after_bb builder;
+
+ (* Add a new entry to the PHI node for the backedge. *)
+ add_incoming (next_var, loop_end_bb) variable;
+
+ (* Restore the unshadowed variable. *)
+ begin match old_val with
+ | Some old_val -&gt; Hashtbl.add named_values var_name old_val
+ | None -&gt; ()
+ end;
+
+ (* for expr always returns 0.0. *)
+ const_null double_type
+
+let codegen_proto = function
+ | Ast.Prototype (name, args) -&gt;
+ (* Make the function type: double(double,double) etc. *)
+ let doubles = Array.make (Array.length args) double_type in
+ let ft = function_type double_type doubles in
+ let f =
+ match lookup_function name the_module with
+ | None -&gt; declare_function name ft the_module
+
+ (* If 'f' conflicted, there was already something named 'name'. If it
+ * has a body, don't allow redefinition or reextern. *)
+ | Some f -&gt;
+ (* If 'f' already has a body, reject this. *)
+ if block_begin f &lt;&gt; At_end f then
+ raise (Error "redefinition of function");
+
+ (* If 'f' took a different number of arguments, reject. *)
+ if element_type (type_of f) &lt;&gt; ft then
+ raise (Error "redefinition of function with different # args");
+ f
+ in
+
+ (* Set names for all arguments. *)
+ Array.iteri (fun i a -&gt;
+ let n = args.(i) in
+ set_value_name n a;
+ Hashtbl.add named_values n a;
+ ) (params f);
+ f
+
+let codegen_func the_fpm = function
+ | Ast.Function (proto, body) -&gt;
+ Hashtbl.clear named_values;
+ let the_function = codegen_proto proto in
+
+ (* Create a new basic block to start insertion into. *)
+ let bb = append_block context "entry" the_function in
+ position_at_end bb builder;
+
+ try
+ let ret_val = codegen_expr body in
+
+ (* Finish off the function. *)
+ let _ = build_ret ret_val builder in
+
+ (* Validate the generated code, checking for consistency. *)
+ Llvm_analysis.assert_valid_function the_function;
+
+ (* Optimize the function. *)
+ let _ = PassManager.run_function the_function the_fpm in
+
+ the_function
+ with e -&gt;
+ delete_function the_function;
+ raise e
+</pre>
+</dd>
+
+<dt>toplevel.ml:</dt>
+<dd class="doc_code">
+<pre>
+(*===----------------------------------------------------------------------===
+ * Top-Level parsing and JIT Driver
+ *===----------------------------------------------------------------------===*)
+
+open Llvm
+open Llvm_executionengine
+
+(* top ::= definition | external | expression | ';' *)
+let rec main_loop the_fpm the_execution_engine stream =
+ match Stream.peek stream with
+ | None -&gt; ()
+
+ (* ignore top-level semicolons. *)
+ | Some (Token.Kwd ';') -&gt;
+ Stream.junk stream;
+ main_loop the_fpm the_execution_engine stream
+
+ | Some token -&gt;
+ begin
+ try match token with
+ | Token.Def -&gt;
+ let e = Parser.parse_definition stream in
+ print_endline "parsed a function definition.";
+ dump_value (Codegen.codegen_func the_fpm e);
+ | Token.Extern -&gt;
+ let e = Parser.parse_extern stream in
+ print_endline "parsed an extern.";
+ dump_value (Codegen.codegen_proto e);
+ | _ -&gt;
+ (* Evaluate a top-level expression into an anonymous function. *)
+ let e = Parser.parse_toplevel stream in
+ print_endline "parsed a top-level expr";
+ let the_function = Codegen.codegen_func the_fpm e in
+ dump_value the_function;
+
+ (* JIT the function, returning a function pointer. *)
+ let result = ExecutionEngine.run_function the_function [||]
+ the_execution_engine in
+
+ print_string "Evaluated to ";
+ print_float (GenericValue.as_float Codegen.double_type result);
+ print_newline ();
+ with Stream.Error s | Codegen.Error s -&gt;
+ (* Skip token for error recovery. *)
+ Stream.junk stream;
+ print_endline s;
+ end;
+ print_string "ready&gt; "; flush stdout;
+ main_loop the_fpm the_execution_engine stream
+</pre>
+</dd>
+
+<dt>toy.ml:</dt>
+<dd class="doc_code">
+<pre>
+(*===----------------------------------------------------------------------===
+ * Main driver code.
+ *===----------------------------------------------------------------------===*)
+
+open Llvm
+open Llvm_executionengine
+open Llvm_target
+open Llvm_scalar_opts
+
+let main () =
+ ignore (initialize_native_target ());
+
+ (* Install standard binary operators.
+ * 1 is the lowest precedence. *)
+ Hashtbl.add Parser.binop_precedence '&lt;' 10;
+ Hashtbl.add Parser.binop_precedence '+' 20;
+ Hashtbl.add Parser.binop_precedence '-' 20;
+ Hashtbl.add Parser.binop_precedence '*' 40; (* highest. *)
+
+ (* Prime the first token. *)
+ print_string "ready&gt; "; flush stdout;
+ let stream = Lexer.lex (Stream.of_channel stdin) in
+
+ (* Create the JIT. *)
+ let the_execution_engine = ExecutionEngine.create Codegen.the_module in
+ let the_fpm = PassManager.create_function Codegen.the_module in
+
+ (* Set up the optimizer pipeline. Start with registering info about how the
+ * target lays out data structures. *)
+ TargetData.add (ExecutionEngine.target_data the_execution_engine) the_fpm;
+
+ (* Do simple "peephole" optimizations and bit-twiddling optzn. *)
+ add_instruction_combination the_fpm;
+
+ (* reassociate expressions. *)
+ add_reassociation the_fpm;
+
+ (* Eliminate Common SubExpressions. *)
+ add_gvn the_fpm;
+
+ (* Simplify the control flow graph (deleting unreachable blocks, etc). *)
+ add_cfg_simplification the_fpm;
+
+ ignore (PassManager.initialize the_fpm);
+
+ (* Run the main "interpreter loop" now. *)
+ Toplevel.main_loop the_fpm the_execution_engine stream;
+
+ (* Print out all the generated code. *)
+ dump_module Codegen.the_module
+;;
+
+main ()
+</pre>
+</dd>
+
+<dt>bindings.c</dt>
+<dd class="doc_code">
+<pre>
+#include &lt;stdio.h&gt;
+
+/* putchard - putchar that takes a double and returns 0. */
+extern double putchard(double X) {
+ putchar((char)X);
+ return 0;
+}
+</pre>
+</dd>
+</dl>
+
+<a href="OCamlLangImpl6.html">Next: Extending the language: user-defined
+operators</a>
+</div>
+
+<!-- *********************************************************************** -->
+<hr>
+<address>
+ <a href="http://jigsaw.w3.org/css-validator/check/referer"><img
+ src="http://jigsaw.w3.org/css-validator/images/vcss" alt="Valid CSS!"></a>
+ <a href="http://validator.w3.org/check/referer"><img
+ src="http://www.w3.org/Icons/valid-html401" alt="Valid HTML 4.01!"></a>
+
+ <a href="mailto:sabre@nondot.org">Chris Lattner</a><br>
+ <a href="mailto:idadesub@users.sourceforge.net">Erick Tryzelaar</a><br>
+ <a href="http://llvm.org/">The LLVM Compiler Infrastructure</a><br>
+ Last modified: $Date$
+</address>
+</body>
+</html>
diff --git a/docs/tutorial/OCamlLangImpl6.html b/docs/tutorial/OCamlLangImpl6.html
new file mode 100644
index 00000000000..2ee5089721c
--- /dev/null
+++ b/docs/tutorial/OCamlLangImpl6.html
@@ -0,0 +1,1574 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
+ "http://www.w3.org/TR/html4/strict.dtd">
+
+<html>
+<head>
+ <title>Kaleidoscope: Extending the Language: User-defined Operators</title>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
+ <meta name="author" content="Chris Lattner">
+ <meta name="author" content="Erick Tryzelaar">
+ <link rel="stylesheet" href="../_static/llvm.css" type="text/css">
+</head>
+
+<body>
+
+<h1>Kaleidoscope: Extending the Language: User-defined Operators</h1>
+
+<ul>
+<li><a href="index.html">Up to Tutorial Index</a></li>
+<li>Chapter 6
+ <ol>
+ <li><a href="#intro">Chapter 6 Introduction</a></li>
+ <li><a href="#idea">User-defined Operators: the Idea</a></li>
+ <li><a href="#binary">User-defined Binary Operators</a></li>
+ <li><a href="#unary">User-defined Unary Operators</a></li>
+ <li><a href="#example">Kicking the Tires</a></li>
+ <li><a href="#code">Full Code Listing</a></li>
+ </ol>
+</li>
+<li><a href="OCamlLangImpl7.html">Chapter 7</a>: Extending the Language: Mutable
+Variables / SSA Construction</li>
+</ul>
+
+<div class="doc_author">
+ <p>
+ Written by <a href="mailto:sabre@nondot.org">Chris Lattner</a>
+ and <a href="mailto:idadesub@users.sourceforge.net">Erick Tryzelaar</a>
+ </p>
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="intro">Chapter 6 Introduction</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>Welcome to Chapter 6 of the "<a href="index.html">Implementing a language
+with LLVM</a>" tutorial. At this point in our tutorial, we now have a fully
+functional language that is fairly minimal, but also useful. There
+is still one big problem with it, however. Our language doesn't have many
+useful operators (like division, logical negation, or even any comparisons
+besides less-than).</p>
+
+<p>This chapter of the tutorial takes a wild digression into adding user-defined
+operators to the simple and beautiful Kaleidoscope language. This digression now
+gives us a simple and ugly language in some ways, but also a powerful one at the
+same time. One of the great things about creating your own language is that you
+get to decide what is good or bad. In this tutorial we'll assume that it is
+okay to use this as a way to show some interesting parsing techniques.</p>
+
+<p>At the end of this tutorial, we'll run through an example Kaleidoscope
+application that <a href="#example">renders the Mandelbrot set</a>. This gives
+an example of what you can build with Kaleidoscope and its feature set.</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="idea">User-defined Operators: the Idea</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>
+The "operator overloading" that we will add to Kaleidoscope is more general than
+languages like C++. In C++, you are only allowed to redefine existing
+operators: you can't programmatically change the grammar, introduce new
+operators, change precedence levels, etc. In this chapter, we will add this
+capability to Kaleidoscope, which will let the user round out the set of
+operators that are supported.</p>
+
+<p>The point of going into user-defined operators in a tutorial like this is to
+show the power and flexibility of using a hand-written parser. Thus far, the parser
+we have been implementing uses recursive descent for most parts of the grammar and
+operator precedence parsing for the expressions. See <a
+href="OCamlLangImpl2.html">Chapter 2</a> for details. Without using operator
+precedence parsing, it would be very difficult to allow the programmer to
+introduce new operators into the grammar: the grammar is dynamically extensible
+as the JIT runs.</p>
+
+<p>The two specific features we'll add are programmable unary operators (right
+now, Kaleidoscope has no unary operators at all) as well as binary operators.
+An example of this is:</p>
+
+<div class="doc_code">
+<pre>
+# Logical unary not.
+def unary!(v)
+ if v then
+ 0
+ else
+ 1;
+
+# Define &gt; with the same precedence as &lt;.
+def binary&gt; 10 (LHS RHS)
+ RHS &lt; LHS;
+
+# Binary "logical or", (note that it does not "short circuit")
+def binary| 5 (LHS RHS)
+ if LHS then
+ 1
+ else if RHS then
+ 1
+ else
+ 0;
+
+# Define = with slightly lower precedence than relationals.
+def binary= 9 (LHS RHS)
+ !(LHS &lt; RHS | LHS &gt; RHS);
+</pre>
+</div>
+
+<p>Many languages aspire to being able to implement their standard runtime
+library in the language itself. In Kaleidoscope, we can implement significant
+parts of the language in the library!</p>
+
+<p>We will break down implementation of these features into two parts:
+implementing support for user-defined binary operators and adding unary
+operators.</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="binary">User-defined Binary Operators</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>Adding support for user-defined binary operators is pretty simple with our
+current framework. We'll first add support for the unary/binary keywords:</p>
+
+<div class="doc_code">
+<pre>
+type token =
+ ...
+ <b>(* operators *)
+ | Binary | Unary</b>
+
+...
+
+and lex_ident buffer = parser
+ ...
+ | "for" -&gt; [&lt; 'Token.For; stream &gt;]
+ | "in" -&gt; [&lt; 'Token.In; stream &gt;]
+ <b>| "binary" -&gt; [&lt; 'Token.Binary; stream &gt;]
+ | "unary" -&gt; [&lt; 'Token.Unary; stream &gt;]</b>
+</pre>
+</div>
+
+<p>This just adds lexer support for the unary and binary keywords, like we
+did in <a href="OCamlLangImpl5.html#iflexer">previous chapters</a>. One nice
+thing about our current AST is that we represent binary operators with full
+generalisation by using their ASCII code as the opcode. For our extended
+operators, we'll use this same representation, so we don't need any new AST or
+parser support.</p>
+
+<p>On the other hand, we have to be able to represent the definitions of these
+new operators, in the "def binary| 5" part of the function definition. In our
+grammar so far, the "name" for the function definition is parsed as the
+"prototype" production and into the <tt>Ast.Prototype</tt> AST node. To
+represent our new user-defined operators as prototypes, we have to extend
+the <tt>Ast.Prototype</tt> AST node like this:</p>
+
+<div class="doc_code">
+<pre>
+(* proto - This type represents the "prototype" for a function, which captures
+ * its name, and its argument names (thus implicitly the number of arguments the
+ * function takes). *)
+type proto =
+ | Prototype of string * string array
+ <b>| BinOpPrototype of string * string array * int</b>
+</pre>
+</div>
+
+<p>Basically, in addition to knowing a name for the prototype, we now keep track
+of whether it was an operator, and if it was, what precedence level the operator
+is at. The precedence is only used for binary operators (as you'll see below,
+it just doesn't apply for unary operators). Now that we have a way to represent
+the prototype for a user-defined operator, we need to parse it:</p>
+
+<div class="doc_code">
+<pre>
+(* prototype
+ * ::= id '(' id* ')'
+ <b>* ::= binary LETTER number? (id, id)
+ * ::= unary LETTER number? (id) *)</b>
+let parse_prototype =
+ let rec parse_args accumulator = parser
+ | [&lt; 'Token.Ident id; e=parse_args (id::accumulator) &gt;] -&gt; e
+ | [&lt; &gt;] -&gt; accumulator
+ in
+ let parse_operator = parser
+ | [&lt; 'Token.Unary &gt;] -&gt; "unary", 1
+ | [&lt; 'Token.Binary &gt;] -&gt; "binary", 2
+ in
+ let parse_binary_precedence = parser
+ | [&lt; 'Token.Number n &gt;] -&gt; int_of_float n
+ | [&lt; &gt;] -&gt; 30
+ in
+ parser
+ | [&lt; 'Token.Ident id;
+ 'Token.Kwd '(' ?? "expected '(' in prototype";
+ args=parse_args [];
+ 'Token.Kwd ')' ?? "expected ')' in prototype" &gt;] -&gt;
+ (* success. *)
+ Ast.Prototype (id, Array.of_list (List.rev args))
+ <b>| [&lt; (prefix, kind)=parse_operator;
+ 'Token.Kwd op ?? "expected an operator";
+ (* Read the precedence if present. *)
+ binary_precedence=parse_binary_precedence;
+ 'Token.Kwd '(' ?? "expected '(' in prototype";
+ args=parse_args [];
+ 'Token.Kwd ')' ?? "expected ')' in prototype" &gt;] -&gt;
+ let name = prefix ^ (String.make 1 op) in
+ let args = Array.of_list (List.rev args) in
+
+ (* Verify right number of arguments for operator. *)
+ if Array.length args != kind
+ then raise (Stream.Error "invalid number of operands for operator")
+ else
+ if kind == 1 then
+ Ast.Prototype (name, args)
+ else
+ Ast.BinOpPrototype (name, args, binary_precedence)</b>
+ | [&lt; &gt;] -&gt;
+ raise (Stream.Error "expected function name in prototype")
+</pre>
+</div>
+
+<p>This is all fairly straightforward parsing code, and we have already seen
+a lot of similar code in the past. One interesting part of the code above is
+the couple of lines that set up <tt>name</tt> for binary operators. This builds
+names like "binary@" for a newly defined "@" operator. This then takes
+advantage of the fact that symbol names in the LLVM symbol table are allowed to
+have any character in them, including embedded nul characters.</p>
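+
+<p>Concretely (a sketch, assuming the <tt>ast.ml</tt> from the full listing
+below), a definition beginning with "def binary@ 5 (LHS RHS)" produces a
+prototype like this:</p>
+
+<div class="doc_code">
+<pre>
+(* The prototype's name embeds the operator character, and the declared
+ * precedence (5) rides along in the BinOpPrototype variant. *)
+let proto : Ast.proto =
+  Ast.BinOpPrototype ("binary@", [|"LHS"; "RHS"|], 5)
+</pre>
+</div>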
+
+<p>The next interesting thing to add is codegen support for these binary
+operators. Given our current structure, this is a simple addition of a default
+case for our existing binary operator node:</p>
+
+<div class="doc_code">
+<pre>
+let codegen_expr = function
+ ...
+ | Ast.Binary (op, lhs, rhs) -&gt;
+ let lhs_val = codegen_expr lhs in
+ let rhs_val = codegen_expr rhs in
+ begin
+ match op with
+ | '+' -&gt; build_add lhs_val rhs_val "addtmp" builder
+ | '-' -&gt; build_sub lhs_val rhs_val "subtmp" builder
+ | '*' -&gt; build_mul lhs_val rhs_val "multmp" builder
+ | '&lt;' -&gt;
+ (* Convert bool 0/1 to double 0.0 or 1.0 *)
+ let i = build_fcmp Fcmp.Ult lhs_val rhs_val "cmptmp" builder in
+ build_uitofp i double_type "booltmp" builder
+ <b>| _ -&gt;
+ (* If it wasn't a builtin binary operator, it must be a user defined
+ * one. Emit a call to it. *)
+ let callee = "binary" ^ (String.make 1 op) in
+ let callee =
+ match lookup_function callee the_module with
+ | Some callee -&gt; callee
+ | None -&gt; raise (Error "binary operator not found!")
+ in
+ build_call callee [|lhs_val; rhs_val|] "binop" builder</b>
+ end
+</pre>
+</div>
+
+<p>As you can see above, the new code is quite simple. It just looks up the
+appropriate operator in the symbol table and generates a function call to it.
+Since user-defined operators are built as normal functions (because the
+"prototype" boils down to a function with the right name), everything falls
+into place.</p>
+
+<p>The final piece of code we are missing is a bit of top-level magic:</p>
+
+<div class="doc_code">
+<pre>
+let codegen_func the_fpm = function
+ | Ast.Function (proto, body) -&gt;
+ Hashtbl.clear named_values;
+ let the_function = codegen_proto proto in
+
+ <b>(* If this is an operator, install it. *)
+ begin match proto with
+ | Ast.BinOpPrototype (name, args, prec) -&gt;
+ let op = name.[String.length name - 1] in
+ Hashtbl.add Parser.binop_precedence op prec;
+ | _ -&gt; ()
+ end;</b>
+
+ (* Create a new basic block to start insertion into. *)
+ let bb = append_block context "entry" the_function in
+ position_at_end bb builder;
+ ...
+</pre>
+</div>
+
+<p>Basically, before codegening a function, if it is a user-defined operator, we
+register it in the precedence table. This allows the binary operator parsing
+logic we already have in place to handle it. Since we are working on a
+fully-general operator precedence parser, this is all we need to do to "extend
+the grammar".</p>
+
+<p>Now we have useful user-defined binary operators, building on the framework
+we already had for the built-in ones. Adding unary operators is a bit more
+challenging, because we don't have any framework for them yet; let's see what
+it takes.</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="unary">User-defined Unary Operators</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>Since we don't currently support unary operators in the Kaleidoscope
+language, we'll need to add everything to support them. Above, we added simple
+support for the 'unary' keyword to the lexer. In addition to that, we need an
+AST node:</p>
+
+<div class="doc_code">
+<pre>
+type expr =
+ ...
+ (* variant for a unary operator. *)
+ | Unary of char * expr
+ ...
+</pre>
+</div>
+
+<p>This AST node is very simple and obvious by now. It directly mirrors the
+binary operator AST node, except that it only has one child. With this, we
+need to add the parsing logic. Parsing a unary operator is pretty simple: we'll
+add a new function to do it:</p>
+
+<div class="doc_code">
+<pre>
+(* unary
+ * ::= primary
+ * ::= '!' unary *)
+and parse_unary = parser
+ (* If this is a unary operator, read it. *)
+ | [&lt; 'Token.Kwd op when op != '(' &amp;&amp; op != ')'; operand=parse_expr &gt;] -&gt;
+ Ast.Unary (op, operand)
+
+ (* If the current token is not an operator, it must be a primary expr. *)
+ | [&lt; stream &gt;] -&gt; parse_primary stream
+</pre>
+</div>
+
+<p>The grammar we add is pretty straightforward here. If we see a unary
+operator when parsing a primary expression, we eat the operator as a prefix and
+parse the remaining piece as the operand, which may itself begin with another
+unary operator. This allows us to handle
+multiple unary operators (e.g. "!!x"). Note that unary operators can't have
+ambiguous parses like binary operators can, so there is no need for precedence
+information.</p>
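+
+<p>For instance (a sketch in terms of the <tt>ast.ml</tt> from the full listing
+below), "!!x" parses into two nested <tt>Ast.Unary</tt> nodes wrapped around
+the variable reference:</p>
+
+<div class="doc_code">
+<pre>
+(* parse_unary consumes the first '!', then parses the rest of the operand,
+ * which consumes the second '!' before reaching the primary expression "x". *)
+let example : Ast.expr =
+  Ast.Unary ('!', Ast.Unary ('!', Ast.Variable "x"))
+</pre>
+</div>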
+
+<p>The problem with this function is that we need to call <tt>parse_unary</tt>
+from somewhere. To do this, we change previous callers of
+<tt>parse_primary</tt> to call <tt>parse_unary</tt> instead:</p>
+
+<div class="doc_code">
+<pre>
+(* binoprhs
+ * ::= ('+' primary)* *)
+and parse_bin_rhs expr_prec lhs stream =
+ ...
+ <b>(* Parse the unary expression after the binary operator. *)
+ let rhs = parse_unary stream in</b>
+ ...
+
+...
+
+(* expression
+ * ::= primary binoprhs *)
+and parse_expr = parser
+ | [&lt; lhs=<b>parse_unary</b>; stream &gt;] -&gt; parse_bin_rhs 0 lhs stream
+</pre>
+</div>
+
+<p>With these two simple changes, we are now able to parse unary operators and
+build the AST for them. Next, we need to extend the prototype parser so that it
+can parse unary operator prototypes. We extend the binary operator code above
+with:</p>
+
+<div class="doc_code">
+<pre>
+(* prototype
+ * ::= id '(' id* ')'
+ * ::= binary LETTER number? (id, id)
+ <b>* ::= unary LETTER number? (id)</b> *)
+let parse_prototype =
+ let rec parse_args accumulator = parser
+ | [&lt; 'Token.Ident id; e=parse_args (id::accumulator) &gt;] -&gt; e
+ | [&lt; &gt;] -&gt; accumulator
+ in
+ <b>let parse_operator = parser
+ | [&lt; 'Token.Unary &gt;] -&gt; "unary", 1
+ | [&lt; 'Token.Binary &gt;] -&gt; "binary", 2
+ in</b>
+ let parse_binary_precedence = parser
+ | [&lt; 'Token.Number n &gt;] -&gt; int_of_float n
+ | [&lt; &gt;] -&gt; 30
+ in
+ parser
+ | [&lt; 'Token.Ident id;
+ 'Token.Kwd '(' ?? "expected '(' in prototype";
+ args=parse_args [];
+ 'Token.Kwd ')' ?? "expected ')' in prototype" &gt;] -&gt;
+ (* success. *)
+ Ast.Prototype (id, Array.of_list (List.rev args))
+ <b>| [&lt; (prefix, kind)=parse_operator;
+ 'Token.Kwd op ?? "expected an operator";
+ (* Read the precedence if present. *)
+ binary_precedence=parse_binary_precedence;
+ 'Token.Kwd '(' ?? "expected '(' in prototype";
+ args=parse_args [];
+ 'Token.Kwd ')' ?? "expected ')' in prototype" &gt;] -&gt;
+ let name = prefix ^ (String.make 1 op) in
+ let args = Array.of_list (List.rev args) in
+
+ (* Verify right number of arguments for operator. *)
+ if Array.length args != kind
+ then raise (Stream.Error "invalid number of operands for operator")
+ else
+ if kind == 1 then
+ Ast.Prototype (name, args)
+ else
+ Ast.BinOpPrototype (name, args, binary_precedence)</b>
+ | [&lt; &gt;] -&gt;
+ raise (Stream.Error "expected function name in prototype")
+</pre>
+</div>
+
+<p>As with binary operators, we name unary operators with a name that includes
+the operator character. This assists us at code generation time. Speaking of
+which, the final piece we need to add is codegen support for unary operators.
+It looks
+like this:</p>
+
+<div class="doc_code">
+<pre>
+let rec codegen_expr = function
+ ...
+ | Ast.Unary (op, operand) -&gt;
+ let operand = codegen_expr operand in
+ let callee = "unary" ^ (String.make 1 op) in
+ let callee =
+ match lookup_function callee the_module with
+ | Some callee -&gt; callee
+ | None -&gt; raise (Error "unknown unary operator")
+ in
+ build_call callee [|operand|] "unop" builder
+</pre>
+</div>
+
+<p>This code is similar to, but simpler than, the code for binary operators. It
+is simpler primarily because it doesn't need to handle any predefined operators.
+</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="example">Kicking the Tires</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>It is somewhat hard to believe, but with the few simple extensions we've
+covered in the last chapters, we have grown a real-ish language. With this, we
+can do a lot of interesting things, including I/O and math. For example, we can
+now add a nice sequencing operator (printd is defined to print out the
+specified value followed by a newline):</p>
+
+<div class="doc_code">
+<pre>
+ready&gt; <b>extern printd(x);</b>
+Read extern: declare double @printd(double)
+ready&gt; <b>def binary : 1 (x y) 0; # Low-precedence operator that ignores operands.</b>
+..
+ready&gt; <b>printd(123) : printd(456) : printd(789);</b>
+123.000000
+456.000000
+789.000000
+Evaluated to 0.000000
+</pre>
+</div>
+
+<p>We can also define a bunch of other "primitive" operations, such as:</p>
+
+<div class="doc_code">
+<pre>
+# Logical unary not.
+def unary!(v)
+ if v then
+ 0
+ else
+ 1;
+
+# Unary negate.
+def unary-(v)
+ 0-v;
+
+# Define &gt; with the same precedence as &lt;.
+def binary&gt; 10 (LHS RHS)
+ RHS &lt; LHS;
+
+# Binary logical or, which does not short circuit.
+def binary| 5 (LHS RHS)
+ if LHS then
+ 1
+ else if RHS then
+ 1
+ else
+ 0;
+
+# Binary logical and, which does not short circuit.
+def binary&amp; 6 (LHS RHS)
+ if !LHS then
+ 0
+ else
+ !!RHS;
+
+# Define = with slightly lower precedence than relationals.
+def binary = 9 (LHS RHS)
+ !(LHS &lt; RHS | LHS &gt; RHS);
+
+</pre>
+</div>
+
+
+<p>Given the previous if/then/else support, we can also define interesting
+functions for I/O. For example, the following prints out a character whose
+"density" reflects the value passed in: the lower the value, the denser the
+character:</p>
+
+<div class="doc_code">
+<pre>
+ready&gt;
+<b>
+extern putchard(char)
+def printdensity(d)
+ if d &gt; 8 then
+ putchard(32) # ' '
+ else if d &gt; 4 then
+ putchard(46) # '.'
+ else if d &gt; 2 then
+ putchard(43) # '+'
+ else
+ putchard(42); # '*'</b>
+...
+ready&gt; <b>printdensity(1): printdensity(2): printdensity(3) :
+ printdensity(4): printdensity(5): printdensity(9): putchard(10);</b>
+*++..
+Evaluated to 0.000000
+</pre>
+</div>
+
+<p>Based on these simple primitive operations, we can start to define more
+interesting things. For example, here's a little function that determines the
+number of iterations it takes for a point in the complex plane to
+escape:</p>
+
+<div class="doc_code">
+<pre>
+# determine whether the specific location diverges.
+# Solve for z = z^2 + c in the complex plane.
+def mandleconverger(real imag iters creal cimag)
+ if iters &gt; 255 | (real*real + imag*imag &gt; 4) then
+ iters
+ else
+ mandleconverger(real*real - imag*imag + creal,
+ 2*real*imag + cimag,
+ iters+1, creal, cimag);
+
+# return the number of iterations required for the iteration to escape
+def mandleconverge(real imag)
+ mandleconverger(real, imag, 0, real, imag);
+</pre>
+</div>
+
+<p>This "z = z<sup>2</sup> + c" function is a beautiful little creature that is the basis
+for computation of the <a
+href="http://en.wikipedia.org/wiki/Mandelbrot_set">Mandelbrot Set</a>. Our
+<tt>mandleconverge</tt> function returns the number of iterations that it takes
+for a complex orbit to escape, saturating to 255. This is not a very useful
+function by itself, but if you plot its value over a two-dimensional plane,
+you can see the Mandelbrot set. Given that we are limited to using putchard
+here, our graphical output is crude, but we can whip together something using
+the density plotter above:</p>
+
+<div class="doc_code">
+<pre>
+# compute and plot the mandlebrot set with the specified 2 dimensional range
+# info.
+def mandelhelp(xmin xmax xstep ymin ymax ystep)
+ for y = ymin, y &lt; ymax, ystep in (
+ (for x = xmin, x &lt; xmax, xstep in
+ printdensity(mandleconverge(x,y)))
+ : putchard(10)
+ )
+
+# mandel - This is a convenient helper function for plotting the mandelbrot set
+# from the specified position with the specified Magnification.
+def mandel(realstart imagstart realmag imagmag)
+ mandelhelp(realstart, realstart+realmag*78, realmag,
+ imagstart, imagstart+imagmag*40, imagmag);
+</pre>
+</div>
+
+<p>Given this, we can try plotting out the Mandelbrot set! Let's try it out:</p>
+
+<div class="doc_code">
+<pre>
+ready&gt; <b>mandel(-2.3, -1.3, 0.05, 0.07);</b>
+*******************************+++++++++++*************************************
+*************************+++++++++++++++++++++++*******************************
+**********************+++++++++++++++++++++++++++++****************************
+*******************+++++++++++++++++++++.. ...++++++++*************************
+*****************++++++++++++++++++++++.... ...+++++++++***********************
+***************+++++++++++++++++++++++..... ...+++++++++*********************
+**************+++++++++++++++++++++++.... ....+++++++++********************
+*************++++++++++++++++++++++...... .....++++++++*******************
+************+++++++++++++++++++++....... .......+++++++******************
+***********+++++++++++++++++++.... ... .+++++++*****************
+**********+++++++++++++++++....... .+++++++****************
+*********++++++++++++++........... ...+++++++***************
+********++++++++++++............ ...++++++++**************
+********++++++++++... .......... .++++++++**************
+*******+++++++++..... .+++++++++*************
+*******++++++++...... ..+++++++++*************
+*******++++++....... ..+++++++++*************
+*******+++++...... ..+++++++++*************
+*******.... .... ...+++++++++*************
+*******.... . ...+++++++++*************
+*******+++++...... ...+++++++++*************
+*******++++++....... ..+++++++++*************
+*******++++++++...... .+++++++++*************
+*******+++++++++..... ..+++++++++*************
+********++++++++++... .......... .++++++++**************
+********++++++++++++............ ...++++++++**************
+*********++++++++++++++.......... ...+++++++***************
+**********++++++++++++++++........ .+++++++****************
+**********++++++++++++++++++++.... ... ..+++++++****************
+***********++++++++++++++++++++++....... .......++++++++*****************
+************+++++++++++++++++++++++...... ......++++++++******************
+**************+++++++++++++++++++++++.... ....++++++++********************
+***************+++++++++++++++++++++++..... ...+++++++++*********************
+*****************++++++++++++++++++++++.... ...++++++++***********************
+*******************+++++++++++++++++++++......++++++++*************************
+*********************++++++++++++++++++++++.++++++++***************************
+*************************+++++++++++++++++++++++*******************************
+******************************+++++++++++++************************************
+*******************************************************************************
+*******************************************************************************
+*******************************************************************************
+Evaluated to 0.000000
+ready&gt; <b>mandel(-2, -1, 0.02, 0.04);</b>
+**************************+++++++++++++++++++++++++++++++++++++++++++++++++++++
+***********************++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+*********************+++++++++++++++++++++++++++++++++++++++++++++++++++++++++.
+*******************+++++++++++++++++++++++++++++++++++++++++++++++++++++++++...
+*****************+++++++++++++++++++++++++++++++++++++++++++++++++++++++++.....
+***************++++++++++++++++++++++++++++++++++++++++++++++++++++++++........
+**************++++++++++++++++++++++++++++++++++++++++++++++++++++++...........
+************+++++++++++++++++++++++++++++++++++++++++++++++++++++..............
+***********++++++++++++++++++++++++++++++++++++++++++++++++++........ .
+**********++++++++++++++++++++++++++++++++++++++++++++++.............
+********+++++++++++++++++++++++++++++++++++++++++++..................
+*******+++++++++++++++++++++++++++++++++++++++.......................
+******+++++++++++++++++++++++++++++++++++...........................
+*****++++++++++++++++++++++++++++++++............................
+*****++++++++++++++++++++++++++++...............................
+****++++++++++++++++++++++++++...... .........................
+***++++++++++++++++++++++++......... ...... ...........
+***++++++++++++++++++++++............
+**+++++++++++++++++++++..............
+**+++++++++++++++++++................
+*++++++++++++++++++.................
+*++++++++++++++++............ ...
+*++++++++++++++..............
+*+++....++++................
+*.......... ...........
+*
+*.......... ...........
+*+++....++++................
+*++++++++++++++..............
+*++++++++++++++++............ ...
+*++++++++++++++++++.................
+**+++++++++++++++++++................
+**+++++++++++++++++++++..............
+***++++++++++++++++++++++............
+***++++++++++++++++++++++++......... ...... ...........
+****++++++++++++++++++++++++++...... .........................
+*****++++++++++++++++++++++++++++...............................
+*****++++++++++++++++++++++++++++++++............................
+******+++++++++++++++++++++++++++++++++++...........................
+*******+++++++++++++++++++++++++++++++++++++++.......................
+********+++++++++++++++++++++++++++++++++++++++++++..................
+Evaluated to 0.000000
+ready&gt; <b>mandel(-0.9, -1.4, 0.02, 0.03);</b>
+*******************************************************************************
+*******************************************************************************
+*******************************************************************************
+**********+++++++++++++++++++++************************************************
+*+++++++++++++++++++++++++++++++++++++++***************************************
++++++++++++++++++++++++++++++++++++++++++++++**********************************
+++++++++++++++++++++++++++++++++++++++++++++++++++*****************************
+++++++++++++++++++++++++++++++++++++++++++++++++++++++*************************
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++**********************
++++++++++++++++++++++++++++++++++.........++++++++++++++++++*******************
++++++++++++++++++++++++++++++++.... ......+++++++++++++++++++****************
++++++++++++++++++++++++++++++....... ........+++++++++++++++++++**************
+++++++++++++++++++++++++++++........ ........++++++++++++++++++++************
++++++++++++++++++++++++++++......... .. ...+++++++++++++++++++++**********
+++++++++++++++++++++++++++........... ....++++++++++++++++++++++********
+++++++++++++++++++++++++............. .......++++++++++++++++++++++******
++++++++++++++++++++++++............. ........+++++++++++++++++++++++****
+++++++++++++++++++++++........... ..........++++++++++++++++++++++***
+++++++++++++++++++++........... .........++++++++++++++++++++++*
+++++++++++++++++++............ ...........++++++++++++++++++++
+++++++++++++++++............... .............++++++++++++++++++
+++++++++++++++................. ...............++++++++++++++++
+++++++++++++.................. .................++++++++++++++
++++++++++.................. .................+++++++++++++
+++++++........ . ......... ..++++++++++++
+++............ ...... ....++++++++++
+.............. ...++++++++++
+.............. ....+++++++++
+.............. .....++++++++
+............. ......++++++++
+........... .......++++++++
+......... ........+++++++
+......... ........+++++++
+......... ....+++++++
+........ ...+++++++
+....... ...+++++++
+ ....+++++++
+ .....+++++++
+ ....+++++++
+ ....+++++++
+ ....+++++++
+Evaluated to 0.000000
+ready&gt; <b>^D</b>
+</pre>
+</div>
+
+<p>At this point, you may be starting to realize that Kaleidoscope is a real
+and powerful language. It may not be self-similar :), but it can be used to
+plot things that are!</p>
+
+<p>With this, we conclude the "adding user-defined operators" chapter of the
+tutorial. We have successfully augmented our language, adding the ability to
+extend the language in the library, and we have shown how this can be used to
+build a simple but interesting end-user application in Kaleidoscope. At this
+point, Kaleidoscope can build a variety of applications that are functional and
+can call functions with side-effects, but it can't actually define and mutate a
+variable itself.</p>
+
+<p>Strikingly, variable mutation is an important feature of some
+languages, and it is not at all obvious how to <a href="OCamlLangImpl7.html">add
+support for mutable variables</a> without having to add an "SSA construction"
+phase to your front-end. In the next chapter, we will describe how you can
+add variable mutation without building SSA in your front-end.</p>
+
+</div>
+
+
+<!-- *********************************************************************** -->
+<h2><a name="code">Full Code Listing</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>
+Here is the complete code listing for our running example, enhanced with
+support for user-defined operators. To build this example, use:
+</p>
+
+<div class="doc_code">
+<pre>
+# Compile
+ocamlbuild toy.byte
+# Run
+./toy.byte
+</pre>
+</div>
+
+<p>Here is the code:</p>
+
+<dl>
+<dt>_tags:</dt>
+<dd class="doc_code">
+<pre>
+&lt;{lexer,parser}.ml&gt;: use_camlp4, pp(camlp4of)
+&lt;*.{byte,native}&gt;: g++, use_llvm, use_llvm_analysis
+&lt;*.{byte,native}&gt;: use_llvm_executionengine, use_llvm_target
+&lt;*.{byte,native}&gt;: use_llvm_scalar_opts, use_bindings
+</pre>
+</dd>
+
+<dt>myocamlbuild.ml:</dt>
+<dd class="doc_code">
+<pre>
+open Ocamlbuild_plugin;;
+
+ocaml_lib ~extern:true "llvm";;
+ocaml_lib ~extern:true "llvm_analysis";;
+ocaml_lib ~extern:true "llvm_executionengine";;
+ocaml_lib ~extern:true "llvm_target";;
+ocaml_lib ~extern:true "llvm_scalar_opts";;
+
+flag ["link"; "ocaml"; "g++"] (S[A"-cc"; A"g++"; A"-cclib"; A"-rdynamic"]);;
+dep ["link"; "ocaml"; "use_bindings"] ["bindings.o"];;
+</pre>
+</dd>
+
+<dt>token.ml:</dt>
+<dd class="doc_code">
+<pre>
+(*===----------------------------------------------------------------------===
+ * Lexer Tokens
+ *===----------------------------------------------------------------------===*)
+
+(* The lexer returns these 'Kwd' if it is an unknown character, otherwise one of
+ * these others for known things. *)
+type token =
+ (* commands *)
+ | Def | Extern
+
+ (* primary *)
+ | Ident of string | Number of float
+
+ (* unknown *)
+ | Kwd of char
+
+ (* control *)
+ | If | Then | Else
+ | For | In
+
+ (* operators *)
+ | Binary | Unary
+</pre>
+</dd>
+
+<dt>lexer.ml:</dt>
+<dd class="doc_code">
+<pre>
+(*===----------------------------------------------------------------------===
+ * Lexer
+ *===----------------------------------------------------------------------===*)
+
+let rec lex = parser
+ (* Skip any whitespace. *)
+ | [&lt; ' (' ' | '\n' | '\r' | '\t'); stream &gt;] -&gt; lex stream
+
+ (* identifier: [a-zA-Z][a-zA-Z0-9] *)
+ | [&lt; ' ('A' .. 'Z' | 'a' .. 'z' as c); stream &gt;] -&gt;
+ let buffer = Buffer.create 1 in
+ Buffer.add_char buffer c;
+ lex_ident buffer stream
+
+ (* number: [0-9.]+ *)
+ | [&lt; ' ('0' .. '9' as c); stream &gt;] -&gt;
+ let buffer = Buffer.create 1 in
+ Buffer.add_char buffer c;
+ lex_number buffer stream
+
+ (* Comment until end of line. *)
+ | [&lt; ' ('#'); stream &gt;] -&gt;
+ lex_comment stream
+
+ (* Otherwise, just return the character as its ascii value. *)
+ | [&lt; 'c; stream &gt;] -&gt;
+ [&lt; 'Token.Kwd c; lex stream &gt;]
+
+ (* end of stream. *)
+ | [&lt; &gt;] -&gt; [&lt; &gt;]
+
+and lex_number buffer = parser
+ | [&lt; ' ('0' .. '9' | '.' as c); stream &gt;] -&gt;
+ Buffer.add_char buffer c;
+ lex_number buffer stream
+ | [&lt; stream=lex &gt;] -&gt;
+ [&lt; 'Token.Number (float_of_string (Buffer.contents buffer)); stream &gt;]
+
+and lex_ident buffer = parser
+ | [&lt; ' ('A' .. 'Z' | 'a' .. 'z' | '0' .. '9' as c); stream &gt;] -&gt;
+ Buffer.add_char buffer c;
+ lex_ident buffer stream
+ | [&lt; stream=lex &gt;] -&gt;
+ match Buffer.contents buffer with
+ | "def" -&gt; [&lt; 'Token.Def; stream &gt;]
+ | "extern" -&gt; [&lt; 'Token.Extern; stream &gt;]
+ | "if" -&gt; [&lt; 'Token.If; stream &gt;]
+ | "then" -&gt; [&lt; 'Token.Then; stream &gt;]
+ | "else" -&gt; [&lt; 'Token.Else; stream &gt;]
+ | "for" -&gt; [&lt; 'Token.For; stream &gt;]
+ | "in" -&gt; [&lt; 'Token.In; stream &gt;]
+ | "binary" -&gt; [&lt; 'Token.Binary; stream &gt;]
+ | "unary" -&gt; [&lt; 'Token.Unary; stream &gt;]
+ | id -&gt; [&lt; 'Token.Ident id; stream &gt;]
+
+and lex_comment = parser
+ | [&lt; ' ('\n'); stream=lex &gt;] -&gt; stream
+ | [&lt; 'c; e=lex_comment &gt;] -&gt; e
+ | [&lt; &gt;] -&gt; [&lt; &gt;]
+</pre>
+</dd>
+
+<dt>ast.ml:</dt>
+<dd class="doc_code">
+<pre>
+(*===----------------------------------------------------------------------===
+ * Abstract Syntax Tree (aka Parse Tree)
+ *===----------------------------------------------------------------------===*)
+
+(* expr - Base type for all expression nodes. *)
+type expr =
+ (* variant for numeric literals like "1.0". *)
+ | Number of float
+
+ (* variant for referencing a variable, like "a". *)
+ | Variable of string
+
+ (* variant for a unary operator. *)
+ | Unary of char * expr
+
+ (* variant for a binary operator. *)
+ | Binary of char * expr * expr
+
+ (* variant for function calls. *)
+ | Call of string * expr array
+
+ (* variant for if/then/else. *)
+ | If of expr * expr * expr
+
+ (* variant for for/in. *)
+ | For of string * expr * expr * expr option * expr
+
+(* proto - This type represents the "prototype" for a function, which captures
+ * its name, and its argument names (thus implicitly the number of arguments the
+ * function takes). *)
+type proto =
+ | Prototype of string * string array
+ | BinOpPrototype of string * string array * int
+
+(* func - This type represents a function definition itself. *)
+type func = Function of proto * expr
+</pre>
+</dd>
+
+<dt>parser.ml:</dt>
+<dd class="doc_code">
+<pre>
+(*===---------------------------------------------------------------------===
+ * Parser
+ *===---------------------------------------------------------------------===*)
+
+(* binop_precedence - This holds the precedence for each binary operator that is
+ * defined *)
+let binop_precedence:(char, int) Hashtbl.t = Hashtbl.create 10
+
+(* precedence - Get the precedence of the pending binary operator token. *)
+let precedence c = try Hashtbl.find binop_precedence c with Not_found -&gt; -1
+
+(* primary
+ * ::= identifier
+ * ::= numberexpr
+ * ::= parenexpr
+ * ::= ifexpr
+ * ::= forexpr *)
+let rec parse_primary = parser
+ (* numberexpr ::= number *)
+ | [&lt; 'Token.Number n &gt;] -&gt; Ast.Number n
+
+ (* parenexpr ::= '(' expression ')' *)
+ | [&lt; 'Token.Kwd '('; e=parse_expr; 'Token.Kwd ')' ?? "expected ')'" &gt;] -&gt; e
+
+ (* identifierexpr
+ * ::= identifier
+ * ::= identifier '(' argumentexpr ')' *)
+ | [&lt; 'Token.Ident id; stream &gt;] -&gt;
+ let rec parse_args accumulator = parser
+ | [&lt; e=parse_expr; stream &gt;] -&gt;
+ begin parser
+ | [&lt; 'Token.Kwd ','; e=parse_args (e :: accumulator) &gt;] -&gt; e
+ | [&lt; &gt;] -&gt; e :: accumulator
+ end stream
+ | [&lt; &gt;] -&gt; accumulator
+ in
+ let rec parse_ident id = parser
+ (* Call. *)
+ | [&lt; 'Token.Kwd '(';
+ args=parse_args [];
+ 'Token.Kwd ')' ?? "expected ')'"&gt;] -&gt;
+ Ast.Call (id, Array.of_list (List.rev args))
+
+ (* Simple variable ref. *)
+ | [&lt; &gt;] -&gt; Ast.Variable id
+ in
+ parse_ident id stream
+
+ (* ifexpr ::= 'if' expr 'then' expr 'else' expr *)
+ | [&lt; 'Token.If; c=parse_expr;
+ 'Token.Then ?? "expected 'then'"; t=parse_expr;
+ 'Token.Else ?? "expected 'else'"; e=parse_expr &gt;] -&gt;
+ Ast.If (c, t, e)
+
+ (* forexpr
+ ::= 'for' identifier '=' expr ',' expr (',' expr)? 'in' expression *)
+ | [&lt; 'Token.For;
+ 'Token.Ident id ?? "expected identifier after for";
+ 'Token.Kwd '=' ?? "expected '=' after for";
+ stream &gt;] -&gt;
+ begin parser
+ | [&lt;
+ start=parse_expr;
+ 'Token.Kwd ',' ?? "expected ',' after for";
+ end_=parse_expr;
+ stream &gt;] -&gt;
+ let step =
+ begin parser
+ | [&lt; 'Token.Kwd ','; step=parse_expr &gt;] -&gt; Some step
+ | [&lt; &gt;] -&gt; None
+ end stream
+ in
+ begin parser
+ | [&lt; 'Token.In; body=parse_expr &gt;] -&gt;
+ Ast.For (id, start, end_, step, body)
+ | [&lt; &gt;] -&gt;
+ raise (Stream.Error "expected 'in' after for")
+ end stream
+ | [&lt; &gt;] -&gt;
+ raise (Stream.Error "expected '=' after for")
+ end stream
+
+ | [&lt; &gt;] -&gt; raise (Stream.Error "unknown token when expecting an expression.")
+
+(* unary
+ * ::= primary
+ * ::= '!' unary *)
+and parse_unary = parser
+ (* If this is a unary operator, read it. *)
+ | [&lt; 'Token.Kwd op when op != '(' &amp;&amp; op != ')'; operand=parse_expr &gt;] -&gt;
+ Ast.Unary (op, operand)
+
+ (* If the current token is not an operator, it must be a primary expr. *)
+ | [&lt; stream &gt;] -&gt; parse_primary stream
+
+(* binoprhs
+ * ::= ('+' primary)* *)
+and parse_bin_rhs expr_prec lhs stream =
+ match Stream.peek stream with
+ (* If this is a binop, find its precedence. *)
+ | Some (Token.Kwd c) when Hashtbl.mem binop_precedence c -&gt;
+ let token_prec = precedence c in
+
+ (* If this is a binop that binds at least as tightly as the current binop,
+ * consume it, otherwise we are done. *)
+ if token_prec &lt; expr_prec then lhs else begin
+ (* Eat the binop. *)
+ Stream.junk stream;
+
+ (* Parse the unary expression after the binary operator. *)
+ let rhs = parse_unary stream in
+
+ (* Okay, we know this is a binop. *)
+ let rhs =
+ match Stream.peek stream with
+ | Some (Token.Kwd c2) -&gt;
+ (* If BinOp binds less tightly with rhs than the operator after
+ * rhs, let the pending operator take rhs as its lhs. *)
+ let next_prec = precedence c2 in
+ if token_prec &lt; next_prec
+ then parse_bin_rhs (token_prec + 1) rhs stream
+ else rhs
+ | _ -&gt; rhs
+ in
+
+ (* Merge lhs/rhs. *)
+ let lhs = Ast.Binary (c, lhs, rhs) in
+ parse_bin_rhs expr_prec lhs stream
+ end
+ | _ -&gt; lhs
+
+(* expression
+ * ::= primary binoprhs *)
+and parse_expr = parser
+ | [&lt; lhs=parse_unary; stream &gt;] -&gt; parse_bin_rhs 0 lhs stream
+
+(* prototype
+ * ::= id '(' id* ')'
+ * ::= binary LETTER number? (id, id)
+ * ::= unary LETTER number? (id) *)
+let parse_prototype =
+ let rec parse_args accumulator = parser
+ | [&lt; 'Token.Ident id; e=parse_args (id::accumulator) &gt;] -&gt; e
+ | [&lt; &gt;] -&gt; accumulator
+ in
+ let parse_operator = parser
+ | [&lt; 'Token.Unary &gt;] -&gt; "unary", 1
+ | [&lt; 'Token.Binary &gt;] -&gt; "binary", 2
+ in
+ let parse_binary_precedence = parser
+ | [&lt; 'Token.Number n &gt;] -&gt; int_of_float n
+ | [&lt; &gt;] -&gt; 30
+ in
+ parser
+ | [&lt; 'Token.Ident id;
+ 'Token.Kwd '(' ?? "expected '(' in prototype";
+ args=parse_args [];
+ 'Token.Kwd ')' ?? "expected ')' in prototype" &gt;] -&gt;
+ (* success. *)
+ Ast.Prototype (id, Array.of_list (List.rev args))
+ | [&lt; (prefix, kind)=parse_operator;
+ 'Token.Kwd op ?? "expected an operator";
+ (* Read the precedence if present. *)
+ binary_precedence=parse_binary_precedence;
+ 'Token.Kwd '(' ?? "expected '(' in prototype";
+ args=parse_args [];
+ 'Token.Kwd ')' ?? "expected ')' in prototype" &gt;] -&gt;
+ let name = prefix ^ (String.make 1 op) in
+ let args = Array.of_list (List.rev args) in
+
+ (* Verify right number of arguments for operator. *)
+ if Array.length args != kind
+ then raise (Stream.Error "invalid number of operands for operator")
+ else
+ if kind == 1 then
+ Ast.Prototype (name, args)
+ else
+ Ast.BinOpPrototype (name, args, binary_precedence)
+ | [&lt; &gt;] -&gt;
+ raise (Stream.Error "expected function name in prototype")
+
+(* definition ::= 'def' prototype expression *)
+let parse_definition = parser
+ | [&lt; 'Token.Def; p=parse_prototype; e=parse_expr &gt;] -&gt;
+ Ast.Function (p, e)
+
+(* toplevelexpr ::= expression *)
+let parse_toplevel = parser
+ | [&lt; e=parse_expr &gt;] -&gt;
+ (* Make an anonymous proto. *)
+ Ast.Function (Ast.Prototype ("", [||]), e)
+
+(* external ::= 'extern' prototype *)
+let parse_extern = parser
+ | [&lt; 'Token.Extern; e=parse_prototype &gt;] -&gt; e
+</pre>
+</dd>
+
+<dt>codegen.ml:</dt>
+<dd class="doc_code">
+<pre>
+(*===----------------------------------------------------------------------===
+ * Code Generation
+ *===----------------------------------------------------------------------===*)
+
+open Llvm
+
+exception Error of string
+
+let context = global_context ()
+let the_module = create_module context "my cool jit"
+let builder = builder context
+let named_values:(string, llvalue) Hashtbl.t = Hashtbl.create 10
+let double_type = double_type context
+
+let rec codegen_expr = function
+ | Ast.Number n -&gt; const_float double_type n
+ | Ast.Variable name -&gt;
+ (try Hashtbl.find named_values name with
+ | Not_found -&gt; raise (Error "unknown variable name"))
+ | Ast.Unary (op, operand) -&gt;
+ let operand = codegen_expr operand in
+ let callee = "unary" ^ (String.make 1 op) in
+ let callee =
+ match lookup_function callee the_module with
+ | Some callee -&gt; callee
+ | None -&gt; raise (Error "unknown unary operator")
+ in
+ build_call callee [|operand|] "unop" builder
+ | Ast.Binary (op, lhs, rhs) -&gt;
+ let lhs_val = codegen_expr lhs in
+ let rhs_val = codegen_expr rhs in
+ begin
+ match op with
+ | '+' -&gt; build_add lhs_val rhs_val "addtmp" builder
+ | '-' -&gt; build_sub lhs_val rhs_val "subtmp" builder
+ | '*' -&gt; build_mul lhs_val rhs_val "multmp" builder
+ | '&lt;' -&gt;
+ (* Convert bool 0/1 to double 0.0 or 1.0 *)
+ let i = build_fcmp Fcmp.Ult lhs_val rhs_val "cmptmp" builder in
+ build_uitofp i double_type "booltmp" builder
+ | _ -&gt;
+ (* If it wasn't a builtin binary operator, it must be a user defined
+ * one. Emit a call to it. *)
+ let callee = "binary" ^ (String.make 1 op) in
+ let callee =
+ match lookup_function callee the_module with
+ | Some callee -&gt; callee
+ | None -&gt; raise (Error "binary operator not found!")
+ in
+ build_call callee [|lhs_val; rhs_val|] "binop" builder
+ end
+ | Ast.Call (callee, args) -&gt;
+ (* Look up the name in the module table. *)
+ let callee =
+ match lookup_function callee the_module with
+ | Some callee -&gt; callee
+ | None -&gt; raise (Error "unknown function referenced")
+ in
+ let params = params callee in
+
+ (* If argument mismatch error. *)
+ if Array.length params == Array.length args then () else
+ raise (Error "incorrect # arguments passed");
+ let args = Array.map codegen_expr args in
+ build_call callee args "calltmp" builder
+ | Ast.If (cond, then_, else_) -&gt;
+ let cond = codegen_expr cond in
+
+ (* Convert condition to a bool by comparing equal to 0.0 *)
+ let zero = const_float double_type 0.0 in
+ let cond_val = build_fcmp Fcmp.One cond zero "ifcond" builder in
+
+ (* Grab the first block so that we might later add the conditional branch
+ * to it at the end of the function. *)
+ let start_bb = insertion_block builder in
+ let the_function = block_parent start_bb in
+
+ let then_bb = append_block context "then" the_function in
+
+ (* Emit 'then' value. *)
+ position_at_end then_bb builder;
+ let then_val = codegen_expr then_ in
+
+ (* Codegen of 'then' can change the current block, update then_bb for the
+ * phi. We create a new name because one is used for the phi node, and the
+ * other is used for the conditional branch. *)
+ let new_then_bb = insertion_block builder in
+
+ (* Emit 'else' value. *)
+ let else_bb = append_block context "else" the_function in
+ position_at_end else_bb builder;
+ let else_val = codegen_expr else_ in
+
+ (* Codegen of 'else' can change the current block, update else_bb for the
+ * phi. *)
+ let new_else_bb = insertion_block builder in
+
+ (* Emit merge block. *)
+ let merge_bb = append_block context "ifcont" the_function in
+ position_at_end merge_bb builder;
+ let incoming = [(then_val, new_then_bb); (else_val, new_else_bb)] in
+ let phi = build_phi incoming "iftmp" builder in
+
+ (* Return to the start block to add the conditional branch. *)
+ position_at_end start_bb builder;
+ ignore (build_cond_br cond_val then_bb else_bb builder);
+
+ (* Set a unconditional branch at the end of the 'then' block and the
+ * 'else' block to the 'merge' block. *)
+ position_at_end new_then_bb builder; ignore (build_br merge_bb builder);
+ position_at_end new_else_bb builder; ignore (build_br merge_bb builder);
+
+ (* Finally, set the builder to the end of the merge block. *)
+ position_at_end merge_bb builder;
+
+ phi
+ | Ast.For (var_name, start, end_, step, body) -&gt;
+ (* Emit the start code first, without 'variable' in scope. *)
+ let start_val = codegen_expr start in
+
+ (* Make the new basic block for the loop header, inserting after current
+ * block. *)
+ let preheader_bb = insertion_block builder in
+ let the_function = block_parent preheader_bb in
+ let loop_bb = append_block context "loop" the_function in
+
+ (* Insert an explicit fall through from the current block to the
+ * loop_bb. *)
+ ignore (build_br loop_bb builder);
+
+ (* Start insertion in loop_bb. *)
+ position_at_end loop_bb builder;
+
+ (* Start the PHI node with an entry for start. *)
+ let variable = build_phi [(start_val, preheader_bb)] var_name builder in
+
+ (* Within the loop, the variable is defined equal to the PHI node. If it
+ * shadows an existing variable, we have to restore it, so save it
+ * now. *)
+ let old_val =
+ try Some (Hashtbl.find named_values var_name) with Not_found -&gt; None
+ in
+ Hashtbl.add named_values var_name variable;
+
+ (* Emit the body of the loop. This, like any other expr, can change the
+ * current BB. Note that we ignore the value computed by the body, but
+ * don't allow an error *)
+ ignore (codegen_expr body);
+
+ (* Emit the step value. *)
+ let step_val =
+ match step with
+ | Some step -&gt; codegen_expr step
+ (* If not specified, use 1.0. *)
+ | None -&gt; const_float double_type 1.0
+ in
+
+ let next_var = build_add variable step_val "nextvar" builder in
+
+ (* Compute the end condition. *)
+ let end_cond = codegen_expr end_ in
+
+ (* Convert condition to a bool by comparing equal to 0.0. *)
+ let zero = const_float double_type 0.0 in
+ let end_cond = build_fcmp Fcmp.One end_cond zero "loopcond" builder in
+
+ (* Create the "after loop" block and insert it. *)
+ let loop_end_bb = insertion_block builder in
+ let after_bb = append_block context "afterloop" the_function in
+
+ (* Insert the conditional branch into the end of loop_end_bb. *)
+ ignore (build_cond_br end_cond loop_bb after_bb builder);
+
+ (* Any new code will be inserted in after_bb. *)
+ position_at_end after_bb builder;
+
+ (* Add a new entry to the PHI node for the backedge. *)
+ add_incoming (next_var, loop_end_bb) variable;
+
+ (* Restore the unshadowed variable. *)
+ begin match old_val with
+ | Some old_val -&gt; Hashtbl.add named_values var_name old_val
+ | None -&gt; ()
+ end;
+
+ (* for expr always returns 0.0. *)
+ const_null double_type
+
+let codegen_proto = function
+ | Ast.Prototype (name, args) | Ast.BinOpPrototype (name, args, _) -&gt;
+ (* Make the function type: double(double,double) etc. *)
+ let doubles = Array.make (Array.length args) double_type in
+ let ft = function_type double_type doubles in
+ let f =
+ match lookup_function name the_module with
+ | None -&gt; declare_function name ft the_module
+
+ (* If 'f' conflicted, there was already something named 'name'. If it
+ * has a body, don't allow redefinition or reextern. *)
+ | Some f -&gt;
+ (* If 'f' already has a body, reject this. *)
+ if block_begin f &lt;&gt; At_end f then
+ raise (Error "redefinition of function");
+
+ (* If 'f' took a different number of arguments, reject. *)
+ if element_type (type_of f) &lt;&gt; ft then
+ raise (Error "redefinition of function with different # args");
+ f
+ in
+
+ (* Set names for all arguments. *)
+ Array.iteri (fun i a -&gt;
+ let n = args.(i) in
+ set_value_name n a;
+ Hashtbl.add named_values n a;
+ ) (params f);
+ f
+
+let codegen_func the_fpm = function
+ | Ast.Function (proto, body) -&gt;
+ Hashtbl.clear named_values;
+ let the_function = codegen_proto proto in
+
+ (* If this is an operator, install it. *)
+ begin match proto with
+ | Ast.BinOpPrototype (name, args, prec) -&gt;
+ let op = name.[String.length name - 1] in
+ Hashtbl.add Parser.binop_precedence op prec;
+ | _ -&gt; ()
+ end;
+
+ (* Create a new basic block to start insertion into. *)
+ let bb = append_block context "entry" the_function in
+ position_at_end bb builder;
+
+ try
+ let ret_val = codegen_expr body in
+
+ (* Finish off the function. *)
+ let _ = build_ret ret_val builder in
+
+ (* Validate the generated code, checking for consistency. *)
+ Llvm_analysis.assert_valid_function the_function;
+
+ (* Optimize the function. *)
+ let _ = PassManager.run_function the_function the_fpm in
+
+ the_function
+ with e -&gt;
+ delete_function the_function;
+ raise e
+</pre>
+</dd>
+
+<dt>toplevel.ml:</dt>
+<dd class="doc_code">
+<pre>
+(*===----------------------------------------------------------------------===
+ * Top-Level parsing and JIT Driver
+ *===----------------------------------------------------------------------===*)
+
+open Llvm
+open Llvm_executionengine
+
+(* top ::= definition | external | expression | ';' *)
+let rec main_loop the_fpm the_execution_engine stream =
+ match Stream.peek stream with
+ | None -&gt; ()
+
+ (* ignore top-level semicolons. *)
+ | Some (Token.Kwd ';') -&gt;
+ Stream.junk stream;
+ main_loop the_fpm the_execution_engine stream
+
+ | Some token -&gt;
+ begin
+ try match token with
+ | Token.Def -&gt;
+ let e = Parser.parse_definition stream in
+ print_endline "parsed a function definition.";
+ dump_value (Codegen.codegen_func the_fpm e);
+ | Token.Extern -&gt;
+ let e = Parser.parse_extern stream in
+ print_endline "parsed an extern.";
+ dump_value (Codegen.codegen_proto e);
+ | _ -&gt;
+ (* Evaluate a top-level expression into an anonymous function. *)
+ let e = Parser.parse_toplevel stream in
+ print_endline "parsed a top-level expr";
+ let the_function = Codegen.codegen_func the_fpm e in
+ dump_value the_function;
+
+ (* JIT the function, returning a function pointer. *)
+ let result = ExecutionEngine.run_function the_function [||]
+ the_execution_engine in
+
+ print_string "Evaluated to ";
+ print_float (GenericValue.as_float Codegen.double_type result);
+ print_newline ();
+ with Stream.Error s | Codegen.Error s -&gt;
+ (* Skip token for error recovery. *)
+ Stream.junk stream;
+ print_endline s;
+ end;
+ print_string "ready&gt; "; flush stdout;
+ main_loop the_fpm the_execution_engine stream
+</pre>
+</dd>
+
+<dt>toy.ml:</dt>
+<dd class="doc_code">
+<pre>
+(*===----------------------------------------------------------------------===
+ * Main driver code.
+ *===----------------------------------------------------------------------===*)
+
+open Llvm
+open Llvm_executionengine
+open Llvm_target
+open Llvm_scalar_opts
+
+let main () =
+ ignore (initialize_native_target ());
+
+ (* Install standard binary operators.
+ * 1 is the lowest precedence. *)
+ Hashtbl.add Parser.binop_precedence '&lt;' 10;
+ Hashtbl.add Parser.binop_precedence '+' 20;
+ Hashtbl.add Parser.binop_precedence '-' 20;
+ Hashtbl.add Parser.binop_precedence '*' 40; (* highest. *)
+
+ (* Prime the first token. *)
+ print_string "ready&gt; "; flush stdout;
+ let stream = Lexer.lex (Stream.of_channel stdin) in
+
+ (* Create the JIT. *)
+ let the_execution_engine = ExecutionEngine.create Codegen.the_module in
+ let the_fpm = PassManager.create_function Codegen.the_module in
+
+ (* Set up the optimizer pipeline. Start with registering info about how the
+ * target lays out data structures. *)
+ TargetData.add (ExecutionEngine.target_data the_execution_engine) the_fpm;
+
+ (* Do simple "peephole" optimizations and bit-twiddling optzn. *)
+ add_instruction_combination the_fpm;
+
+ (* reassociate expressions. *)
+ add_reassociation the_fpm;
+
+ (* Eliminate Common SubExpressions. *)
+ add_gvn the_fpm;
+
+ (* Simplify the control flow graph (deleting unreachable blocks, etc). *)
+ add_cfg_simplification the_fpm;
+
+ ignore (PassManager.initialize the_fpm);
+
+ (* Run the main "interpreter loop" now. *)
+ Toplevel.main_loop the_fpm the_execution_engine stream;
+
+ (* Print out all the generated code. *)
+ dump_module Codegen.the_module
+;;
+
+main ()
+</pre>
+</dd>
+
+<dt>bindings.c</dt>
+<dd class="doc_code">
+<pre>
+#include &lt;stdio.h&gt;
+
+/* putchard - putchar that takes a double and returns 0. */
+extern double putchard(double X) {
+ putchar((char)X);
+ return 0;
+}
+
+/* printd - printf that takes a double prints it as "%f\n", returning 0. */
+extern double printd(double X) {
+ printf("%f\n", X);
+ return 0;
+}
+</pre>
+</dd>
+</dl>
+
+<a href="OCamlLangImpl7.html">Next: Extending the language: mutable variables /
+SSA construction</a>
+</div>
+
+<!-- *********************************************************************** -->
+<hr>
+<address>
+ <a href="http://jigsaw.w3.org/css-validator/check/referer"><img
+ src="http://jigsaw.w3.org/css-validator/images/vcss" alt="Valid CSS!"></a>
+ <a href="http://validator.w3.org/check/referer"><img
+ src="http://www.w3.org/Icons/valid-html401" alt="Valid HTML 4.01!"></a>
+
+ <a href="mailto:sabre@nondot.org">Chris Lattner</a><br>
+ <a href="mailto:idadesub@users.sourceforge.net">Erick Tryzelaar</a><br>
+ <a href="http://llvm.org/">The LLVM Compiler Infrastructure</a><br>
+ Last modified: $Date$
+</address>
+</body>
+</html>
diff --git a/docs/tutorial/OCamlLangImpl7.html b/docs/tutorial/OCamlLangImpl7.html
new file mode 100644
index 00000000000..d106ad0701c
--- /dev/null
+++ b/docs/tutorial/OCamlLangImpl7.html
@@ -0,0 +1,1904 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
+ "http://www.w3.org/TR/html4/strict.dtd">
+
+<html>
+<head>
+ <title>Kaleidoscope: Extending the Language: Mutable Variables / SSA
+ construction</title>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
+ <meta name="author" content="Chris Lattner">
+ <meta name="author" content="Erick Tryzelaar">
+ <link rel="stylesheet" href="../_static/llvm.css" type="text/css">
+</head>
+
+<body>
+
+<h1>Kaleidoscope: Extending the Language: Mutable Variables</h1>
+
+<ul>
+<li><a href="index.html">Up to Tutorial Index</a></li>
+<li>Chapter 7
+ <ol>
+ <li><a href="#intro">Chapter 7 Introduction</a></li>
+ <li><a href="#why">Why is this a hard problem?</a></li>
+ <li><a href="#memory">Memory in LLVM</a></li>
+ <li><a href="#kalvars">Mutable Variables in Kaleidoscope</a></li>
+ <li><a href="#adjustments">Adjusting Existing Variables for
+ Mutation</a></li>
+ <li><a href="#assignment">New Assignment Operator</a></li>
+ <li><a href="#localvars">User-defined Local Variables</a></li>
+ <li><a href="#code">Full Code Listing</a></li>
+ </ol>
+</li>
+<li><a href="OCamlLangImpl8.html">Chapter 8</a>: Conclusion and other useful LLVM
+ tidbits</li>
+</ul>
+
+<div class="doc_author">
+ <p>
+ Written by <a href="mailto:sabre@nondot.org">Chris Lattner</a>
+ and <a href="mailto:idadesub@users.sourceforge.net">Erick Tryzelaar</a>
+ </p>
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="intro">Chapter 7 Introduction</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>Welcome to Chapter 7 of the "<a href="index.html">Implementing a language
+with LLVM</a>" tutorial. In chapters 1 through 6, we've built a very
+respectable, albeit simple, <a
+href="http://en.wikipedia.org/wiki/Functional_programming">functional
+programming language</a>. In our journey, we learned some parsing techniques,
+how to build and represent an AST, how to build LLVM IR, and how to optimize
+the resultant code as well as JIT compile it.</p>
+
+<p>While Kaleidoscope is interesting as a functional language, the fact that it
+is functional makes it "too easy" to generate LLVM IR for it. In particular, a
+functional language makes it very easy to build LLVM IR directly in <a
+href="http://en.wikipedia.org/wiki/Static_single_assignment_form">SSA form</a>.
+Since LLVM requires that the input code be in SSA form, this is a very nice
+property; in contrast, it is often unclear to newcomers how to generate code
+for an imperative language with mutable variables.</p>
+
+<p>The short (and happy) summary of this chapter is that there is no need for
+your front-end to build SSA form: LLVM provides highly tuned and well tested
+support for this, though the way it works is a bit unexpected for some.</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="why">Why is this a hard problem?</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>
+To understand why mutable variables cause complexities in SSA construction,
+consider this extremely simple C example:
+</p>
+
+<div class="doc_code">
+<pre>
+int G, H;
+int test(_Bool Condition) {
+ int X;
+ if (Condition)
+ X = G;
+ else
+ X = H;
+ return X;
+}
+</pre>
+</div>
+
+<p>In this case, we have the variable "X", whose value depends on the path
+executed in the program. Because there are two different possible values for X
+before the return instruction, a PHI node is inserted to merge the two values.
+The LLVM IR that we want for this example looks like this:</p>
+
+<div class="doc_code">
+<pre>
+@G = weak global i32 0 ; type of @G is i32*
+@H = weak global i32 0 ; type of @H is i32*
+
+define i32 @test(i1 %Condition) {
+entry:
+ br i1 %Condition, label %cond_true, label %cond_false
+
+cond_true:
+ %X.0 = load i32* @G
+ br label %cond_next
+
+cond_false:
+ %X.1 = load i32* @H
+ br label %cond_next
+
+cond_next:
+ %X.2 = phi i32 [ %X.1, %cond_false ], [ %X.0, %cond_true ]
+ ret i32 %X.2
+}
+</pre>
+</div>
+
+<p>In this example, the loads from the G and H global variables are explicit in
+the LLVM IR, and they live in the then/else branches of the if statement
+(cond_true/cond_false). In order to merge the incoming values, the X.2 phi node
+in the cond_next block selects the right value to use based on where control
+flow is coming from: if control flow comes from the cond_false block, X.2 gets
+the value of X.1. Alternatively, if control flow comes from cond_true, it gets
+the value of X.0. The intent of this chapter is not to explain the details of
+SSA form. For more information, see one of the many <a
+href="http://en.wikipedia.org/wiki/Static_single_assignment_form">online
+references</a>.</p>
+
+<p>The question for this chapter is "who places the phi nodes when lowering
+assignments to mutable variables?". The issue here is that LLVM
+<em>requires</em> that its IR be in SSA form: there is no "non-ssa" mode for it.
+However, SSA construction requires non-trivial algorithms and data structures,
+so it is inconvenient and wasteful for every front-end to have to reproduce this
+logic.</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="memory">Memory in LLVM</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>The 'trick' here is that while LLVM does require all register values to be
+in SSA form, it does not require (or permit) memory objects to be in SSA form.
+In the example above, note that the loads from G and H are direct accesses to
+G and H: they are not renamed or versioned. This differs from some other
+compiler systems, which do try to version memory objects. In LLVM, instead of
+encoding dataflow analysis of memory into the LLVM IR, it is handled with <a
+href="../WritingAnLLVMPass.html">Analysis Passes</a> which are computed on
+demand.</p>
+
+<p>
+With this in mind, the high-level idea is that we want to make a stack variable
+(which lives in memory, because it is on the stack) for each mutable object in
+a function. To take advantage of this trick, we need to talk about how LLVM
+represents stack variables.
+</p>
+
+<p>In LLVM, all memory accesses are explicit with load/store instructions, and
+it is carefully designed not to have (or need) an "address-of" operator. Notice
+how the type of the @G/@H global variables is actually "i32*" even though the
+variable is defined as "i32". What this means is that @G defines <em>space</em>
+for an i32 in the global data area, but its <em>name</em> actually refers to the
+address for that space. Stack variables work the same way, except that instead of
+being declared with global variable definitions, they are declared with the
+<a href="../LangRef.html#i_alloca">LLVM alloca instruction</a>:</p>
+
+<div class="doc_code">
+<pre>
+define i32 @example() {
+entry:
+ %X = alloca i32 ; type of %X is i32*.
+ ...
+ %tmp = load i32* %X ; load the stack value %X from the stack.
+ %tmp2 = add i32 %tmp, 1 ; increment it
+ store i32 %tmp2, i32* %X ; store it back
+ ...
+</pre>
+</div>
+
+<p>This code shows an example of how you can declare and manipulate a stack
+variable in the LLVM IR. Stack memory allocated with the alloca instruction is
+fully general: you can pass the address of the stack slot to functions, you can
+store it in other variables, etc. In our example above, we could rewrite the
+example to use the alloca technique to avoid using a PHI node:</p>
+
+<div class="doc_code">
+<pre>
+@G = weak global i32 0 ; type of @G is i32*
+@H = weak global i32 0 ; type of @H is i32*
+
+define i32 @test(i1 %Condition) {
+entry:
+ %X = alloca i32 ; type of %X is i32*.
+ br i1 %Condition, label %cond_true, label %cond_false
+
+cond_true:
+ %X.0 = load i32* @G
+ store i32 %X.0, i32* %X ; Update X
+ br label %cond_next
+
+cond_false:
+ %X.1 = load i32* @H
+ store i32 %X.1, i32* %X ; Update X
+ br label %cond_next
+
+cond_next:
+ %X.2 = load i32* %X ; Read X
+ ret i32 %X.2
+}
+</pre>
+</div>
+
+<p>With this, we have discovered a way to handle arbitrary mutable variables
+without the need to create Phi nodes at all:</p>
+
+<ol>
+<li>Each mutable variable becomes a stack allocation.</li>
+<li>Each read of the variable becomes a load from the stack.</li>
+<li>Each update of the variable becomes a store to the stack.</li>
+<li>Taking the address of a variable just uses the stack address directly.</li>
+</ol>
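+
+<p>To make this concrete with the OCaml bindings used throughout this tutorial,
+a read and an update of such a stack slot each boil down to a single builder
+call. This is only a sketch with hypothetical names (<tt>alloca_for_x</tt> and
+<tt>new_val</tt> are placeholders, and the global <tt>builder</tt> from
+<tt>codegen.ml</tt> is assumed); the real Kaleidoscope implementation appears
+later in this chapter:</p>
+
+<div class="doc_code">
+<pre>
+(* Sketch: given the alloca for a mutable variable "x", a read of the variable
+ * is a load from the stack slot and an update is a store to it. *)
+let read_x alloca_for_x =
+  build_load alloca_for_x "x" builder
+
+let write_x alloca_for_x new_val =
+  ignore (build_store new_val alloca_for_x builder)
+</pre>
+</div>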
+
+<p>While this solution has solved our immediate problem, it introduced another
+one: we have now apparently introduced a lot of stack traffic for very simple
+and common operations, a major performance problem. Fortunately for us, the
+LLVM optimizer has a highly-tuned optimization pass named "mem2reg" that handles
+this case, promoting allocas like this into SSA registers, inserting Phi nodes
+as appropriate. If you run this example through the pass, for example, you'll
+get:</p>
+
+<div class="doc_code">
+<pre>
+$ <b>llvm-as &lt; example.ll | opt -mem2reg | llvm-dis</b>
+@G = weak global i32 0
+@H = weak global i32 0
+
+define i32 @test(i1 %Condition) {
+entry:
+ br i1 %Condition, label %cond_true, label %cond_false
+
+cond_true:
+ %X.0 = load i32* @G
+ br label %cond_next
+
+cond_false:
+ %X.1 = load i32* @H
+ br label %cond_next
+
+cond_next:
+ %X.01 = phi i32 [ %X.1, %cond_false ], [ %X.0, %cond_true ]
+ ret i32 %X.01
+}
+</pre>
+</div>
+
+<p>The mem2reg pass implements the standard "iterated dominance frontier"
+algorithm for constructing SSA form and has a number of optimizations that speed
+up (very common) degenerate cases. The mem2reg optimization pass is the answer
+to dealing with mutable variables, and we highly recommend that you depend on
+it. Note that mem2reg only works on variables in certain circumstances:</p>
+
+<ol>
+<li>mem2reg is alloca-driven: it looks for allocas and if it can handle them, it
+promotes them. It does not apply to global variables or heap allocations.</li>
+
+<li>mem2reg only looks for alloca instructions in the entry block of the
+function. Being in the entry block guarantees that the alloca is only executed
+once, which makes analysis simpler.</li>
+
+<li>mem2reg only promotes allocas whose uses are direct loads and stores. If
+the address of the stack object is passed to a function, or if any funny pointer
+arithmetic is involved, the alloca will not be promoted (see the sketch after
+this list).</li>
+
+<li>mem2reg only works on allocas of <a
+href="../LangRef.html#t_classifications">first class</a>
+values (such as pointers, scalars and vectors), and only if the array size
+of the allocation is 1 (or missing in the .ll file). mem2reg is not capable of
+promoting structs or arrays to registers. Note that the "scalarrepl" pass is
+more powerful and can promote structs, "unions", and arrays in many cases.</li>
+
+</ol>
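+
+<p>To make the third restriction concrete, here is a rough sketch using the
+OCaml bindings (the external <tt>use_ptr</tt> function is invented purely for
+illustration, and <tt>builder</tt>, <tt>double_type</tt> and <tt>the_module</tt>
+are assumed to be set up as in our code generator). The first alloca is used
+only through direct loads and stores, so mem2reg can promote it; the second
+alloca's address escapes into a call, so mem2reg must leave it alone:</p>
+
+<div class="doc_code">
+<pre>
+(* Sketch only: contrast a promotable alloca with one whose address escapes. *)
+let promotable_vs_escaping () =
+  (* Only direct loads and stores: mem2reg will promote this one. *)
+  let ok = build_alloca double_type "ok" builder in
+  ignore (build_store (const_float double_type 1.0) ok builder);
+  ignore (build_load ok "okval" builder);
+
+  (* The address is passed to a call, so this one is not promoted. *)
+  let use_ptr_ty = function_type double_type [| pointer_type double_type |] in
+  let use_ptr = declare_function "use_ptr" use_ptr_ty the_module in
+  let escapes = build_alloca double_type "escapes" builder in
+  ignore (build_call use_ptr [| escapes |] "calltmp" builder)
+</pre>
+</div>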
+
+<p>
+All of these properties are easy to satisfy for most imperative languages, and
+we'll illustrate this below with Kaleidoscope. The final question you may be
+asking is: should I bother with this nonsense for my front-end? Wouldn't it be
+better if I just did SSA construction directly, avoiding use of the mem2reg
+optimization pass? In short, we strongly recommend that you use this technique
+for building SSA form, unless there is an extremely good reason not to. Using
+this technique is:</p>
+
+<ul>
+<li>Proven and well tested: llvm-gcc and clang both use this technique for local
+mutable variables. As such, the most common clients of LLVM are using this to
+handle the bulk of their variables. You can be sure that bugs are found fast and
+fixed early.</li>
+
+<li>Extremely Fast: mem2reg has a number of special cases that make it fast in
+common cases as well as fully general. For example, it has fast-paths for
+variables that are only used in a single block, variables that only have one
+assignment point, good heuristics to avoid insertion of unneeded phi nodes, etc.
+</li>
+
+<li>Needed for debug info generation: <a href="../SourceLevelDebugging.html">
+Debug information in LLVM</a> relies on having the address of the variable
+exposed so that debug info can be attached to it. This technique dovetails
+very naturally with this style of debug info.</li>
+</ul>
+
+<p>If nothing else, this makes it much easier to get your front-end up and
+running, and is very simple to implement. Let's extend Kaleidoscope with mutable
+variables now!
+</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="kalvars">Mutable Variables in Kaleidoscope</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>Now that we know the sort of problem we want to tackle, let's see what this
+looks like in the context of our little Kaleidoscope language. We're going to
+add two features:</p>
+
+<ol>
+<li>The ability to mutate variables with the '=' operator.</li>
+<li>The ability to define new variables.</li>
+</ol>
+
+<p>While the first item is really what this is about, we only have variables
+for incoming arguments as well as for induction variables, and redefining those only
+goes so far :). Also, the ability to define new variables is a
+useful thing regardless of whether you will be mutating them. Here's a
+motivating example that shows how we could use these:</p>
+
+<div class="doc_code">
+<pre>
+# Define ':' for sequencing: as a low-precedence operator that ignores operands
+# and just returns the RHS.
+def binary : 1 (x y) y;
+
+# Recursive fib, we could do this before.
+def fib(x)
+ if (x &lt; 3) then
+ 1
+ else
+ fib(x-1)+fib(x-2);
+
+# Iterative fib.
+def fibi(x)
+ <b>var a = 1, b = 1, c in</b>
+ (for i = 3, i &lt; x in
+ <b>c = a + b</b> :
+ <b>a = b</b> :
+ <b>b = c</b>) :
+ b;
+
+# Call it.
+fibi(10);
+</pre>
+</div>
+
+<p>
+In order to mutate variables, we have to change our existing variables to use
+the "alloca trick". Once we have that, we'll add our new operator, then extend
+Kaleidoscope to support new variable definitions.
+</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="adjustments">Adjusting Existing Variables for Mutation</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>
+The symbol table in Kaleidoscope is managed at code generation time by the
+'<tt>named_values</tt>' map. This map currently keeps track of the LLVM
+"Value*" that holds the double value for the named variable. In order to
+support mutation, we need to change this slightly, so that
+<tt>named_values</tt> holds the <em>memory location</em> of the variable in
+question. Note that this change is a refactoring: it changes the structure of
+the code, but does not (by itself) change the behavior of the compiler. All of
+these changes are isolated in the Kaleidoscope code generator.</p>
+
+<p>
+At this point in Kaleidoscope's development, it only supports variables for two
+things: incoming arguments to functions and the induction variable of 'for'
+loops. For consistency, we'll allow mutation of these variables in addition to
+other user-defined variables. This means that these will both need memory
+locations.
+</p>
+
+<p>To start our transformation of Kaleidoscope, we'll change the
+<tt>named_values</tt> map so that it maps to the alloca for each variable
+(conceptually an AllocaInst*) rather than to its current value. Once we do
+this, we need to update every part of the code generator that touches the
+map:</p>
+
+<p><b>Note:</b> the OCaml bindings currently model both <tt>Value*</tt>s and
+<tt>AllocaInst*</tt>s as <tt>Llvm.llvalue</tt>s, but this may change in the
+future to be more type safe.</p>
+
+<div class="doc_code">
+<pre>
+let named_values:(string, llvalue) Hashtbl.t = Hashtbl.create 10
+</pre>
+</div>
+
+<p>Also, since we will need to create these allocas, we'll use a helper
+function that ensures that the allocas are created in the entry block of the
+function:</p>
+
+<div class="doc_code">
+<pre>
+(* Create an alloca instruction in the entry block of the function. This
+ * is used for mutable variables etc. *)
+let create_entry_block_alloca the_function var_name =
+ let builder = builder_at context (instr_begin (entry_block the_function)) in
+ build_alloca double_type var_name builder
+</pre>
+</div>
+
+<p>This funny-looking code creates an <tt>Llvm.llbuilder</tt> object that
+points at the first instruction of the entry block. It then creates an alloca
+with the expected name and returns it. Because all values in Kaleidoscope are
+doubles, there is no need to pass in a type to use.</p>
+
+<p>With this in place, the first functionality change we want to make is to
+variable references. In our new scheme, variables live on the stack, so code
+generating a reference to them actually needs to produce a load from the stack
+slot:</p>
+
+<div class="doc_code">
+<pre>
+let rec codegen_expr = function
+ ...
+ | Ast.Variable name -&gt;
+ let v = try Hashtbl.find named_values name with
+ | Not_found -&gt; raise (Error "unknown variable name")
+ in
+ <b>(* Load the value. *)
+ build_load v name builder</b>
+</pre>
+</div>
+
+<p>As you can see, this is pretty straightforward. Now we need to update the
+things that define the variables to set up the alloca. We'll start with
+<tt>codegen_expr Ast.For ...</tt> (see the <a href="#code">full code listing</a>
+for the unabridged code):</p>
+
+<div class="doc_code">
+<pre>
+ | Ast.For (var_name, start, end_, step, body) -&gt;
+ let the_function = block_parent (insertion_block builder) in
+
+ (* Create an alloca for the variable in the entry block. *)
+ <b>let alloca = create_entry_block_alloca the_function var_name in</b>
+
+ (* Emit the start code first, without 'variable' in scope. *)
+ let start_val = codegen_expr start in
+
+ <b>(* Store the value into the alloca. *)
+ ignore(build_store start_val alloca builder);</b>
+
+ ...
+
+ (* Within the loop, the variable now lives in the alloca. If it
+ * shadows an existing variable, we have to restore it, so save it
+ * now. *)
+ let old_val =
+ try Some (Hashtbl.find named_values var_name) with Not_found -&gt; None
+ in
+ <b>Hashtbl.add named_values var_name alloca;</b>
+
+ ...
+
+ (* Compute the end condition. *)
+ let end_cond = codegen_expr end_ in
+
+ <b>(* Reload, increment, and restore the alloca. This handles the case where
+ * the body of the loop mutates the variable. *)
+ let cur_var = build_load alloca var_name builder in
+ let next_var = build_add cur_var step_val "nextvar" builder in
+ ignore(build_store next_var alloca builder);</b>
+ ...
+</pre>
+</div>
+
+<p>This code is virtually identical to the code <a
+href="OCamlLangImpl5.html#forcodegen">before we allowed mutable variables</a>.
+The big difference is that we no longer have to construct a PHI node, and we use
+load/store to access the variable as needed.</p>
+
+<p>To support mutable argument variables, we also need to make allocas for
+them. The code for this is pretty simple:</p>
+
+<div class="doc_code">
+<pre>
+(* Create an alloca for each argument and register the argument in the symbol
+ * table so that references to it will succeed. *)
+let create_argument_allocas the_function proto =
+ let args = match proto with
+ | Ast.Prototype (_, args) | Ast.BinOpPrototype (_, args, _) -&gt; args
+ in
+ Array.iteri (fun i ai -&gt;
+ let var_name = args.(i) in
+ (* Create an alloca for this variable. *)
+ let alloca = create_entry_block_alloca the_function var_name in
+
+ (* Store the initial value into the alloca. *)
+ ignore(build_store ai alloca builder);
+
+ (* Add arguments to variable symbol table. *)
+ Hashtbl.add named_values var_name alloca;
+ ) (params the_function)
+</pre>
+</div>
+
+<p>For each argument, we make an alloca, store the function's incoming value
+into the alloca, and register the alloca as the memory location for the
+argument. This function is invoked by <tt>Codegen.codegen_func</tt> right after
+it sets up the entry block for the function.</p>
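+
+<p>For reference, the relevant call site in <tt>Codegen.codegen_func</tt>
+(excerpted from the full code listing at the end of this chapter) looks like
+this:</p>
+
+<div class="doc_code">
+<pre>
+  (* Create a new basic block to start insertion into. *)
+  let bb = append_block context "entry" the_function in
+  position_at_end bb builder;
+
+  try
+    (* Add all arguments to the symbol table and create their allocas. *)
+    create_argument_allocas the_function proto;
+    ...
+</pre>
+</div>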
+
+<p>The final missing piece is adding the mem2reg pass, which allows us to get
+good codegen once again:</p>
+
+<div class="doc_code">
+<pre>
+let main () =
+ ...
+ let the_fpm = PassManager.create_function Codegen.the_module in
+
+ (* Set up the optimizer pipeline. Start with registering info about how the
+ * target lays out data structures. *)
+ TargetData.add (ExecutionEngine.target_data the_execution_engine) the_fpm;
+
+ <b>(* Promote allocas to registers. *)
+ add_memory_to_register_promotion the_fpm;</b>
+
+ (* Do simple "peephole" optimizations and bit-twiddling optzn. *)
+ add_instruction_combining the_fpm;
+
+ (* reassociate expressions. *)
+ add_reassociation the_fpm;
+</pre>
+</div>
+
+<p>It is interesting to see what the code looks like before and after the
+mem2reg optimization runs. For example, this is the before/after code for our
+recursive fib function. Before the optimization:</p>
+
+<div class="doc_code">
+<pre>
+define double @fib(double %x) {
+entry:
+ <b>%x1 = alloca double
+ store double %x, double* %x1
+ %x2 = load double* %x1</b>
+ %cmptmp = fcmp ult double %x2, 3.000000e+00
+ %booltmp = uitofp i1 %cmptmp to double
+ %ifcond = fcmp one double %booltmp, 0.000000e+00
+ br i1 %ifcond, label %then, label %else
+
+then: ; preds = %entry
+ br label %ifcont
+
+else: ; preds = %entry
+ <b>%x3 = load double* %x1</b>
+ %subtmp = fsub double %x3, 1.000000e+00
+ %calltmp = call double @fib(double %subtmp)
+ <b>%x4 = load double* %x1</b>
+ %subtmp5 = fsub double %x4, 2.000000e+00
+ %calltmp6 = call double @fib(double %subtmp5)
+ %addtmp = fadd double %calltmp, %calltmp6
+ br label %ifcont
+
+ifcont: ; preds = %else, %then
+ %iftmp = phi double [ 1.000000e+00, %then ], [ %addtmp, %else ]
+ ret double %iftmp
+}
+</pre>
+</div>
+
+<p>Here there is only one variable (x, the input argument) but you can still
+see the extremely simple-minded code generation strategy we are using. In the
+entry block, an alloca is created, and the initial input value is stored into
+it. Each reference to the variable does a reload from the stack. Also, note
+that we didn't modify the if/then/else expression, so it still inserts a PHI
+node. While we could make an alloca for it, it is actually easier to create a
+PHI node for it, so we still just make the PHI.</p>
+
+<p>Here is the code after the mem2reg pass runs:</p>
+
+<div class="doc_code">
+<pre>
+define double @fib(double %x) {
+entry:
+ %cmptmp = fcmp ult double <b>%x</b>, 3.000000e+00
+ %booltmp = uitofp i1 %cmptmp to double
+ %ifcond = fcmp one double %booltmp, 0.000000e+00
+ br i1 %ifcond, label %then, label %else
+
+then:
+ br label %ifcont
+
+else:
+ %subtmp = fsub double <b>%x</b>, 1.000000e+00
+ %calltmp = call double @fib(double %subtmp)
+ %subtmp5 = fsub double <b>%x</b>, 2.000000e+00
+ %calltmp6 = call double @fib(double %subtmp5)
+ %addtmp = fadd double %calltmp, %calltmp6
+ br label %ifcont
+
+ifcont: ; preds = %else, %then
+ %iftmp = phi double [ 1.000000e+00, %then ], [ %addtmp, %else ]
+ ret double %iftmp
+}
+</pre>
+</div>
+
+<p>This is a trivial case for mem2reg, since there are no redefinitions of the
+variable. The point of showing this is to calm any worries you may have about
+inserting such blatant inefficiencies :).</p>
+
+<p>After the rest of the optimizers run, we get:</p>
+
+<div class="doc_code">
+<pre>
+define double @fib(double %x) {
+entry:
+ %cmptmp = fcmp ult double %x, 3.000000e+00
+ %booltmp = uitofp i1 %cmptmp to double
+ %ifcond = fcmp ueq double %booltmp, 0.000000e+00
+ br i1 %ifcond, label %else, label %ifcont
+
+else:
+ %subtmp = fsub double %x, 1.000000e+00
+ %calltmp = call double @fib(double %subtmp)
+ %subtmp5 = fsub double %x, 2.000000e+00
+ %calltmp6 = call double @fib(double %subtmp5)
+ %addtmp = fadd double %calltmp, %calltmp6
+ ret double %addtmp
+
+ifcont:
+ ret double 1.000000e+00
+}
+</pre>
+</div>
+
+<p>Here we see that the simplifycfg pass decided to clone the return instruction
+into the end of the 'else' block. This allowed it to eliminate some branches
+and the PHI node.</p>
+
+<p>Now that all symbol table references are updated to use stack variables,
+we'll add the assignment operator.</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="assignment">New Assignment Operator</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>With our current framework, adding a new assignment operator is really
+simple. We will parse it just like any other binary operator, but handle it
+internally (instead of allowing the user to define it). The first step is to
+set a precedence:</p>
+
+<div class="doc_code">
+<pre>
+let main () =
+ (* Install standard binary operators.
+ * 1 is the lowest precedence. *)
+ <b>Hashtbl.add Parser.binop_precedence '=' 2;</b>
+ Hashtbl.add Parser.binop_precedence '&lt;' 10;
+ Hashtbl.add Parser.binop_precedence '+' 20;
+ Hashtbl.add Parser.binop_precedence '-' 20;
+ ...
+</pre>
+</div>
+
+<p>Now that the parser knows the precedence of the binary operator, it takes
+care of all the parsing and AST generation. We just need to implement codegen
+for the assignment operator. This looks like:</p>
+
+<div class="doc_code">
+<pre>
+let rec codegen_expr = function
+  ...
+  | Ast.Binary (op, lhs, rhs) -&gt;
+      begin match op with
+ | '=' -&gt;
+ (* Special case '=' because we don't want to emit the LHS as an
+ * expression. *)
+ let name =
+ match lhs with
+ | Ast.Variable name -&gt; name
+ | _ -&gt; raise (Error "destination of '=' must be a variable")
+ in
+</pre>
+</div>
+
+<p>Unlike the rest of the binary operators, our assignment operator doesn't
+follow the "emit LHS, emit RHS, do computation" model. As such, it is handled
+as a special case before the other binary operators are handled. The other
+strange thing is that it requires the LHS to be a variable. It is invalid to
+have "(x+1) = expr" - only things like "x = expr" are allowed.
+</p>
+
+
+<div class="doc_code">
+<pre>
+ (* Codegen the rhs. *)
+ let val_ = codegen_expr rhs in
+
+ (* Lookup the name. *)
+ let variable = try Hashtbl.find named_values name with
+ | Not_found -&gt; raise (Error "unknown variable name")
+ in
+ ignore(build_store val_ variable builder);
+ val_
+ | _ -&gt;
+ ...
+</pre>
+</div>
+
+<p>Once we have the variable, codegen'ing the assignment is straightforward:
+we emit the RHS of the assignment, create a store, and return the computed
+value. Returning a value allows for chained assignments like "X = (Y = Z)".</p>
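+
+<p>For example, with explicit parentheses, "x = (y = z)" parses into nested
+<tt>Ast.Binary</tt> nodes, roughly as shown below; because the inner '='
+returns the value it stored, that value feeds directly into the outer
+store:</p>
+
+<div class="doc_code">
+<pre>
+(* "x = (y = z)": the inner assignment's result becomes the outer RHS. *)
+Ast.Binary ('=', Ast.Variable "x",
+            Ast.Binary ('=', Ast.Variable "y", Ast.Variable "z"))
+</pre>
+</div>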
+
+<p>Now that we have an assignment operator, we can mutate loop variables and
+arguments. For example, we can now run code like this:</p>
+
+<div class="doc_code">
+<pre>
+# Function to print a double.
+extern printd(x);
+
+# Define ':' for sequencing: as a low-precedence operator that ignores operands
+# and just returns the RHS.
+def binary : 1 (x y) y;
+
+def test(x)
+ printd(x) :
+ x = 4 :
+ printd(x);
+
+test(123);
+</pre>
+</div>
+
+<p>When run, this example prints "123" and then "4", showing that we did
+actually mutate the value! Okay, we have now officially implemented our goal:
+getting this to work requires SSA construction in the general case. However,
+to be really useful, we want the ability to define our own local variables.
+Let's add this next!
+</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="localvars">User-defined Local Variables</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>Adding var/in is just like any of the other extensions we made to
+Kaleidoscope: we extend the lexer, the parser, the AST and the code generator.
+The first step for adding our new 'var/in' construct is to extend the lexer.
+As before, this is pretty trivial; the code looks like this:</p>
+
+<div class="doc_code">
+<pre>
+type token =
+ ...
+ <b>(* var definition *)
+ | Var</b>
+
+...
+
+and lex_ident buffer = parser
+ ...
+ | "in" -&gt; [&lt; 'Token.In; stream &gt;]
+ | "binary" -&gt; [&lt; 'Token.Binary; stream &gt;]
+ | "unary" -&gt; [&lt; 'Token.Unary; stream &gt;]
+ <b>| "var" -&gt; [&lt; 'Token.Var; stream &gt;]</b>
+ ...
+</pre>
+</div>
+
+<p>The next step is to define the AST node that we will construct. For var/in,
+it looks like this:</p>
+
+<div class="doc_code">
+<pre>
+type expr =
+ ...
+ (* variant for var/in. *)
+ | Var of (string * expr option) array * expr
+ ...
+</pre>
+</div>
+
+<p>var/in allows a list of names to be defined all at once, and each name can
+optionally have an initializer value. As such, we capture this information in
+an array of (name, optional initializer) pairs. Also, var/in has a body; this
+body is allowed to access the variables defined by the var/in.</p>
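+
+<p>For example, an expression like "var a = 1, b in a+b" would be represented
+(roughly) by the following AST value:</p>
+
+<div class="doc_code">
+<pre>
+(* Two names, one with an initializer, plus the body expression. *)
+Ast.Var ([| ("a", Some (Ast.Number 1.0)); ("b", None) |],
+         Ast.Binary ('+', Ast.Variable "a", Ast.Variable "b"))
+</pre>
+</div>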
+
+<p>With this in place, we can define the parser pieces. The first thing we do
+is add it as a primary expression:</p>
+
+<div class="doc_code">
+<pre>
+(* primary
+ * ::= identifier
+ * ::= numberexpr
+ * ::= parenexpr
+ * ::= ifexpr
+ * ::= forexpr
+ <b>* ::= varexpr</b> *)
+let rec parse_primary = parser
+ ...
+ <b>(* varexpr
+ * ::= 'var' identifier ('=' expression?
+ * (',' identifier ('=' expression)?)* 'in' expression *)
+ | [&lt; 'Token.Var;
+ (* At least one variable name is required. *)
+ 'Token.Ident id ?? "expected identifier after var";
+ init=parse_var_init;
+ var_names=parse_var_names [(id, init)];
+ (* At this point, we have to have 'in'. *)
+ 'Token.In ?? "expected 'in' keyword after 'var'";
+ body=parse_expr &gt;] -&gt;
+ Ast.Var (Array.of_list (List.rev var_names), body)</b>
+
+...
+
+and parse_var_init = parser
+ (* read in the optional initializer. *)
+ | [&lt; 'Token.Kwd '='; e=parse_expr &gt;] -&gt; Some e
+ | [&lt; &gt;] -&gt; None
+
+and parse_var_names accumulator = parser
+ | [&lt; 'Token.Kwd ',';
+ 'Token.Ident id ?? "expected identifier list after var";
+ init=parse_var_init;
+ e=parse_var_names ((id, init) :: accumulator) &gt;] -&gt; e
+ | [&lt; &gt;] -&gt; accumulator
+</pre>
+</div>
+
+<p>Now that we can parse and represent the code, we need to support emission of
+LLVM IR for it. This code starts out with:</p>
+
+<div class="doc_code">
+<pre>
+let rec codegen_expr = function
+ ...
+ | Ast.Var (var_names, body) -&gt;
+ let old_bindings = ref [] in
+
+ let the_function = block_parent (insertion_block builder) in
+
+ (* Register all variables and emit their initializer. *)
+ Array.iter (fun (var_name, init) -&gt;
+</pre>
+</div>
+
+<p>Basically it loops over all the variables, installing them one at a time.
+For each variable we put into the symbol table, we remember the previous value
+that we replace in <tt>old_bindings</tt>.</p>
+
+<div class="doc_code">
+<pre>
+ (* Emit the initializer before adding the variable to scope, this
+ * prevents the initializer from referencing the variable itself, and
+ * permits stuff like this:
+ * var a = 1 in
+ * var a = a in ... # refers to outer 'a'. *)
+ let init_val =
+ match init with
+ | Some init -&gt; codegen_expr init
+ (* If not specified, use 0.0. *)
+ | None -&gt; const_float double_type 0.0
+ in
+
+ let alloca = create_entry_block_alloca the_function var_name in
+ ignore(build_store init_val alloca builder);
+
+ (* Remember the old variable binding so that we can restore the binding
+ * when we unrecurse. *)
+
+ begin
+ try
+ let old_value = Hashtbl.find named_values var_name in
+ old_bindings := (var_name, old_value) :: !old_bindings;
+ with Not_found -&gt; ()
+ end;
+
+ (* Remember this binding. *)
+ Hashtbl.add named_values var_name alloca;
+ ) var_names;
+</pre>
+</div>
+
+<p>There are more comments here than code. The basic idea is that we emit the
+initializer, create the alloca, then update the symbol table to point to it.
+Once all the variables are installed in the symbol table, we evaluate the body
+of the var/in expression:</p>
+
+<div class="doc_code">
+<pre>
+ (* Codegen the body, now that all vars are in scope. *)
+ let body_val = codegen_expr body in
+</pre>
+</div>
+
+<p>Finally, before returning, we restore the previous variable bindings:</p>
+
+<div class="doc_code">
+<pre>
+ (* Pop all our variables from scope. *)
+ List.iter (fun (var_name, old_value) -&gt;
+ Hashtbl.add named_values var_name old_value
+ ) !old_bindings;
+
+ (* Return the body computation. *)
+ body_val
+</pre>
+</div>
+
+<p>The end result of all of this is that we get properly scoped variable
+definitions, and we even (trivially) allow mutation of them :).</p>
+
+<p>With this, we completed what we set out to do. Our nice iterative fib
+example from the intro compiles and runs just fine. The mem2reg pass optimizes
+all of our stack variables into SSA registers, inserting PHI nodes where needed,
+and our front-end remains simple: no "iterated dominance frontier" computation
+anywhere in sight.</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="code">Full Code Listing</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>
+Here is the complete code listing for our running example, enhanced with mutable
+variables and var/in support. To build this example, use:
+</p>
+
+<div class="doc_code">
+<pre>
+# Compile
+ocamlbuild toy.byte
+# Run
+./toy.byte
+</pre>
+</div>
+
+<p>Here is the code:</p>
+
+<dl>
+<dt>_tags:</dt>
+<dd class="doc_code">
+<pre>
+&lt;{lexer,parser}.ml&gt;: use_camlp4, pp(camlp4of)
+&lt;*.{byte,native}&gt;: g++, use_llvm, use_llvm_analysis
+&lt;*.{byte,native}&gt;: use_llvm_executionengine, use_llvm_target
+&lt;*.{byte,native}&gt;: use_llvm_scalar_opts, use_bindings
+</pre>
+</dd>
+
+<dt>myocamlbuild.ml:</dt>
+<dd class="doc_code">
+<pre>
+open Ocamlbuild_plugin;;
+
+ocaml_lib ~extern:true "llvm";;
+ocaml_lib ~extern:true "llvm_analysis";;
+ocaml_lib ~extern:true "llvm_executionengine";;
+ocaml_lib ~extern:true "llvm_target";;
+ocaml_lib ~extern:true "llvm_scalar_opts";;
+
+flag ["link"; "ocaml"; "g++"] (S[A"-cc"; A"g++"; A"-cclib"; A"-rdynamic"]);;
+dep ["link"; "ocaml"; "use_bindings"] ["bindings.o"];;
+</pre>
+</dd>
+
+<dt>token.ml:</dt>
+<dd class="doc_code">
+<pre>
+(*===----------------------------------------------------------------------===
+ * Lexer Tokens
+ *===----------------------------------------------------------------------===*)
+
+(* The lexer returns these 'Kwd' if it is an unknown character, otherwise one of
+ * these others for known things. *)
+type token =
+ (* commands *)
+ | Def | Extern
+
+ (* primary *)
+ | Ident of string | Number of float
+
+ (* unknown *)
+ | Kwd of char
+
+ (* control *)
+ | If | Then | Else
+ | For | In
+
+ (* operators *)
+ | Binary | Unary
+
+ (* var definition *)
+ | Var
+</pre>
+</dd>
+
+<dt>lexer.ml:</dt>
+<dd class="doc_code">
+<pre>
+(*===----------------------------------------------------------------------===
+ * Lexer
+ *===----------------------------------------------------------------------===*)
+
+let rec lex = parser
+ (* Skip any whitespace. *)
+ | [&lt; ' (' ' | '\n' | '\r' | '\t'); stream &gt;] -&gt; lex stream
+
+ (* identifier: [a-zA-Z][a-zA-Z0-9] *)
+ | [&lt; ' ('A' .. 'Z' | 'a' .. 'z' as c); stream &gt;] -&gt;
+ let buffer = Buffer.create 1 in
+ Buffer.add_char buffer c;
+ lex_ident buffer stream
+
+ (* number: [0-9.]+ *)
+ | [&lt; ' ('0' .. '9' as c); stream &gt;] -&gt;
+ let buffer = Buffer.create 1 in
+ Buffer.add_char buffer c;
+ lex_number buffer stream
+
+ (* Comment until end of line. *)
+ | [&lt; ' ('#'); stream &gt;] -&gt;
+ lex_comment stream
+
+ (* Otherwise, just return the character as its ascii value. *)
+ | [&lt; 'c; stream &gt;] -&gt;
+ [&lt; 'Token.Kwd c; lex stream &gt;]
+
+ (* end of stream. *)
+ | [&lt; &gt;] -&gt; [&lt; &gt;]
+
+and lex_number buffer = parser
+ | [&lt; ' ('0' .. '9' | '.' as c); stream &gt;] -&gt;
+ Buffer.add_char buffer c;
+ lex_number buffer stream
+ | [&lt; stream=lex &gt;] -&gt;
+ [&lt; 'Token.Number (float_of_string (Buffer.contents buffer)); stream &gt;]
+
+and lex_ident buffer = parser
+ | [&lt; ' ('A' .. 'Z' | 'a' .. 'z' | '0' .. '9' as c); stream &gt;] -&gt;
+ Buffer.add_char buffer c;
+ lex_ident buffer stream
+ | [&lt; stream=lex &gt;] -&gt;
+ match Buffer.contents buffer with
+ | "def" -&gt; [&lt; 'Token.Def; stream &gt;]
+ | "extern" -&gt; [&lt; 'Token.Extern; stream &gt;]
+ | "if" -&gt; [&lt; 'Token.If; stream &gt;]
+ | "then" -&gt; [&lt; 'Token.Then; stream &gt;]
+ | "else" -&gt; [&lt; 'Token.Else; stream &gt;]
+ | "for" -&gt; [&lt; 'Token.For; stream &gt;]
+ | "in" -&gt; [&lt; 'Token.In; stream &gt;]
+ | "binary" -&gt; [&lt; 'Token.Binary; stream &gt;]
+ | "unary" -&gt; [&lt; 'Token.Unary; stream &gt;]
+ | "var" -&gt; [&lt; 'Token.Var; stream &gt;]
+ | id -&gt; [&lt; 'Token.Ident id; stream &gt;]
+
+and lex_comment = parser
+ | [&lt; ' ('\n'); stream=lex &gt;] -&gt; stream
+ | [&lt; 'c; e=lex_comment &gt;] -&gt; e
+ | [&lt; &gt;] -&gt; [&lt; &gt;]
+</pre>
+</dd>
+
+<dt>ast.ml:</dt>
+<dd class="doc_code">
+<pre>
+(*===----------------------------------------------------------------------===
+ * Abstract Syntax Tree (aka Parse Tree)
+ *===----------------------------------------------------------------------===*)
+
+(* expr - Base type for all expression nodes. *)
+type expr =
+ (* variant for numeric literals like "1.0". *)
+ | Number of float
+
+ (* variant for referencing a variable, like "a". *)
+ | Variable of string
+
+ (* variant for a unary operator. *)
+ | Unary of char * expr
+
+ (* variant for a binary operator. *)
+ | Binary of char * expr * expr
+
+ (* variant for function calls. *)
+ | Call of string * expr array
+
+ (* variant for if/then/else. *)
+ | If of expr * expr * expr
+
+ (* variant for for/in. *)
+ | For of string * expr * expr * expr option * expr
+
+ (* variant for var/in. *)
+ | Var of (string * expr option) array * expr
+
+(* proto - This type represents the "prototype" for a function, which captures
+ * its name, and its argument names (thus implicitly the number of arguments the
+ * function takes). *)
+type proto =
+ | Prototype of string * string array
+ | BinOpPrototype of string * string array * int
+
+(* func - This type represents a function definition itself. *)
+type func = Function of proto * expr
+</pre>
+</dd>
+
+<dt>parser.ml:</dt>
+<dd class="doc_code">
+<pre>
+(*===---------------------------------------------------------------------===
+ * Parser
+ *===---------------------------------------------------------------------===*)
+
+(* binop_precedence - This holds the precedence for each binary operator that is
+ * defined *)
+let binop_precedence:(char, int) Hashtbl.t = Hashtbl.create 10
+
+(* precedence - Get the precedence of the pending binary operator token. *)
+let precedence c = try Hashtbl.find binop_precedence c with Not_found -&gt; -1
+
+(* primary
+ * ::= identifier
+ * ::= numberexpr
+ * ::= parenexpr
+ * ::= ifexpr
+ * ::= forexpr
+ * ::= varexpr *)
+let rec parse_primary = parser
+ (* numberexpr ::= number *)
+ | [&lt; 'Token.Number n &gt;] -&gt; Ast.Number n
+
+ (* parenexpr ::= '(' expression ')' *)
+ | [&lt; 'Token.Kwd '('; e=parse_expr; 'Token.Kwd ')' ?? "expected ')'" &gt;] -&gt; e
+
+ (* identifierexpr
+ * ::= identifier
+ * ::= identifier '(' argumentexpr ')' *)
+ | [&lt; 'Token.Ident id; stream &gt;] -&gt;
+ let rec parse_args accumulator = parser
+ | [&lt; e=parse_expr; stream &gt;] -&gt;
+ begin parser
+ | [&lt; 'Token.Kwd ','; e=parse_args (e :: accumulator) &gt;] -&gt; e
+ | [&lt; &gt;] -&gt; e :: accumulator
+ end stream
+ | [&lt; &gt;] -&gt; accumulator
+ in
+ let rec parse_ident id = parser
+ (* Call. *)
+ | [&lt; 'Token.Kwd '(';
+ args=parse_args [];
+ 'Token.Kwd ')' ?? "expected ')'"&gt;] -&gt;
+ Ast.Call (id, Array.of_list (List.rev args))
+
+ (* Simple variable ref. *)
+ | [&lt; &gt;] -&gt; Ast.Variable id
+ in
+ parse_ident id stream
+
+ (* ifexpr ::= 'if' expr 'then' expr 'else' expr *)
+ | [&lt; 'Token.If; c=parse_expr;
+ 'Token.Then ?? "expected 'then'"; t=parse_expr;
+ 'Token.Else ?? "expected 'else'"; e=parse_expr &gt;] -&gt;
+ Ast.If (c, t, e)
+
+ (* forexpr
+ ::= 'for' identifier '=' expr ',' expr (',' expr)? 'in' expression *)
+ | [&lt; 'Token.For;
+ 'Token.Ident id ?? "expected identifier after for";
+ 'Token.Kwd '=' ?? "expected '=' after for";
+ stream &gt;] -&gt;
+ begin parser
+ | [&lt;
+ start=parse_expr;
+ 'Token.Kwd ',' ?? "expected ',' after for";
+ end_=parse_expr;
+ stream &gt;] -&gt;
+ let step =
+ begin parser
+ | [&lt; 'Token.Kwd ','; step=parse_expr &gt;] -&gt; Some step
+ | [&lt; &gt;] -&gt; None
+ end stream
+ in
+ begin parser
+ | [&lt; 'Token.In; body=parse_expr &gt;] -&gt;
+ Ast.For (id, start, end_, step, body)
+ | [&lt; &gt;] -&gt;
+ raise (Stream.Error "expected 'in' after for")
+ end stream
+ | [&lt; &gt;] -&gt;
+ raise (Stream.Error "expected '=' after for")
+ end stream
+
+ (* varexpr
+ * ::= 'var' identifier ('=' expression?
+ * (',' identifier ('=' expression)?)* 'in' expression *)
+ | [&lt; 'Token.Var;
+ (* At least one variable name is required. *)
+ 'Token.Ident id ?? "expected identifier after var";
+ init=parse_var_init;
+ var_names=parse_var_names [(id, init)];
+ (* At this point, we have to have 'in'. *)
+ 'Token.In ?? "expected 'in' keyword after 'var'";
+ body=parse_expr &gt;] -&gt;
+ Ast.Var (Array.of_list (List.rev var_names), body)
+
+ | [&lt; &gt;] -&gt; raise (Stream.Error "unknown token when expecting an expression.")
+
+(* unary
+ * ::= primary
+ * ::= '!' unary *)
+and parse_unary = parser
+ (* If this is a unary operator, read it. *)
+ | [&lt; 'Token.Kwd op when op != '(' &amp;&amp; op != ')'; operand=parse_expr &gt;] -&gt;
+ Ast.Unary (op, operand)
+
+ (* If the current token is not an operator, it must be a primary expr. *)
+ | [&lt; stream &gt;] -&gt; parse_primary stream
+
+(* binoprhs
+ * ::= ('+' primary)* *)
+and parse_bin_rhs expr_prec lhs stream =
+ match Stream.peek stream with
+ (* If this is a binop, find its precedence. *)
+ | Some (Token.Kwd c) when Hashtbl.mem binop_precedence c -&gt;
+ let token_prec = precedence c in
+
+ (* If this is a binop that binds at least as tightly as the current binop,
+ * consume it, otherwise we are done. *)
+ if token_prec &lt; expr_prec then lhs else begin
+ (* Eat the binop. *)
+ Stream.junk stream;
+
+ (* Parse the primary expression after the binary operator. *)
+ let rhs = parse_unary stream in
+
+ (* Okay, we know this is a binop. *)
+ let rhs =
+ match Stream.peek stream with
+ | Some (Token.Kwd c2) -&gt;
+ (* If BinOp binds less tightly with rhs than the operator after
+ * rhs, let the pending operator take rhs as its lhs. *)
+ let next_prec = precedence c2 in
+ if token_prec &lt; next_prec
+ then parse_bin_rhs (token_prec + 1) rhs stream
+ else rhs
+ | _ -&gt; rhs
+ in
+
+ (* Merge lhs/rhs. *)
+ let lhs = Ast.Binary (c, lhs, rhs) in
+ parse_bin_rhs expr_prec lhs stream
+ end
+ | _ -&gt; lhs
+
+and parse_var_init = parser
+ (* read in the optional initializer. *)
+ | [&lt; 'Token.Kwd '='; e=parse_expr &gt;] -&gt; Some e
+ | [&lt; &gt;] -&gt; None
+
+and parse_var_names accumulator = parser
+ | [&lt; 'Token.Kwd ',';
+ 'Token.Ident id ?? "expected identifier list after var";
+ init=parse_var_init;
+ e=parse_var_names ((id, init) :: accumulator) &gt;] -&gt; e
+ | [&lt; &gt;] -&gt; accumulator
+
+(* expression
+ * ::= primary binoprhs *)
+and parse_expr = parser
+ | [&lt; lhs=parse_unary; stream &gt;] -&gt; parse_bin_rhs 0 lhs stream
+
+(* prototype
+ * ::= id '(' id* ')'
+ * ::= binary LETTER number? (id, id)
+ * ::= unary LETTER number? (id) *)
+let parse_prototype =
+ let rec parse_args accumulator = parser
+ | [&lt; 'Token.Ident id; e=parse_args (id::accumulator) &gt;] -&gt; e
+ | [&lt; &gt;] -&gt; accumulator
+ in
+ let parse_operator = parser
+ | [&lt; 'Token.Unary &gt;] -&gt; "unary", 1
+ | [&lt; 'Token.Binary &gt;] -&gt; "binary", 2
+ in
+ let parse_binary_precedence = parser
+ | [&lt; 'Token.Number n &gt;] -&gt; int_of_float n
+ | [&lt; &gt;] -&gt; 30
+ in
+ parser
+ | [&lt; 'Token.Ident id;
+ 'Token.Kwd '(' ?? "expected '(' in prototype";
+ args=parse_args [];
+ 'Token.Kwd ')' ?? "expected ')' in prototype" &gt;] -&gt;
+ (* success. *)
+ Ast.Prototype (id, Array.of_list (List.rev args))
+ | [&lt; (prefix, kind)=parse_operator;
+ 'Token.Kwd op ?? "expected an operator";
+ (* Read the precedence if present. *)
+ binary_precedence=parse_binary_precedence;
+ 'Token.Kwd '(' ?? "expected '(' in prototype";
+ args=parse_args [];
+ 'Token.Kwd ')' ?? "expected ')' in prototype" &gt;] -&gt;
+ let name = prefix ^ (String.make 1 op) in
+ let args = Array.of_list (List.rev args) in
+
+ (* Verify right number of arguments for operator. *)
+ if Array.length args != kind
+ then raise (Stream.Error "invalid number of operands for operator")
+ else
+ if kind == 1 then
+ Ast.Prototype (name, args)
+ else
+ Ast.BinOpPrototype (name, args, binary_precedence)
+ | [&lt; &gt;] -&gt;
+ raise (Stream.Error "expected function name in prototype")
+
+(* definition ::= 'def' prototype expression *)
+let parse_definition = parser
+ | [&lt; 'Token.Def; p=parse_prototype; e=parse_expr &gt;] -&gt;
+ Ast.Function (p, e)
+
+(* toplevelexpr ::= expression *)
+let parse_toplevel = parser
+ | [&lt; e=parse_expr &gt;] -&gt;
+ (* Make an anonymous proto. *)
+ Ast.Function (Ast.Prototype ("", [||]), e)
+
+(* external ::= 'extern' prototype *)
+let parse_extern = parser
+ | [&lt; 'Token.Extern; e=parse_prototype &gt;] -&gt; e
+</pre>
+</dd>
+
+<dt>codegen.ml:</dt>
+<dd class="doc_code">
+<pre>
+(*===----------------------------------------------------------------------===
+ * Code Generation
+ *===----------------------------------------------------------------------===*)
+
+open Llvm
+
+exception Error of string
+
+let context = global_context ()
+let the_module = create_module context "my cool jit"
+let builder = builder context
+let named_values:(string, llvalue) Hashtbl.t = Hashtbl.create 10
+let double_type = double_type context
+
+(* Create an alloca instruction in the entry block of the function. This
+ * is used for mutable variables etc. *)
+let create_entry_block_alloca the_function var_name =
+ let builder = builder_at context (instr_begin (entry_block the_function)) in
+ build_alloca double_type var_name builder
+
+let rec codegen_expr = function
+ | Ast.Number n -&gt; const_float double_type n
+ | Ast.Variable name -&gt;
+ let v = try Hashtbl.find named_values name with
+ | Not_found -&gt; raise (Error "unknown variable name")
+ in
+ (* Load the value. *)
+ build_load v name builder
+ | Ast.Unary (op, operand) -&gt;
+ let operand = codegen_expr operand in
+ let callee = "unary" ^ (String.make 1 op) in
+ let callee =
+ match lookup_function callee the_module with
+ | Some callee -&gt; callee
+ | None -&gt; raise (Error "unknown unary operator")
+ in
+ build_call callee [|operand|] "unop" builder
+ | Ast.Binary (op, lhs, rhs) -&gt;
+ begin match op with
+ | '=' -&gt;
+ (* Special case '=' because we don't want to emit the LHS as an
+ * expression. *)
+ let name =
+ match lhs with
+ | Ast.Variable name -&gt; name
+ | _ -&gt; raise (Error "destination of '=' must be a variable")
+ in
+
+ (* Codegen the rhs. *)
+ let val_ = codegen_expr rhs in
+
+ (* Lookup the name. *)
+ let variable = try Hashtbl.find named_values name with
+ | Not_found -&gt; raise (Error "unknown variable name")
+ in
+ ignore(build_store val_ variable builder);
+ val_
+ | _ -&gt;
+ let lhs_val = codegen_expr lhs in
+ let rhs_val = codegen_expr rhs in
+ begin
+ match op with
+ | '+' -&gt; build_add lhs_val rhs_val "addtmp" builder
+ | '-' -&gt; build_sub lhs_val rhs_val "subtmp" builder
+ | '*' -&gt; build_mul lhs_val rhs_val "multmp" builder
+ | '&lt;' -&gt;
+ (* Convert bool 0/1 to double 0.0 or 1.0 *)
+ let i = build_fcmp Fcmp.Ult lhs_val rhs_val "cmptmp" builder in
+ build_uitofp i double_type "booltmp" builder
+ | _ -&gt;
+ (* If it wasn't a builtin binary operator, it must be a user defined
+ * one. Emit a call to it. *)
+ let callee = "binary" ^ (String.make 1 op) in
+ let callee =
+ match lookup_function callee the_module with
+ | Some callee -&gt; callee
+ | None -&gt; raise (Error "binary operator not found!")
+ in
+ build_call callee [|lhs_val; rhs_val|] "binop" builder
+ end
+ end
+ | Ast.Call (callee, args) -&gt;
+ (* Look up the name in the module table. *)
+ let callee =
+ match lookup_function callee the_module with
+ | Some callee -&gt; callee
+ | None -&gt; raise (Error "unknown function referenced")
+ in
+ let params = params callee in
+
+ (* If argument mismatch error. *)
+ if Array.length params == Array.length args then () else
+ raise (Error "incorrect # arguments passed");
+ let args = Array.map codegen_expr args in
+ build_call callee args "calltmp" builder
+ | Ast.If (cond, then_, else_) -&gt;
+ let cond = codegen_expr cond in
+
+ (* Convert condition to a bool by comparing equal to 0.0 *)
+ let zero = const_float double_type 0.0 in
+ let cond_val = build_fcmp Fcmp.One cond zero "ifcond" builder in
+
+ (* Grab the first block so that we might later add the conditional branch
+ * to it at the end of the function. *)
+ let start_bb = insertion_block builder in
+ let the_function = block_parent start_bb in
+
+ let then_bb = append_block context "then" the_function in
+
+ (* Emit 'then' value. *)
+ position_at_end then_bb builder;
+ let then_val = codegen_expr then_ in
+
+ (* Codegen of 'then' can change the current block, update then_bb for the
+ * phi. We create a new name because one is used for the phi node, and the
+ * other is used for the conditional branch. *)
+ let new_then_bb = insertion_block builder in
+
+ (* Emit 'else' value. *)
+ let else_bb = append_block context "else" the_function in
+ position_at_end else_bb builder;
+ let else_val = codegen_expr else_ in
+
+ (* Codegen of 'else' can change the current block, update else_bb for the
+ * phi. *)
+ let new_else_bb = insertion_block builder in
+
+ (* Emit merge block. *)
+ let merge_bb = append_block context "ifcont" the_function in
+ position_at_end merge_bb builder;
+ let incoming = [(then_val, new_then_bb); (else_val, new_else_bb)] in
+ let phi = build_phi incoming "iftmp" builder in
+
+ (* Return to the start block to add the conditional branch. *)
+ position_at_end start_bb builder;
+ ignore (build_cond_br cond_val then_bb else_bb builder);
+
+ (* Set a unconditional branch at the end of the 'then' block and the
+ * 'else' block to the 'merge' block. *)
+ position_at_end new_then_bb builder; ignore (build_br merge_bb builder);
+ position_at_end new_else_bb builder; ignore (build_br merge_bb builder);
+
+ (* Finally, set the builder to the end of the merge block. *)
+ position_at_end merge_bb builder;
+
+ phi
+ | Ast.For (var_name, start, end_, step, body) -&gt;
+ (* Output this as:
+ * var = alloca double
+ * ...
+ * start = startexpr
+ * store start -&gt; var
+ * goto loop
+ * loop:
+ * ...
+ * bodyexpr
+ * ...
+ * loopend:
+ * step = stepexpr
+ * endcond = endexpr
+ *
+ * curvar = load var
+ * nextvar = curvar + step
+ * store nextvar -&gt; var
+ * br endcond, loop, endloop
+ * outloop: *)
+
+ let the_function = block_parent (insertion_block builder) in
+
+ (* Create an alloca for the variable in the entry block. *)
+ let alloca = create_entry_block_alloca the_function var_name in
+
+ (* Emit the start code first, without 'variable' in scope. *)
+ let start_val = codegen_expr start in
+
+ (* Store the value into the alloca. *)
+ ignore(build_store start_val alloca builder);
+
+ (* Make the new basic block for the loop header, inserting after current
+ * block. *)
+ let loop_bb = append_block context "loop" the_function in
+
+ (* Insert an explicit fall through from the current block to the
+ * loop_bb. *)
+ ignore (build_br loop_bb builder);
+
+ (* Start insertion in loop_bb. *)
+ position_at_end loop_bb builder;
+
+ (* Within the loop, the variable now lives in the alloca. If it
+ * shadows an existing variable, we have to restore it, so save it
+ * now. *)
+ let old_val =
+ try Some (Hashtbl.find named_values var_name) with Not_found -&gt; None
+ in
+ Hashtbl.add named_values var_name alloca;
+
+ (* Emit the body of the loop. This, like any other expr, can change the
+ * current BB. Note that we ignore the value computed by the body, but
+ * don't allow an error *)
+ ignore (codegen_expr body);
+
+ (* Emit the step value. *)
+ let step_val =
+ match step with
+ | Some step -&gt; codegen_expr step
+ (* If not specified, use 1.0. *)
+ | None -&gt; const_float double_type 1.0
+ in
+
+ (* Compute the end condition. *)
+ let end_cond = codegen_expr end_ in
+
+ (* Reload, increment, and restore the alloca. This handles the case where
+ * the body of the loop mutates the variable. *)
+ let cur_var = build_load alloca var_name builder in
+ let next_var = build_add cur_var step_val "nextvar" builder in
+ ignore(build_store next_var alloca builder);
+
+ (* Convert condition to a bool by comparing equal to 0.0. *)
+ let zero = const_float double_type 0.0 in
+ let end_cond = build_fcmp Fcmp.One end_cond zero "loopcond" builder in
+
+ (* Create the "after loop" block and insert it. *)
+ let after_bb = append_block context "afterloop" the_function in
+
+ (* Insert the conditional branch into the end of loop_end_bb. *)
+ ignore (build_cond_br end_cond loop_bb after_bb builder);
+
+ (* Any new code will be inserted in after_bb. *)
+ position_at_end after_bb builder;
+
+ (* Restore the unshadowed variable. *)
+ begin match old_val with
+ | Some old_val -&gt; Hashtbl.add named_values var_name old_val
+ | None -&gt; ()
+ end;
+
+ (* for expr always returns 0.0. *)
+ const_null double_type
+ | Ast.Var (var_names, body) -&gt;
+ let old_bindings = ref [] in
+
+ let the_function = block_parent (insertion_block builder) in
+
+ (* Register all variables and emit their initializer. *)
+ Array.iter (fun (var_name, init) -&gt;
+ (* Emit the initializer before adding the variable to scope, this
+ * prevents the initializer from referencing the variable itself, and
+ * permits stuff like this:
+ * var a = 1 in
+ * var a = a in ... # refers to outer 'a'. *)
+ let init_val =
+ match init with
+ | Some init -&gt; codegen_expr init
+ (* If not specified, use 0.0. *)
+ | None -&gt; const_float double_type 0.0
+ in
+
+ let alloca = create_entry_block_alloca the_function var_name in
+ ignore(build_store init_val alloca builder);
+
+ (* Remember the old variable binding so that we can restore the binding
+ * when we unrecurse. *)
+ begin
+ try
+ let old_value = Hashtbl.find named_values var_name in
+ old_bindings := (var_name, old_value) :: !old_bindings;
+ with Not_found -&gt; ()
+ end;
+
+ (* Remember this binding. *)
+ Hashtbl.add named_values var_name alloca;
+ ) var_names;
+
+ (* Codegen the body, now that all vars are in scope. *)
+ let body_val = codegen_expr body in
+
+ (* Pop all our variables from scope. *)
+ List.iter (fun (var_name, old_value) -&gt;
+ Hashtbl.add named_values var_name old_value
+ ) !old_bindings;
+
+ (* Return the body computation. *)
+ body_val
+
+let codegen_proto = function
+ | Ast.Prototype (name, args) | Ast.BinOpPrototype (name, args, _) -&gt;
+ (* Make the function type: double(double,double) etc. *)
+ let doubles = Array.make (Array.length args) double_type in
+ let ft = function_type double_type doubles in
+ let f =
+ match lookup_function name the_module with
+ | None -&gt; declare_function name ft the_module
+
+ (* If 'f' conflicted, there was already something named 'name'. If it
+ * has a body, don't allow redefinition or reextern. *)
+ | Some f -&gt;
+ (* If 'f' already has a body, reject this. *)
+ if block_begin f &lt;&gt; At_end f then
+ raise (Error "redefinition of function");
+
+ (* If 'f' took a different number of arguments, reject. *)
+ if element_type (type_of f) &lt;&gt; ft then
+ raise (Error "redefinition of function with different # args");
+ f
+ in
+
+ (* Set names for all arguments. *)
+ Array.iteri (fun i a -&gt;
+ let n = args.(i) in
+ set_value_name n a;
+ Hashtbl.add named_values n a;
+ ) (params f);
+ f
+
+(* Create an alloca for each argument and register the argument in the symbol
+ * table so that references to it will succeed. *)
+let create_argument_allocas the_function proto =
+ let args = match proto with
+ | Ast.Prototype (_, args) | Ast.BinOpPrototype (_, args, _) -&gt; args
+ in
+ Array.iteri (fun i ai -&gt;
+ let var_name = args.(i) in
+ (* Create an alloca for this variable. *)
+ let alloca = create_entry_block_alloca the_function var_name in
+
+ (* Store the initial value into the alloca. *)
+ ignore(build_store ai alloca builder);
+
+ (* Add arguments to variable symbol table. *)
+ Hashtbl.add named_values var_name alloca;
+ ) (params the_function)
+
+let codegen_func the_fpm = function
+ | Ast.Function (proto, body) -&gt;
+ Hashtbl.clear named_values;
+ let the_function = codegen_proto proto in
+
+ (* If this is an operator, install it. *)
+ begin match proto with
+ | Ast.BinOpPrototype (name, args, prec) -&gt;
+ let op = name.[String.length name - 1] in
+ Hashtbl.add Parser.binop_precedence op prec;
+ | _ -&gt; ()
+ end;
+
+ (* Create a new basic block to start insertion into. *)
+ let bb = append_block context "entry" the_function in
+ position_at_end bb builder;
+
+ try
+ (* Add all arguments to the symbol table and create their allocas. *)
+ create_argument_allocas the_function proto;
+
+ let ret_val = codegen_expr body in
+
+ (* Finish off the function. *)
+ let _ = build_ret ret_val builder in
+
+ (* Validate the generated code, checking for consistency. *)
+ Llvm_analysis.assert_valid_function the_function;
+
+ (* Optimize the function. *)
+ let _ = PassManager.run_function the_function the_fpm in
+
+ the_function
+ with e -&gt;
+ delete_function the_function;
+ raise e
+</pre>
+</dd>
+
+<dt>toplevel.ml:</dt>
+<dd class="doc_code">
+<pre>
+(*===----------------------------------------------------------------------===
+ * Top-Level parsing and JIT Driver
+ *===----------------------------------------------------------------------===*)
+
+open Llvm
+open Llvm_executionengine
+
+(* top ::= definition | external | expression | ';' *)
+let rec main_loop the_fpm the_execution_engine stream =
+ match Stream.peek stream with
+ | None -&gt; ()
+
+ (* ignore top-level semicolons. *)
+ | Some (Token.Kwd ';') -&gt;
+ Stream.junk stream;
+ main_loop the_fpm the_execution_engine stream
+
+ | Some token -&gt;
+ begin
+ try match token with
+ | Token.Def -&gt;
+ let e = Parser.parse_definition stream in
+ print_endline "parsed a function definition.";
+ dump_value (Codegen.codegen_func the_fpm e);
+ | Token.Extern -&gt;
+ let e = Parser.parse_extern stream in
+ print_endline "parsed an extern.";
+ dump_value (Codegen.codegen_proto e);
+ | _ -&gt;
+ (* Evaluate a top-level expression into an anonymous function. *)
+ let e = Parser.parse_toplevel stream in
+ print_endline "parsed a top-level expr";
+ let the_function = Codegen.codegen_func the_fpm e in
+ dump_value the_function;
+
+ (* JIT the function, returning a function pointer. *)
+ let result = ExecutionEngine.run_function the_function [||]
+ the_execution_engine in
+
+ print_string "Evaluated to ";
+ print_float (GenericValue.as_float Codegen.double_type result);
+ print_newline ();
+ with Stream.Error s | Codegen.Error s -&gt;
+ (* Skip token for error recovery. *)
+ Stream.junk stream;
+ print_endline s;
+ end;
+ print_string "ready&gt; "; flush stdout;
+ main_loop the_fpm the_execution_engine stream
+</pre>
+</dd>
+
+<dt>toy.ml:</dt>
+<dd class="doc_code">
+<pre>
+(*===----------------------------------------------------------------------===
+ * Main driver code.
+ *===----------------------------------------------------------------------===*)
+
+open Llvm
+open Llvm_executionengine
+open Llvm_target
+open Llvm_scalar_opts
+
+let main () =
+ ignore (initialize_native_target ());
+
+ (* Install standard binary operators.
+ * 1 is the lowest precedence. *)
+ Hashtbl.add Parser.binop_precedence '=' 2;
+ Hashtbl.add Parser.binop_precedence '&lt;' 10;
+ Hashtbl.add Parser.binop_precedence '+' 20;
+ Hashtbl.add Parser.binop_precedence '-' 20;
+ Hashtbl.add Parser.binop_precedence '*' 40; (* highest. *)
+
+ (* Prime the first token. *)
+ print_string "ready&gt; "; flush stdout;
+ let stream = Lexer.lex (Stream.of_channel stdin) in
+
+ (* Create the JIT. *)
+ let the_execution_engine = ExecutionEngine.create Codegen.the_module in
+ let the_fpm = PassManager.create_function Codegen.the_module in
+
+ (* Set up the optimizer pipeline. Start with registering info about how the
+ * target lays out data structures. *)
+ TargetData.add (ExecutionEngine.target_data the_execution_engine) the_fpm;
+
+ (* Promote allocas to registers. *)
+ add_memory_to_register_promotion the_fpm;
+
+ (* Do simple "peephole" optimizations and bit-twiddling optzn. *)
+ add_instruction_combination the_fpm;
+
+ (* reassociate expressions. *)
+ add_reassociation the_fpm;
+
+ (* Eliminate Common SubExpressions. *)
+ add_gvn the_fpm;
+
+ (* Simplify the control flow graph (deleting unreachable blocks, etc). *)
+ add_cfg_simplification the_fpm;
+
+ ignore (PassManager.initialize the_fpm);
+
+ (* Run the main "interpreter loop" now. *)
+ Toplevel.main_loop the_fpm the_execution_engine stream;
+
+ (* Print out all the generated code. *)
+ dump_module Codegen.the_module
+;;
+
+main ()
+</pre>
+</dd>
+
+<dt>bindings.c</dt>
+<dd class="doc_code">
+<pre>
+#include &lt;stdio.h&gt;
+
+/* putchard - putchar that takes a double and returns 0. */
+extern double putchard(double X) {
+ putchar((char)X);
+ return 0;
+}
+
+/* printd - printf that takes a double prints it as "%f\n", returning 0. */
+extern double printd(double X) {
+ printf("%f\n", X);
+ return 0;
+}
+</pre>
+</dd>
+</dl>
+
+<a href="OCamlLangImpl8.html">Next: Conclusion and other useful LLVM tidbits</a>
+</div>
+
+<!-- *********************************************************************** -->
+<hr>
+<address>
+ <a href="http://jigsaw.w3.org/css-validator/check/referer"><img
+ src="http://jigsaw.w3.org/css-validator/images/vcss" alt="Valid CSS!"></a>
+ <a href="http://validator.w3.org/check/referer"><img
+ src="http://www.w3.org/Icons/valid-html401" alt="Valid HTML 4.01!"></a>
+
+ <a href="mailto:sabre@nondot.org">Chris Lattner</a><br>
+ <a href="http://llvm.org/">The LLVM Compiler Infrastructure</a><br>
+ <a href="mailto:idadesub@users.sourceforge.net">Erick Tryzelaar</a><br>
+ Last modified: $Date$
+</address>
+</body>
+</html>
diff --git a/docs/tutorial/OCamlLangImpl8.html b/docs/tutorial/OCamlLangImpl8.html
new file mode 100644
index 00000000000..7c1a500a21b
--- /dev/null
+++ b/docs/tutorial/OCamlLangImpl8.html
@@ -0,0 +1,359 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
+ "http://www.w3.org/TR/html4/strict.dtd">
+
+<html>
+<head>
+ <title>Kaleidoscope: Conclusion and other useful LLVM tidbits</title>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
+ <meta name="author" content="Chris Lattner">
+ <link rel="stylesheet" href="../_static/llvm.css" type="text/css">
+</head>
+
+<body>
+
+<h1>Kaleidoscope: Conclusion and other useful LLVM tidbits</h1>
+
+<ul>
+<li><a href="index.html">Up to Tutorial Index</a></li>
+<li>Chapter 8
+ <ol>
+ <li><a href="#conclusion">Tutorial Conclusion</a></li>
+ <li><a href="#llvmirproperties">Properties of LLVM IR</a>
+ <ul>
+ <li><a href="#targetindep">Target Independence</a></li>
+ <li><a href="#safety">Safety Guarantees</a></li>
+ <li><a href="#langspecific">Language-Specific Optimizations</a></li>
+ </ul>
+ </li>
+ <li><a href="#tipsandtricks">Tips and Tricks</a>
+ <ul>
+ <li><a href="#offsetofsizeof">Implementing portable
+ offsetof/sizeof</a></li>
+ <li><a href="#gcstack">Garbage Collected Stack Frames</a></li>
+ </ul>
+ </li>
+ </ol>
+</li>
+</ul>
+
+
+<div class="doc_author">
+ <p>Written by <a href="mailto:sabre@nondot.org">Chris Lattner</a></p>
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="conclusion">Tutorial Conclusion</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>Welcome to the final chapter of the "<a href="index.html">Implementing a
+language with LLVM</a>" tutorial. In the course of this tutorial, we have grown
+our little Kaleidoscope language from being a useless toy, to being a
+semi-interesting (but probably still useless) toy. :)</p>
+
+<p>It is interesting to see how far we've come, and how little code it has
+taken. We built the entire lexer, parser, AST, code generator, and an
+interactive run-loop (with a JIT!) by hand in under 700 lines of
+(non-comment/non-blank) code.</p>
+
+<p>Our little language supports a couple of interesting features: it supports
+user defined binary and unary operators, it uses JIT compilation for immediate
+evaluation, and it supports a few control flow constructs with SSA construction.
+</p>
+
+<p>Part of the idea of this tutorial was to show you how easy and fun it can be
+to define, build, and play with languages. Building a compiler need not be a
+scary or mystical process! Now that you've seen some of the basics, I strongly
+encourage you to take the code and hack on it. For example, try adding:</p>
+
+<ul>
+<li><b>global variables</b> - While global variables have questionable value in
+modern software engineering, they are often useful when putting together quick
+little hacks like the Kaleidoscope compiler itself. Fortunately, our current
+setup makes it very easy to add global variables: just have value lookup check
+to see if an unresolved variable is in the global variable symbol table before
+rejecting it. To create a new global variable, make an instance of the LLVM
+<tt>GlobalVariable</tt> class (a minimal sketch using the OCaml bindings
+appears after this list).</li>
+
+<li><b>typed variables</b> - Kaleidoscope currently only supports variables of
+type double. This gives the language a very nice elegance, because only
+supporting one type means that you never have to specify types. Different
+languages have different ways of handling this. The easiest way is to require
+the user to specify types for every variable definition, and record the type
+of the variable in the symbol table along with its Value*.</li>
+
+<li><b>arrays, structs, vectors, etc</b> - Once you add types, you can start
+extending the type system in all sorts of interesting ways. Simple arrays are
+very easy and are quite useful for many different applications. Adding them is
+mostly an exercise in learning how the LLVM <a
+href="../LangRef.html#i_getelementptr">getelementptr</a> instruction works: it
+is so nifty/unconventional, it <a
+href="../GetElementPtr.html">has its own FAQ</a>! If you add support
+for recursive types (e.g. linked lists), make sure to read the <a
+href="../ProgrammersManual.html#TypeResolve">section in the LLVM
+Programmer's Manual</a> that describes how to construct them.</li>
+
+<li><b>standard runtime</b> - Our current language allows the user to access
+arbitrary external functions, and we use this for things like "printd" and
+"putchard". As you extend the language to add higher-level constructs, often
+these constructs make the most sense if they are lowered to calls into a
+language-supplied runtime. For example, if you add hash tables to the language,
+it would probably make sense to add the routines to a runtime, instead of
+inlining them everywhere.</li>
+
+<li><b>memory management</b> - Currently we can only access the stack in
+Kaleidoscope. It would also be useful to be able to allocate heap memory,
+either with calls to the standard libc malloc/free interface or with a garbage
+collector. If you would like to use garbage collection, note that LLVM fully
+supports <a href="../GarbageCollection.html">Accurate Garbage Collection</a>
+including algorithms that move objects and need to scan/update the stack.</li>
+
+<li><b>debugger support</b> - LLVM supports generation of <a
+href="../SourceLevelDebugging.html">DWARF Debug info</a> which is understood by
+common debuggers like GDB. Adding support for debug info is fairly
+straightforward. The best way to understand it is to compile some C/C++ code
+with "<tt>llvm-gcc -g -O0</tt>" and take a look at what it produces.</li>
+
+<li><b>exception handling support</b> - LLVM supports generation of <a
+href="../ExceptionHandling.html">zero-cost exceptions</a> which interoperate
+with code compiled in other languages. You could also generate code by
+implicitly making every function return an error value and checking it, or
+make explicit use of setjmp/longjmp. There are many different ways
+to go here.</li>
+
+<li><b>object orientation, generics, database access, complex numbers,
+geometric programming, ...</b> - Really, there is
+no end of crazy features that you can add to the language.</li>
+
+<li><b>unusual domains</b> - We've been talking about applying LLVM to a domain
+that many people are interested in: building a compiler for a specific language.
+However, there are many other domains that can use compiler technology but are
+not typically thought of as compiler problems. For example, LLVM has been used
+to implement OpenGL graphics acceleration, to translate C++ code to
+ActionScript, and for many other cute and clever things. Maybe you will be the
+first to JIT compile a regular expression interpreter into native code with
+LLVM?</li>
+
+</ul>
+
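+<p>For the global-variable idea above, a minimal sketch of the lookup fallback
+and of creating a global might look like the following. This is illustrative
+code, not part of the tutorial's sources: it assumes the <tt>NamedValues</tt>,
+<tt>TheModule</tt>, and <tt>Builder</tt> globals used in earlier chapters and
+the LLVM C++ API of the same vintage.</p>
+
+<div class="doc_code">
+<pre>
+// Sketch only: assumes the usual tutorial globals and LLVM headers.
+Value *VariableExprAST::Codegen() {
+  // Local variables first, exactly as before.
+  if (Value *V = NamedValues[Name])
+    return Builder.CreateLoad(V, Name.c_str());
+
+  // Fall back to the module's global variable symbol table.
+  if (GlobalVariable *GV = TheModule-&gt;getGlobalVariable(Name))
+    return Builder.CreateLoad(GV, Name.c_str());
+
+  return ErrorV("Unknown variable name");
+}
+
+// When your parser sees a new global definition, create a global double
+// initialized to 0.0:
+GlobalVariable *GV =
+  new GlobalVariable(*TheModule, Type::getDoubleTy(getGlobalContext()),
+                     /*isConstant=*/false, GlobalValue::ExternalLinkage,
+                     ConstantFP::get(getGlobalContext(), APFloat(0.0)), Name);
+</pre>
+</div>
+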
+<p>
+Have fun - try doing something crazy and unusual. Building a language the way
+everyone else always has is much less fun than trying something a little crazy
+or off the wall and seeing how it turns out. If you get stuck or want to talk
+about it, feel free to email the <a
+href="http://lists.cs.uiuc.edu/mailman/listinfo/llvmdev">llvmdev mailing
+list</a>: it has lots of people who are interested in languages and are often
+willing to help out.
+</p>
+
+<p>Before we end this tutorial, I want to talk about some "tips and tricks" for generating
+LLVM IR. These are some of the more subtle things that may not be obvious, but
+are very useful if you want to take advantage of LLVM's capabilities.</p>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="llvmirproperties">Properties of the LLVM IR</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>We have a couple of common questions about code in the LLVM IR form - let's
+just get these out of the way right now, shall we?</p>
+
+<!-- ======================================================================= -->
+<h4><a name="targetindep">Target Independence</a></h4>
+<!-- ======================================================================= -->
+
+<div>
+
+<p>Kaleidoscope is an example of a "portable language": any program written in
+Kaleidoscope will work the same way on any target that it runs on. Many other
+languages have this property, e.g. Lisp, Java, Haskell, JavaScript, Python, etc.
+(note that while these languages are portable, not all their libraries are).</p>
+
+<p>One nice aspect of LLVM is that it is often capable of preserving target
+independence in the IR: you can take the LLVM IR for a Kaleidoscope-compiled
+program and run it on any target that LLVM supports, even emitting C code and
+compiling that on targets that LLVM doesn't support natively. You can trivially
+tell that the Kaleidoscope compiler generates target-independent code because it
+never queries for any target-specific information when generating code.</p>
+
+<p>The fact that LLVM provides a compact, target-independent, representation for
+code gets a lot of people excited. Unfortunately, these people are usually
+thinking about C or a language from the C family when they are asking questions
+about language portability. I say "unfortunately", because there is really no
+way to make (fully general) C code portable, other than shipping the source code
+around (and of course, C source code is not actually portable in general
+either - ever port a really old application from 32- to 64-bits?).</p>
+
+<p>The problem with C (again, in its full generality) is that it is heavily
+laden with target specific assumptions. As one simple example, the preprocessor
+often destructively removes target-independence from the code when it processes
+the input text:</p>
+
+<div class="doc_code">
+<pre>
+#ifdef __i386__
+ int X = 1;
+#else
+ int X = 42;
+#endif
+</pre>
+</div>
+
+<p>While it is possible to engineer more and more complex solutions to problems
+like this, the problem cannot be solved in full generality in a way that is
+better than shipping the actual source code.</p>
+
+<p>That said, there are interesting subsets of C that can be made portable. If
+you are willing to fix primitive types to specific sizes (say int = 32 bits
+and long = 64 bits), don't care about ABI compatibility with existing binaries,
+and are willing to give up some other minor features, you can have portable
+code. This can make sense for specialized domains such as an
+in-kernel language.</p>
+
+</div>
+
+<!-- ======================================================================= -->
+<h4><a name="safety">Safety Guarantees</a></h4>
+<!-- ======================================================================= -->
+
+<div>
+
+<p>Many of the languages above are also "safe" languages: it is impossible for
+a program written in Java to corrupt its address space and crash the process
+(assuming the JVM has no bugs).
+Safety is an interesting property that requires a combination of language
+design, runtime support, and often operating system support.</p>
+
+<p>It is certainly possible to implement a safe language in LLVM, but LLVM IR
+does not itself guarantee safety. The LLVM IR allows unsafe pointer casts,
+use-after-free bugs, buffer overruns, and a variety of other problems. Safety
+needs to be implemented as a layer on top of LLVM and, conveniently, several
+groups have investigated this. Ask on the <a
+href="http://lists.cs.uiuc.edu/mailman/listinfo/llvmdev">llvmdev mailing
+list</a> if you are interested in more details.</p>
+
+</div>
+
+<!-- ======================================================================= -->
+<h4><a name="langspecific">Language-Specific Optimizations</a></h4>
+<!-- ======================================================================= -->
+
+<div>
+
+<p>One thing about LLVM that turns off many people is that it does not solve all
+the world's problems in one system (sorry 'world hunger', someone else will have
+to solve you some other day). One specific complaint is that people perceive
+LLVM as being incapable of performing high-level language-specific optimization:
+LLVM "loses too much information".</p>
+
+<p>Unfortunately, this is really not the place to give you a full and unified
+version of "Chris Lattner's theory of compiler design". Instead, I'll make a
+few observations:</p>
+
+<p>First, you're right that LLVM does lose information. For example, as of this
+writing, there is no way to distinguish in the LLVM IR whether an SSA-value came
+from a C "int" or a C "long" on an ILP32 machine (other than debug info). Both
+get compiled down to an 'i32' value and the information about what it came from
+is lost. The more general issue here is that the LLVM type system uses
+"structural equivalence" instead of "name equivalence". Another place this
+surprises people is if you have two types in a high-level language that have the
+same structure (e.g. two different structs that each have a single int field):
+these types will compile down into a single LLVM type, and it will be impossible
+to tell which one a given value came from.</p>
+
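+<p>One way to see this concretely (a hypothetical driver snippet, using literal
+struct types and the <tt>getGlobalContext()</tt> helper from this tutorial) is
+that two identically-structured struct types are literally the same LLVM type
+object:</p>
+
+<div class="doc_code">
+<pre>
+// "struct A { int x; }" and "struct B { int y; }" both lower to { i32 }.
+Type *Fields[] = { Type::getInt32Ty(getGlobalContext()) };
+
+StructType *TyA = StructType::get(getGlobalContext(), Fields);
+StructType *TyB = StructType::get(getGlobalContext(), Fields);
+assert(TyA == TyB &amp;&amp; "structurally identical types are one LLVM type");
+</pre>
+</div>
+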
+<p>Second, while LLVM does lose information, LLVM is not a fixed target: we
+continue to enhance and improve it in many different ways. In addition to
+adding new features (LLVM did not always support exceptions or debug info), we
+also extend the IR to capture important information for optimization (e.g.
+whether an argument is sign or zero extended, information about pointer
+aliasing, etc.). Many of the enhancements are user-driven: people want LLVM to
+include some specific feature, so they go ahead and extend it.</p>
+
+<p>Third, it is <em>possible and easy</em> to add language-specific
+optimizations, and you have a number of choices in how to do it. As one trivial
+example, it is easy to add language-specific optimization passes that
+"know" things about code compiled for a language. In the case of the C family,
+there is an optimization pass that "knows" about the standard C library
+functions. If you call "exit(0)" in main(), it knows that it is safe to
+optimize that into "return 0;" because C specifies what the 'exit'
+function does.</p>
+
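+<p>To make the "library knowledge" idea concrete, here is a hypothetical helper
+such a pass could use (a sketch, not LLVM's actual implementation): it
+recognizes a call to the C library function <tt>exit(0)</tt> inside
+<tt>main()</tt>, which is exactly the pattern the pass is allowed to rewrite
+into "<tt>return 0;</tt>":</p>
+
+<div class="doc_code">
+<pre>
+// Sketch: is this instruction a call to exit(0) inside main()?  A pass that
+// knows the C standard library may rewrite such a call into "ret i32 0".
+// Assumes the usual LLVM headers (Function.h, Instructions.h, Constants.h).
+static bool isExitZeroInMain(Instruction *I) {
+  CallInst *CI = dyn_cast&lt;CallInst&gt;(I);
+  if (!CI) return false;
+
+  Function *Callee = CI-&gt;getCalledFunction();
+  if (!Callee || Callee-&gt;getName() != "exit" ||
+      CI-&gt;getNumArgOperands() != 1)
+    return false;
+
+  if (CI-&gt;getParent()-&gt;getParent()-&gt;getName() != "main")
+    return false;
+
+  ConstantInt *Arg = dyn_cast&lt;ConstantInt&gt;(CI-&gt;getArgOperand(0));
+  return Arg &amp;&amp; Arg-&gt;isZero();
+}
+</pre>
+</div>
+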
+<p>In addition to simple library knowledge, it is possible to embed a variety of
+other language-specific information into the LLVM IR. If you have a specific
+need and run into a wall, please bring the topic up on the llvmdev list. At the
+very worst, you can always treat LLVM as if it were a "dumb code generator" and
+implement the high-level optimizations you desire in your front-end, on the
+language-specific AST.
+</p>
+
+</div>
+
+</div>
+
+<!-- *********************************************************************** -->
+<h2><a name="tipsandtricks">Tips and Tricks</a></h2>
+<!-- *********************************************************************** -->
+
+<div>
+
+<p>There are a variety of useful tips and tricks that you come to know after
+working on and with LLVM that aren't obvious at first glance. Instead of
+letting everyone rediscover them, this section talks about some of these
+issues.</p>
+
+<!-- ======================================================================= -->
+<h4><a name="offsetofsizeof">Implementing portable offsetof/sizeof</a></h4>
+<!-- ======================================================================= -->
+
+<div>
+
+<p>One interesting thing that comes up, if you are trying to keep the code
+generated by your compiler "target independent", is that you often need to know
+the size of some LLVM type or the offset of some field in an LLVM structure.
+For example, you might need to pass the size of a type into a function that
+allocates memory.</p>
+
+<p>Unfortunately, this can vary widely across targets: for example, the width of
+a pointer is trivially target-specific. However, there is a <a
+href="http://nondot.org/sabre/LLVMNotes/SizeOf-OffsetOf-VariableSizedStructs.txt">clever
+way to use the getelementptr instruction</a> that allows you to compute this
+in a portable way.</p>
+
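+<p>A minimal sketch of that trick, using the <tt>Builder</tt> object from this
+tutorial (illustrative code; the 64-bit result type and the helper names are
+just for the example):</p>
+
+<div class="doc_code">
+<pre>
+// Portable "sizeof(T)": index one element past a null T* and convert the
+// resulting address to an integer.  No target data is consulted; the target
+// itself lowers the getelementptr.
+Value *EmitSizeOf(Type *T) {
+  Value *NullPtr = Constant::getNullValue(PointerType::getUnqual(T));
+  Value *OnePast = Builder.CreateGEP(NullPtr, Builder.getInt32(1), "size.gep");
+  return Builder.CreatePtrToInt(OnePast, Builder.getInt64Ty(), "size");
+}
+
+// Portable "offsetof(T, field FieldNo)" for a struct type T.
+Value *EmitOffsetOf(StructType *T, unsigned FieldNo) {
+  Value *NullPtr = Constant::getNullValue(PointerType::getUnqual(T));
+  Value *Idxs[] = { Builder.getInt32(0), Builder.getInt32(FieldNo) };
+  Value *FieldPtr = Builder.CreateGEP(NullPtr, Idxs, "offset.gep");
+  return Builder.CreatePtrToInt(FieldPtr, Builder.getInt64Ty(), "offset");
+}
+</pre>
+</div>
+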
+</div>
+
+<!-- ======================================================================= -->
+<h4><a name="gcstack">Garbage Collected Stack Frames</a></h4>
+<!-- ======================================================================= -->
+
+<div>
+
+<p>Some languages want to explicitly manage their stack frames, often so that
+they are garbage collected or to allow easy implementation of closures. There
+are often better ways to implement these features than explicit stack frames,
+but <a
+href="http://nondot.org/sabre/LLVMNotes/ExplicitlyManagedStackFrames.txt">LLVM
+does support them</a> if you want. It requires your front-end to convert the
+code into <a
+href="http://en.wikipedia.org/wiki/Continuation-passing_style">Continuation
+Passing Style</a> and to use tail calls (which LLVM also supports).</p>
+
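+<p>For the tail-call half of that recipe, marking a call as a tail call through
+the C++ API looks roughly like this (a sketch: <tt>Callee</tt> and
+<tt>Args</tt> stand for whatever your CPS conversion produced, and both caller
+and callee should use the fast calling convention if you want guaranteed
+tail-call optimization):</p>
+
+<div class="doc_code">
+<pre>
+// Emit a call in tail position and ask LLVM to turn it into a real jump.
+CallInst *CI = Builder.CreateCall(Callee, Args, "tailtmp");
+CI-&gt;setCallingConv(CallingConv::Fast);  // must match the callee's convention
+CI-&gt;setTailCall();                      // mark it as a tail call
+Builder.CreateRet(CI);                  // return its result immediately
+</pre>
+</div>
+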
+</div>
+
+</div>
+
+<!-- *********************************************************************** -->
+<hr>
+<address>
+ <a href="http://jigsaw.w3.org/css-validator/check/referer"><img
+ src="http://jigsaw.w3.org/css-validator/images/vcss" alt="Valid CSS!"></a>
+ <a href="http://validator.w3.org/check/referer"><img
+ src="http://www.w3.org/Icons/valid-html401" alt="Valid HTML 4.01!"></a>
+
+ <a href="mailto:sabre@nondot.org">Chris Lattner</a><br>
+ <a href="http://llvm.org/">The LLVM Compiler Infrastructure</a><br>
+ Last modified: $Date$
+</address>
+</body>
+</html>
diff --git a/docs/tutorial/index.html b/docs/tutorial/index.html
new file mode 100644
index 00000000000..2c11a9a48b3
--- /dev/null
+++ b/docs/tutorial/index.html
@@ -0,0 +1,48 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
+ "http://www.w3.org/TR/html4/strict.dtd">
+<html>
+<head>
+ <title>LLVM Tutorial: Table of Contents</title>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
+ <meta name="author" content="Owen Anderson">
+ <meta name="description"
+ content="LLVM Tutorial: Table of Contents.">
+ <link rel="stylesheet" href="../_static/llvm.css" type="text/css">
+</head>
+
+<body>
+
+<h1>LLVM Tutorial: Table of Contents</h1>
+
+<ol>
+ <li>Kaleidoscope: Implementing a Language with LLVM
+ <ol>
+ <li><a href="LangImpl1.html">Tutorial Introduction and the Lexer</a></li>
+ <li><a href="LangImpl2.html">Implementing a Parser and AST</a></li>
+ <li><a href="LangImpl3.html">Implementing Code Generation to LLVM IR</a></li>
+ <li><a href="LangImpl4.html">Adding JIT and Optimizer Support</a></li>
+ <li><a href="LangImpl5.html">Extending the language: control flow</a></li>
+ <li><a href="LangImpl6.html">Extending the language: user-defined operators</a></li>
+ <li><a href="LangImpl7.html">Extending the language: mutable variables / SSA construction</a></li>
+ <li><a href="LangImpl8.html">Conclusion and other useful LLVM tidbits</a></li>
+ </ol></li>
+ <li>Kaleidoscope: Implementing a Language with LLVM in Objective Caml
+ <ol>
+ <li><a href="OCamlLangImpl1.html">Tutorial Introduction and the Lexer</a></li>
+ <li><a href="OCamlLangImpl2.html">Implementing a Parser and AST</a></li>
+ <li><a href="OCamlLangImpl3.html">Implementing Code Generation to LLVM IR</a></li>
+ <li><a href="OCamlLangImpl4.html">Adding JIT and Optimizer Support</a></li>
+ <li><a href="OCamlLangImpl5.html">Extending the language: control flow</a></li>
+ <li><a href="OCamlLangImpl6.html">Extending the language: user-defined operators</a></li>
+ <li><a href="OCamlLangImpl7.html">Extending the language: mutable variables / SSA construction</a></li>
+ <li><a href="OCamlLangImpl8.html">Conclusion and other useful LLVM tidbits</a></li>
+ </ol></li>
+ <li>Advanced Topics
+ <ol>
+ <li><a href="http://llvm.org/pubs/2004-09-22-LCPCLLVMTutorial.html">Writing
+ an Optimization for LLVM</a></li>
+ </ol></li>
+</ol>
+
+</body>
+</html>
diff --git a/docs/userguides.rst b/docs/userguides.rst
new file mode 100644
index 00000000000..fa6e3cfff23
--- /dev/null
+++ b/docs/userguides.rst
@@ -0,0 +1,90 @@
+.. _userguides:
+
+User Guides
+===========
+
+.. toctree::
+ :hidden:
+
+ CMake
+ CommandGuide/index
+ DeveloperPolicy
+ GettingStartedVS
+ FAQ
+ Lexicon
+ Packaging
+ HowToAddABuilder
+
+* `The LLVM Getting Started Guide <GettingStarted.html>`_
+
+ Discusses how to get up and running quickly with the LLVM infrastructure.
+ Everything from unpacking and compilation of the distribution to execution
+ of some tools.
+
+* :ref:`building-with-cmake`
+
+ An addendum to the main Getting Started guide for those using the `CMake
+ build system <http://www.cmake.org>`_.
+
+* `Getting Started with the LLVM System using Microsoft Visual Studio
+ <GettingStartedVS.html>`_
+
+ An addendum to the main Getting Started guide for those using Visual Studio
+ on Windows.
+
+* `LLVM Tutorial <tutorial/>`_
+
+  A walk through the process of using LLVM for a custom language, and the
+  facilities LLVM offers, presented in tutorial form.
+
+* :ref:`developer_policy`
+
+ The LLVM project's policy towards developers and their contributions.
+
+* :ref:`LLVM Command Guide <commands>`
+
+ A reference manual for the LLVM command line utilities ("man" pages for LLVM
+ tools).
+
+* `LLVM's Analysis and Transform Passes <Passes.html>`_
+
+ A list of optimizations and analyses implemented in LLVM.
+
+* :ref:`faq`
+
+ A list of common questions and problems and their solutions.
+
+* `Release notes for the current release <ReleaseNotes.html>`_
+
+ This describes new features, known bugs, and other limitations.
+
+* `How to Submit A Bug Report <HowToSubmitABug.html>`_
+
+ Instructions for properly submitting information about any bugs you run into
+ in the LLVM system.
+
+* `LLVM Testing Infrastructure Guide <TestingGuide.html>`_
+
+ A reference manual for using the LLVM testing infrastructure.
+
+* `How to build the C, C++, ObjC, and ObjC++ front end <http://clang.llvm.org/get_started.html>`_
+
+ Instructions for building the clang front-end from source.
+
+* :ref:`packaging`
+
+ Advice on packaging LLVM into a distribution.
+
+* :ref:`lexicon`
+
+ Definition of acronyms, terms and concepts used in LLVM.
+
+* :ref:`how_to_add_a_builder`
+
+  Instructions for adding a new builder to the LLVM buildbot master.
+
+* **IRC** -- You can probably find help on the unofficial LLVM IRC channel.
+
+  We are often on irc.oftc.net in the #llvm channel. If you are using the
+  Mozilla browser and have ChatZilla installed, you can `join #llvm on
+  irc.oftc.net <irc://irc.oftc.net/llvm>`_.
diff --git a/docs/yaml2obj.rst b/docs/yaml2obj.rst
new file mode 100644
index 00000000000..cb59162e5ac
--- /dev/null
+++ b/docs/yaml2obj.rst
@@ -0,0 +1,222 @@
+.. _yaml2obj:
+
+yaml2obj
+========
+
+yaml2obj takes a YAML description of an object file and converts it to a binary
+file.
+
+.. code-block:: bash
+
+  $ yaml2obj input-file
+
+.. program:: yaml2obj
+
+Outputs the binary to stdout.
+
+COFF Syntax
+-----------
+
+Here's a sample COFF file.
+
+.. code-block:: yaml
+
+ header:
+ Machine: IMAGE_FILE_MACHINE_I386 # (0x14C)
+
+ sections:
+ - Name: .text
+ Characteristics: [ IMAGE_SCN_CNT_CODE
+ , IMAGE_SCN_ALIGN_16BYTES
+ , IMAGE_SCN_MEM_EXECUTE
+ , IMAGE_SCN_MEM_READ
+ ] # 0x60500020
+ SectionData:
+ "\x83\xEC\x0C\xC7\x44\x24\x08\x00\x00\x00\x00\xC7\x04\x24\x00\x00\x00\x00\xE8\x00\x00\x00\x00\xE8\x00\x00\x00\x00\x8B\x44\x24\x08\x83\xC4\x0C\xC3" # |....D$.......$...............D$.....|
+
+ symbols:
+ - Name: .text
+ Value: 0
+ SectionNumber: 1
+ SimpleType: IMAGE_SYM_TYPE_NULL # (0)
+ ComplexType: IMAGE_SYM_DTYPE_NULL # (0)
+ StorageClass: IMAGE_SYM_CLASS_STATIC # (3)
+ NumberOfAuxSymbols: 1
+ AuxillaryData:
+ "\x24\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00" # |$.................|
+
+ - Name: _main
+ Value: 0
+ SectionNumber: 1
+ SimpleType: IMAGE_SYM_TYPE_NULL # (0)
+ ComplexType: IMAGE_SYM_DTYPE_NULL # (0)
+ StorageClass: IMAGE_SYM_CLASS_EXTERNAL # (2)
+
+Here's a simplified Kwalify_ schema with an extension to allow alternate types.
+
+.. _Kwalify: http://www.kuwata-lab.com/kwalify/ruby/users-guide.html
+
+.. code-block:: yaml
+
+ type: map
+ mapping:
+ header:
+ type: map
+ mapping:
+ Machine: [ {type: str, enum:
+ [ IMAGE_FILE_MACHINE_UNKNOWN
+ , IMAGE_FILE_MACHINE_AM33
+ , IMAGE_FILE_MACHINE_AMD64
+ , IMAGE_FILE_MACHINE_ARM
+ , IMAGE_FILE_MACHINE_ARMV7
+ , IMAGE_FILE_MACHINE_EBC
+ , IMAGE_FILE_MACHINE_I386
+ , IMAGE_FILE_MACHINE_IA64
+ , IMAGE_FILE_MACHINE_M32R
+ , IMAGE_FILE_MACHINE_MIPS16
+ , IMAGE_FILE_MACHINE_MIPSFPU
+ , IMAGE_FILE_MACHINE_MIPSFPU16
+ , IMAGE_FILE_MACHINE_POWERPC
+ , IMAGE_FILE_MACHINE_POWERPCFP
+ , IMAGE_FILE_MACHINE_R4000
+ , IMAGE_FILE_MACHINE_SH3
+ , IMAGE_FILE_MACHINE_SH3DSP
+ , IMAGE_FILE_MACHINE_SH4
+ , IMAGE_FILE_MACHINE_SH5
+ , IMAGE_FILE_MACHINE_THUMB
+ , IMAGE_FILE_MACHINE_WCEMIPSV2
+ ]}
+ , {type: int}
+ ]
+ Characteristics:
+ - type: seq
+ sequence:
+ - type: str
+ enum: [ IMAGE_FILE_RELOCS_STRIPPED
+ , IMAGE_FILE_EXECUTABLE_IMAGE
+ , IMAGE_FILE_LINE_NUMS_STRIPPED
+ , IMAGE_FILE_LOCAL_SYMS_STRIPPED
+ , IMAGE_FILE_AGGRESSIVE_WS_TRIM
+ , IMAGE_FILE_LARGE_ADDRESS_AWARE
+ , IMAGE_FILE_BYTES_REVERSED_LO
+ , IMAGE_FILE_32BIT_MACHINE
+ , IMAGE_FILE_DEBUG_STRIPPED
+ , IMAGE_FILE_REMOVABLE_RUN_FROM_SWAP
+ , IMAGE_FILE_NET_RUN_FROM_SWAP
+ , IMAGE_FILE_SYSTEM
+ , IMAGE_FILE_DLL
+ , IMAGE_FILE_UP_SYSTEM_ONLY
+ , IMAGE_FILE_BYTES_REVERSED_HI
+ ]
+ - type: int
+ sections:
+ type: seq
+ sequence:
+ - type: map
+ mapping:
+ Name: {type: str}
+ Characteristics:
+ - type: seq
+ sequence:
+ - type: str
+ enum: [ IMAGE_SCN_TYPE_NO_PAD
+ , IMAGE_SCN_CNT_CODE
+ , IMAGE_SCN_CNT_INITIALIZED_DATA
+ , IMAGE_SCN_CNT_UNINITIALIZED_DATA
+ , IMAGE_SCN_LNK_OTHER
+ , IMAGE_SCN_LNK_INFO
+ , IMAGE_SCN_LNK_REMOVE
+ , IMAGE_SCN_LNK_COMDAT
+ , IMAGE_SCN_GPREL
+ , IMAGE_SCN_MEM_PURGEABLE
+ , IMAGE_SCN_MEM_16BIT
+ , IMAGE_SCN_MEM_LOCKED
+ , IMAGE_SCN_MEM_PRELOAD
+ , IMAGE_SCN_ALIGN_1BYTES
+ , IMAGE_SCN_ALIGN_2BYTES
+ , IMAGE_SCN_ALIGN_4BYTES
+ , IMAGE_SCN_ALIGN_8BYTES
+ , IMAGE_SCN_ALIGN_16BYTES
+ , IMAGE_SCN_ALIGN_32BYTES
+ , IMAGE_SCN_ALIGN_64BYTES
+ , IMAGE_SCN_ALIGN_128BYTES
+ , IMAGE_SCN_ALIGN_256BYTES
+ , IMAGE_SCN_ALIGN_512BYTES
+ , IMAGE_SCN_ALIGN_1024BYTES
+ , IMAGE_SCN_ALIGN_2048BYTES
+ , IMAGE_SCN_ALIGN_4096BYTES
+ , IMAGE_SCN_ALIGN_8192BYTES
+ , IMAGE_SCN_LNK_NRELOC_OVFL
+ , IMAGE_SCN_MEM_DISCARDABLE
+ , IMAGE_SCN_MEM_NOT_CACHED
+ , IMAGE_SCN_MEM_NOT_PAGED
+ , IMAGE_SCN_MEM_SHARED
+ , IMAGE_SCN_MEM_EXECUTE
+ , IMAGE_SCN_MEM_READ
+ , IMAGE_SCN_MEM_WRITE
+ ]
+ - type: int
+ SectionData: {type: str}
+ symbols:
+ type: seq
+ sequence:
+ - type: map
+ mapping:
+ Name: {type: str}
+ Value: {type: int}
+ SectionNumber: {type: int}
+ SimpleType: [ {type: str, enum: [ IMAGE_SYM_TYPE_NULL
+ , IMAGE_SYM_TYPE_VOID
+ , IMAGE_SYM_TYPE_CHAR
+ , IMAGE_SYM_TYPE_SHORT
+ , IMAGE_SYM_TYPE_INT
+ , IMAGE_SYM_TYPE_LONG
+ , IMAGE_SYM_TYPE_FLOAT
+ , IMAGE_SYM_TYPE_DOUBLE
+ , IMAGE_SYM_TYPE_STRUCT
+ , IMAGE_SYM_TYPE_UNION
+ , IMAGE_SYM_TYPE_ENUM
+ , IMAGE_SYM_TYPE_MOE
+ , IMAGE_SYM_TYPE_BYTE
+ , IMAGE_SYM_TYPE_WORD
+ , IMAGE_SYM_TYPE_UINT
+ , IMAGE_SYM_TYPE_DWORD
+ ]}
+ , {type: int}
+ ]
+ ComplexType: [ {type: str, enum: [ IMAGE_SYM_DTYPE_NULL
+ , IMAGE_SYM_DTYPE_POINTER
+ , IMAGE_SYM_DTYPE_FUNCTION
+ , IMAGE_SYM_DTYPE_ARRAY
+ ]}
+ , {type: int}
+ ]
+ StorageClass: [ {type: str, enum:
+ [ IMAGE_SYM_CLASS_END_OF_FUNCTION
+ , IMAGE_SYM_CLASS_NULL
+ , IMAGE_SYM_CLASS_AUTOMATIC
+ , IMAGE_SYM_CLASS_EXTERNAL
+ , IMAGE_SYM_CLASS_STATIC
+ , IMAGE_SYM_CLASS_REGISTER
+ , IMAGE_SYM_CLASS_EXTERNAL_DEF
+ , IMAGE_SYM_CLASS_LABEL
+ , IMAGE_SYM_CLASS_UNDEFINED_LABEL
+ , IMAGE_SYM_CLASS_MEMBER_OF_STRUCT
+ , IMAGE_SYM_CLASS_ARGUMENT
+ , IMAGE_SYM_CLASS_STRUCT_TAG
+ , IMAGE_SYM_CLASS_MEMBER_OF_UNION
+ , IMAGE_SYM_CLASS_UNION_TAG
+ , IMAGE_SYM_CLASS_TYPE_DEFINITION
+ , IMAGE_SYM_CLASS_UNDEFINED_STATIC
+ , IMAGE_SYM_CLASS_ENUM_TAG
+ , IMAGE_SYM_CLASS_MEMBER_OF_ENUM
+ , IMAGE_SYM_CLASS_REGISTER_PARAM
+ , IMAGE_SYM_CLASS_BIT_FIELD
+ , IMAGE_SYM_CLASS_BLOCK
+ , IMAGE_SYM_CLASS_FUNCTION
+ , IMAGE_SYM_CLASS_END_OF_STRUCT
+ , IMAGE_SYM_CLASS_FILE
+ , IMAGE_SYM_CLASS_SECTION
+ , IMAGE_SYM_CLASS_WEAK_EXTERNAL
+ , IMAGE_SYM_CLASS_CLR_TOKEN
+ ]}
+ , {type: int}
+ ]