author     Amjad Aboud <amjad.aboud@intel.com>    2015-12-21 14:07:14 +0000
committer  Amjad Aboud <amjad.aboud@intel.com>    2015-12-21 14:07:14 +0000
commit     9889174eadb0f269ef132b3bd34a9f6fe3baa642
tree       399cb797fd18dde39b150ffa2644aae74be3bdd5
parent     ba4a99ac26579b1dc9e88b059138a8ed6e7bd929
Implemented support for IA interrupt and exception handlers:
http://lists.llvm.org/pipermail/cfe-dev/2015-September/045171.html
Differential Revision: http://reviews.llvm.org/D15567
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@256155 91177308-0d34-0410-b5e6-96231b3b80d8
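
At the IR level, the new x86_intrcc convention marks a function as a hardware interrupt or exception handler. The following is a minimal sketch of the two forms the convention accepts, distilled from the tests added at the end of this patch; the frame struct layout and all names are illustrative, not mandated by the commit:

    ; 32-bit sketch; on x86-64 the fields and the error code are i64.
    %frame = type { i32, i32, i32, i32, i32 }   ; eip, cs, eflags, esp, ss

    ; Vectors without an error code: a single argument pointing at the
    ; CPU-pushed frame, passed byval on the stack rather than in a register.
    define x86_intrcc void @isr(%frame* %f) {
      ret void    ; lowered to iretl/iretq instead of ret
    }

    ; Vectors with an error code: a second i32 (i64 on x86-64) argument,
    ; which the callee pops from the stack before executing iret.
    define x86_intrcc void @fault(%frame* %f, i32 %err) {
      ret void
    }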
-rw-r--r--   include/llvm/IR/CallingConv.h                     |   7
-rw-r--r--   lib/AsmParser/LLLexer.cpp                         |   1
-rw-r--r--   lib/AsmParser/LLParser.cpp                        |   2
-rw-r--r--   lib/AsmParser/LLToken.h                           |   1
-rw-r--r--   lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp  |   5
-rw-r--r--   lib/IR/AsmWriter.cpp                              |   1
-rw-r--r--   lib/Target/X86/X86CallingConv.td                  |  15
-rw-r--r--   lib/Target/X86/X86ExpandPseudo.cpp                | 387
-rw-r--r--   lib/Target/X86/X86ISelLowering.cpp                |  45
-rw-r--r--   lib/Target/X86/X86ISelLowering.h                  |   3
-rw-r--r--   lib/Target/X86/X86InstrControl.td                 |  13
-rw-r--r--   lib/Target/X86/X86InstrInfo.td                    |   2
-rw-r--r--   lib/Target/X86/X86InstrSystem.td                  |   6
-rw-r--r--   lib/Target/X86/X86RegisterInfo.cpp                |  30
-rw-r--r--   test/CodeGen/X86/x86-32-intrcc.ll                 |  79
-rw-r--r--   test/CodeGen/X86/x86-64-intrcc.ll                 |  86
16 files changed, 485 insertions(+), 198 deletions(-)
diff --git a/include/llvm/IR/CallingConv.h b/include/llvm/IR/CallingConv.h
index 8204d3e2e81..bc050928266 100644
--- a/include/llvm/IR/CallingConv.h
+++ b/include/llvm/IR/CallingConv.h
@@ -161,6 +161,13 @@ namespace CallingConv {
     /// \brief HHVM calling convention for invoking C/C++ helpers.
     HHVM_C = 82,

+    /// X86_INTR - x86 hardware interrupt context. Callee may take one or two
+    /// parameters, where the 1st represents a pointer to hardware context frame
+    /// and the 2nd represents hardware error code, the presence of the later
+    /// depends on the interrupt vector taken. Valid for both 32- and 64-bit
+    /// subtargets.
+    X86_INTR = 83,
+
     /// The highest possible calling convention ID. Must be some 2^k - 1.
     MaxID = 1023
   };
diff --git a/lib/AsmParser/LLLexer.cpp b/lib/AsmParser/LLLexer.cpp
index b1bc5682193..26eca230bb3 100644
--- a/lib/AsmParser/LLLexer.cpp
+++ b/lib/AsmParser/LLLexer.cpp
@@ -591,6 +591,7 @@ lltok::Kind LLLexer::LexIdentifier() {
   KEYWORD(preserve_mostcc);
   KEYWORD(preserve_allcc);
   KEYWORD(ghccc);
+  KEYWORD(x86_intrcc);
   KEYWORD(hhvmcc);
   KEYWORD(hhvm_ccc);
   KEYWORD(cxx_fast_tlscc);
diff --git a/lib/AsmParser/LLParser.cpp b/lib/AsmParser/LLParser.cpp
index d0bd31c4217..3471a2dbd05 100644
--- a/lib/AsmParser/LLParser.cpp
+++ b/lib/AsmParser/LLParser.cpp
@@ -1546,6 +1546,7 @@ bool LLParser::ParseOptionalDLLStorageClass(unsigned &Res) {
 ///   ::= 'preserve_mostcc'
 ///   ::= 'preserve_allcc'
 ///   ::= 'ghccc'
+///   ::= 'x86_intrcc'
 ///   ::= 'hhvmcc'
 ///   ::= 'hhvm_ccc'
 ///   ::= 'cxx_fast_tlscc'
@@ -1577,6 +1578,7 @@ bool LLParser::ParseOptionalCallingConv(unsigned &CC) {
   case lltok::kw_preserve_mostcc:CC = CallingConv::PreserveMost; break;
   case lltok::kw_preserve_allcc: CC = CallingConv::PreserveAll; break;
   case lltok::kw_ghccc:          CC = CallingConv::GHC; break;
+  case lltok::kw_x86_intrcc:     CC = CallingConv::X86_INTR; break;
   case lltok::kw_hhvmcc:         CC = CallingConv::HHVM; break;
   case lltok::kw_hhvm_ccc:       CC = CallingConv::HHVM_C; break;
   case lltok::kw_cxx_fast_tlscc: CC = CallingConv::CXX_FAST_TLS; break;
diff --git a/lib/AsmParser/LLToken.h b/lib/AsmParser/LLToken.h
index a6632e4f29c..29a7f16d3c2 100644
--- a/lib/AsmParser/LLToken.h
+++ b/lib/AsmParser/LLToken.h
@@ -100,6 +100,7 @@ namespace lltok {
     kw_webkit_jscc, kw_anyregcc,
     kw_preserve_mostcc, kw_preserve_allcc,
     kw_ghccc,
+    kw_x86_intrcc,
     kw_hhvmcc,
     kw_hhvm_ccc,
     kw_cxx_fast_tlscc,
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 544c7e7631d..e6e6cdc9ca3 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -7361,6 +7361,11 @@ void SelectionDAGISel::LowerArguments(const Function &F) {
       // in the various CC lowering callbacks.
       Flags.setByVal();
     }
+    if (F.getCallingConv() == CallingConv::X86_INTR) {
+      // IA Interrupt passes frame (1st parameter) by value in the stack.
+      if (Idx == 1)
+        Flags.setByVal();
+    }
     if (Flags.isByVal() || Flags.isInAlloca()) {
       PointerType *Ty = cast<PointerType>(I->getType());
       Type *ElementTy = Ty->getElementType();
diff --git a/lib/IR/AsmWriter.cpp b/lib/IR/AsmWriter.cpp
index 124fc56e78f..185db47f07e 100644
--- a/lib/IR/AsmWriter.cpp
+++ b/lib/IR/AsmWriter.cpp
@@ -313,6 +313,7 @@ static void PrintCallingConv(unsigned cc, raw_ostream &Out) {
   case CallingConv::X86_64_Win64:  Out << "x86_64_win64cc"; break;
   case CallingConv::SPIR_FUNC:     Out << "spir_func"; break;
   case CallingConv::SPIR_KERNEL:   Out << "spir_kernel"; break;
+  case CallingConv::X86_INTR:      Out << "x86_intrcc"; break;
   case CallingConv::HHVM:          Out << "hhvmcc"; break;
   case CallingConv::HHVM_C:        Out << "hhvm_ccc"; break;
   }
diff --git a/lib/Target/X86/X86CallingConv.td b/lib/Target/X86/X86CallingConv.td
index f9b26b27a54..26100a56ee6 100644
--- a/lib/Target/X86/X86CallingConv.td
+++ b/lib/Target/X86/X86CallingConv.td
@@ -739,6 +739,14 @@ def CC_Intel_OCL_BI : CallingConv<[
   CCDelegateTo<CC_X86_32_C>
 ]>;

+def CC_X86_32_Intr : CallingConv<[
+  CCAssignToStack<4, 4>
+]>;
+
+def CC_X86_64_Intr : CallingConv<[
+  CCAssignToStack<8, 8>
+]>;
+
 //===----------------------------------------------------------------------===//
 // X86 Root Argument Calling Conventions
 //===----------------------------------------------------------------------===//
@@ -751,6 +759,7 @@ def CC_X86_32 : CallingConv<[
   CCIfCC<"CallingConv::Fast", CCDelegateTo<CC_X86_32_FastCC>>,
   CCIfCC<"CallingConv::GHC", CCDelegateTo<CC_X86_32_GHC>>,
   CCIfCC<"CallingConv::HiPE", CCDelegateTo<CC_X86_32_HiPE>>,
+  CCIfCC<"CallingConv::X86_INTR", CCDelegateTo<CC_X86_32_Intr>>,

   // Otherwise, drop to normal X86-32 CC
   CCDelegateTo<CC_X86_32_C>
@@ -767,6 +776,7 @@ def CC_X86_64 : CallingConv<[
   CCIfCC<"CallingConv::X86_VectorCall", CCDelegateTo<CC_X86_Win64_VectorCall>>,
   CCIfCC<"CallingConv::HHVM", CCDelegateTo<CC_X86_64_HHVM>>,
   CCIfCC<"CallingConv::HHVM_C", CCDelegateTo<CC_X86_64_HHVM_C>>,
+  CCIfCC<"CallingConv::X86_INTR", CCDelegateTo<CC_X86_64_Intr>>,

   // Mingw64 and native Win64 use Win64 CC
   CCIfSubtarget<"isTargetWin64()", CCDelegateTo<CC_X86_Win64_C>>,
@@ -817,6 +827,11 @@ def CSR_64_MostRegs : CalleeSavedRegs<(add RBX, RCX, RDX, RSI, RDI, R8, R9, R10,
                                            R11, R12, R13, R14, R15, RBP,
                                            (sequence "XMM%u", 0, 15))>;

+def CSR_32_AllRegs     : CalleeSavedRegs<(add EAX, EBX, ECX, EDX, EBP, ESI,
+                                              EDI, ESP)>;
+def CSR_32_AllRegs_SSE : CalleeSavedRegs<(add CSR_32_AllRegs,
+                                              (sequence "XMM%u", 0, 7))>;
+
 def CSR_64_AllRegs     : CalleeSavedRegs<(add CSR_64_MostRegs, RAX, RSP,
                                               (sequence "XMM%u", 16, 31))>;
 def CSR_64_AllRegs_AVX : CalleeSavedRegs<(sub (add CSR_64_MostRegs, RAX, RSP,
diff --git a/lib/Target/X86/X86ExpandPseudo.cpp b/lib/Target/X86/X86ExpandPseudo.cpp
index 83a62b731b5..3c296df617e 100644
--- a/lib/Target/X86/X86ExpandPseudo.cpp
+++ b/lib/Target/X86/X86ExpandPseudo.cpp
@@ -1,189 +1,198 @@
-//===------- X86ExpandPseudo.cpp - Expand pseudo instructions -------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains a pass that expands pseudo instructions into target
-// instructions to allow proper scheduling, if-conversion, other late
-// optimizations, or simply the encoding of the instructions.
-//
-//===----------------------------------------------------------------------===//
-
-#include "X86.h"
-#include "X86FrameLowering.h"
-#include "X86InstrBuilder.h"
-#include "X86InstrInfo.h"
-#include "X86MachineFunctionInfo.h"
-#include "X86Subtarget.h"
-#include "llvm/Analysis/EHPersonalities.h"
-#include "llvm/CodeGen/Passes.h" // For IDs of passes that are preserved.
-#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/IR/GlobalValue.h"
-using namespace llvm;
-
-#define DEBUG_TYPE "x86-pseudo"
-
-namespace {
-class X86ExpandPseudo : public MachineFunctionPass {
-public:
-  static char ID;
-  X86ExpandPseudo() : MachineFunctionPass(ID) {}
-
-  void getAnalysisUsage(AnalysisUsage &AU) const override {
-    AU.setPreservesCFG();
-    AU.addPreservedID(MachineLoopInfoID);
-    AU.addPreservedID(MachineDominatorsID);
-    MachineFunctionPass::getAnalysisUsage(AU);
-  }
-
-  const X86Subtarget *STI;
-  const X86InstrInfo *TII;
-  const X86RegisterInfo *TRI;
-  const X86FrameLowering *X86FL;
-
-  bool runOnMachineFunction(MachineFunction &Fn) override;
-
-  const char *getPassName() const override {
-    return "X86 pseudo instruction expansion pass";
-  }
-
-private:
-  bool ExpandMI(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI);
-  bool ExpandMBB(MachineBasicBlock &MBB);
-};
-char X86ExpandPseudo::ID = 0;
-} // End anonymous namespace.
-
-/// If \p MBBI is a pseudo instruction, this method expands
-/// it to the corresponding (sequence of) actual instruction(s).
-/// \returns true if \p MBBI has been expanded.
-bool X86ExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
-                               MachineBasicBlock::iterator MBBI) {
-  MachineInstr &MI = *MBBI;
-  unsigned Opcode = MI.getOpcode();
-  DebugLoc DL = MBBI->getDebugLoc();
-  switch (Opcode) {
-  default:
-    return false;
-  case X86::TCRETURNdi:
-  case X86::TCRETURNri:
-  case X86::TCRETURNmi:
-  case X86::TCRETURNdi64:
-  case X86::TCRETURNri64:
-  case X86::TCRETURNmi64: {
-    bool isMem = Opcode == X86::TCRETURNmi || Opcode == X86::TCRETURNmi64;
-    MachineOperand &JumpTarget = MBBI->getOperand(0);
-    MachineOperand &StackAdjust = MBBI->getOperand(isMem ? 5 : 1);
-    assert(StackAdjust.isImm() && "Expecting immediate value.");
-
-    // Adjust stack pointer.
-    int StackAdj = StackAdjust.getImm();
-
-    if (StackAdj) {
-      // Check for possible merge with preceding ADD instruction.
-      StackAdj += X86FL->mergeSPUpdates(MBB, MBBI, true);
-      X86FL->emitSPUpdate(MBB, MBBI, StackAdj, /*InEpilogue=*/true);
-    }
-
-    // Jump to label or value in register.
-    bool IsWin64 = STI->isTargetWin64();
-    if (Opcode == X86::TCRETURNdi || Opcode == X86::TCRETURNdi64) {
-      unsigned Op = (Opcode == X86::TCRETURNdi)
-                        ? X86::TAILJMPd
-                        : (IsWin64 ? X86::TAILJMPd64_REX : X86::TAILJMPd64);
-      MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII->get(Op));
-      if (JumpTarget.isGlobal())
-        MIB.addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset(),
-                             JumpTarget.getTargetFlags());
-      else {
-        assert(JumpTarget.isSymbol());
-        MIB.addExternalSymbol(JumpTarget.getSymbolName(),
-                              JumpTarget.getTargetFlags());
-      }
-    } else if (Opcode == X86::TCRETURNmi || Opcode == X86::TCRETURNmi64) {
-      unsigned Op = (Opcode == X86::TCRETURNmi)
-                        ? X86::TAILJMPm
-                        : (IsWin64 ? X86::TAILJMPm64_REX : X86::TAILJMPm64);
-      MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII->get(Op));
-      for (unsigned i = 0; i != 5; ++i)
-        MIB.addOperand(MBBI->getOperand(i));
-    } else if (Opcode == X86::TCRETURNri64) {
-      BuildMI(MBB, MBBI, DL,
-              TII->get(IsWin64 ? X86::TAILJMPr64_REX : X86::TAILJMPr64))
-          .addReg(JumpTarget.getReg(), RegState::Kill);
-    } else {
-      BuildMI(MBB, MBBI, DL, TII->get(X86::TAILJMPr))
-          .addReg(JumpTarget.getReg(), RegState::Kill);
-    }
-
-    MachineInstr *NewMI = std::prev(MBBI);
-    NewMI->copyImplicitOps(*MBBI->getParent()->getParent(), MBBI);
-
-    // Delete the pseudo instruction TCRETURN.
-    MBB.erase(MBBI);
-
-    return true;
-  }
-  case X86::EH_RETURN:
-  case X86::EH_RETURN64: {
-    MachineOperand &DestAddr = MBBI->getOperand(0);
-    assert(DestAddr.isReg() && "Offset should be in register!");
-    const bool Uses64BitFramePtr =
-        STI->isTarget64BitLP64() || STI->isTargetNaCl64();
-    unsigned StackPtr = TRI->getStackRegister();
-    BuildMI(MBB, MBBI, DL,
-            TII->get(Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr), StackPtr)
-        .addReg(DestAddr.getReg());
-    // The EH_RETURN pseudo is really removed during the MC Lowering.
-    return true;
-  }
-
-  case X86::EH_RESTORE: {
-    // Restore ESP and EBP, and optionally ESI if required.
-    bool IsSEH = isAsynchronousEHPersonality(classifyEHPersonality(
-        MBB.getParent()->getFunction()->getPersonalityFn()));
-    X86FL->restoreWin32EHStackPointers(MBB, MBBI, DL, /*RestoreSP=*/IsSEH);
-    MBBI->eraseFromParent();
-    return true;
-  }
-  }
-  llvm_unreachable("Previous switch has a fallthrough?");
-}
-
-/// Expand all pseudo instructions contained in \p MBB.
-/// \returns true if any expansion occurred for \p MBB.
-bool X86ExpandPseudo::ExpandMBB(MachineBasicBlock &MBB) {
-  bool Modified = false;
-
-  // MBBI may be invalidated by the expansion.
-  MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
-  while (MBBI != E) {
-    MachineBasicBlock::iterator NMBBI = std::next(MBBI);
-    Modified |= ExpandMI(MBB, MBBI);
-    MBBI = NMBBI;
-  }
-
-  return Modified;
-}
-
-bool X86ExpandPseudo::runOnMachineFunction(MachineFunction &MF) {
-  STI = &static_cast<const X86Subtarget &>(MF.getSubtarget());
-  TII = STI->getInstrInfo();
-  TRI = STI->getRegisterInfo();
-  X86FL = STI->getFrameLowering();
-
-  bool Modified = false;
-  for (MachineBasicBlock &MBB : MF)
-    Modified |= ExpandMBB(MBB);
-  return Modified;
-}
-
-/// Returns an instance of the pseudo instruction expansion pass.
-FunctionPass *llvm::createX86ExpandPseudoPass() {
-  return new X86ExpandPseudo();
-}
+//===------- X86ExpandPseudo.cpp - Expand pseudo instructions -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains a pass that expands pseudo instructions into target
+// instructions to allow proper scheduling, if-conversion, other late
+// optimizations, or simply the encoding of the instructions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "X86.h"
+#include "X86FrameLowering.h"
+#include "X86InstrBuilder.h"
+#include "X86InstrInfo.h"
+#include "X86MachineFunctionInfo.h"
+#include "X86Subtarget.h"
+#include "llvm/Analysis/EHPersonalities.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/Passes.h" // For IDs of passes that are preserved.
+#include "llvm/IR/GlobalValue.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "x86-pseudo"
+
+namespace {
+class X86ExpandPseudo : public MachineFunctionPass {
+public:
+  static char ID;
+  X86ExpandPseudo() : MachineFunctionPass(ID) {}
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.setPreservesCFG();
+    AU.addPreservedID(MachineLoopInfoID);
+    AU.addPreservedID(MachineDominatorsID);
+    MachineFunctionPass::getAnalysisUsage(AU);
+  }
+
+  const X86Subtarget *STI;
+  const X86InstrInfo *TII;
+  const X86RegisterInfo *TRI;
+  const X86FrameLowering *X86FL;
+
+  bool runOnMachineFunction(MachineFunction &Fn) override;
+
+  const char *getPassName() const override {
+    return "X86 pseudo instruction expansion pass";
+  }
+
+private:
+  bool ExpandMI(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI);
+  bool ExpandMBB(MachineBasicBlock &MBB);
+};
+char X86ExpandPseudo::ID = 0;
+} // End anonymous namespace.
+
+/// If \p MBBI is a pseudo instruction, this method expands
+/// it to the corresponding (sequence of) actual instruction(s).
+/// \returns true if \p MBBI has been expanded.
+bool X86ExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
+                               MachineBasicBlock::iterator MBBI) {
+  MachineInstr &MI = *MBBI;
+  unsigned Opcode = MI.getOpcode();
+  DebugLoc DL = MBBI->getDebugLoc();
+  switch (Opcode) {
+  default:
+    return false;
+  case X86::TCRETURNdi:
+  case X86::TCRETURNri:
+  case X86::TCRETURNmi:
+  case X86::TCRETURNdi64:
+  case X86::TCRETURNri64:
+  case X86::TCRETURNmi64: {
+    bool isMem = Opcode == X86::TCRETURNmi || Opcode == X86::TCRETURNmi64;
+    MachineOperand &JumpTarget = MBBI->getOperand(0);
+    MachineOperand &StackAdjust = MBBI->getOperand(isMem ? 5 : 1);
+    assert(StackAdjust.isImm() && "Expecting immediate value.");
+
+    // Adjust stack pointer.
+    int StackAdj = StackAdjust.getImm();
+
+    if (StackAdj) {
+      // Check for possible merge with preceding ADD instruction.
+      StackAdj += X86FL->mergeSPUpdates(MBB, MBBI, true);
+      X86FL->emitSPUpdate(MBB, MBBI, StackAdj, /*InEpilogue=*/true);
+    }
+
+    // Jump to label or value in register.
+    bool IsWin64 = STI->isTargetWin64();
+    if (Opcode == X86::TCRETURNdi || Opcode == X86::TCRETURNdi64) {
+      unsigned Op = (Opcode == X86::TCRETURNdi)
+                        ? X86::TAILJMPd
+                        : (IsWin64 ? X86::TAILJMPd64_REX : X86::TAILJMPd64);
+      MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII->get(Op));
+      if (JumpTarget.isGlobal())
+        MIB.addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset(),
+                             JumpTarget.getTargetFlags());
+      else {
+        assert(JumpTarget.isSymbol());
+        MIB.addExternalSymbol(JumpTarget.getSymbolName(),
+                              JumpTarget.getTargetFlags());
+      }
+    } else if (Opcode == X86::TCRETURNmi || Opcode == X86::TCRETURNmi64) {
+      unsigned Op = (Opcode == X86::TCRETURNmi)
+                        ? X86::TAILJMPm
+                        : (IsWin64 ? X86::TAILJMPm64_REX : X86::TAILJMPm64);
+      MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII->get(Op));
+      for (unsigned i = 0; i != 5; ++i)
+        MIB.addOperand(MBBI->getOperand(i));
+    } else if (Opcode == X86::TCRETURNri64) {
+      BuildMI(MBB, MBBI, DL,
+              TII->get(IsWin64 ? X86::TAILJMPr64_REX : X86::TAILJMPr64))
+          .addReg(JumpTarget.getReg(), RegState::Kill);
+    } else {
+      BuildMI(MBB, MBBI, DL, TII->get(X86::TAILJMPr))
+          .addReg(JumpTarget.getReg(), RegState::Kill);
+    }
+
+    MachineInstr *NewMI = std::prev(MBBI);
+    NewMI->copyImplicitOps(*MBBI->getParent()->getParent(), MBBI);
+
+    // Delete the pseudo instruction TCRETURN.
+    MBB.erase(MBBI);
+
+    return true;
+  }
+  case X86::EH_RETURN:
+  case X86::EH_RETURN64: {
+    MachineOperand &DestAddr = MBBI->getOperand(0);
+    assert(DestAddr.isReg() && "Offset should be in register!");
+    const bool Uses64BitFramePtr =
+        STI->isTarget64BitLP64() || STI->isTargetNaCl64();
+    unsigned StackPtr = TRI->getStackRegister();
+    BuildMI(MBB, MBBI, DL,
+            TII->get(Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr), StackPtr)
+        .addReg(DestAddr.getReg());
+    // The EH_RETURN pseudo is really removed during the MC Lowering.
+    return true;
+  }
+  case X86::IRET: {
+    // Adjust stack to erase error code
+    int64_t StackAdj = MBBI->getOperand(0).getImm();
+    X86FL->emitSPUpdate(MBB, MBBI, StackAdj, true);
+    // Replace pseudo with machine iret
+    BuildMI(MBB, MBBI, DL,
+            TII->get(STI->is64Bit() ? X86::IRET64 : X86::IRET32));
+    MBB.erase(MBBI);
+    return true;
+  }
+  case X86::EH_RESTORE: {
+    // Restore ESP and EBP, and optionally ESI if required.
+    bool IsSEH = isAsynchronousEHPersonality(classifyEHPersonality(
+        MBB.getParent()->getFunction()->getPersonalityFn()));
+    X86FL->restoreWin32EHStackPointers(MBB, MBBI, DL, /*RestoreSP=*/IsSEH);
+    MBBI->eraseFromParent();
+    return true;
+  }
+  }
+  llvm_unreachable("Previous switch has a fallthrough?");
+}
+
+/// Expand all pseudo instructions contained in \p MBB.
+/// \returns true if any expansion occurred for \p MBB.
+bool X86ExpandPseudo::ExpandMBB(MachineBasicBlock &MBB) {
+  bool Modified = false;
+
+  // MBBI may be invalidated by the expansion.
+  MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
+  while (MBBI != E) {
+    MachineBasicBlock::iterator NMBBI = std::next(MBBI);
+    Modified |= ExpandMI(MBB, MBBI);
+    MBBI = NMBBI;
+  }
+
+  return Modified;
+}
+
+bool X86ExpandPseudo::runOnMachineFunction(MachineFunction &MF) {
+  STI = &static_cast<const X86Subtarget &>(MF.getSubtarget());
+  TII = STI->getInstrInfo();
+  TRI = STI->getRegisterInfo();
+  X86FL = STI->getFrameLowering();
+
+  bool Modified = false;
+  for (MachineBasicBlock &MBB : MF)
+    Modified |= ExpandMBB(MBB);
+  return Modified;
+}
+
+/// Returns an instance of the pseudo instruction expansion pass.
+FunctionPass *llvm::createX86ExpandPseudoPass() {
+  return new X86ExpandPseudo();
+}
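
The X86ISelLowering.cpp hunks below do two jobs: they re-bias the fixed stack-object offsets of a handler's arguments (an interrupt frame has no return-address slot where a normal call would have one), and they reject ill-formed handlers up front via report_fatal_error instead of miscompiling them. A sketch of IR that each new check refuses, reusing the illustrative %frame type and @isr handler from the sketch near the top of this page:

    ; "X86 interrupts may not return any value"
    define x86_intrcc i32 @bad_ret(%frame* %f) {
      ret i32 0
    }

    ; "X86 interrupts may take one or two arguments"
    define x86_intrcc void @bad_arity(%frame* %f, i32 %err, i32 %extra) {
      ret void
    }

    ; "X86 interrupts may not be called directly"
    define void @bad_call(%frame* %f) {
      call x86_intrcc void @isr(%frame* %f)
      ret void
    }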
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index d2a20a1dde7..04c22907b6a 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -2188,6 +2188,9 @@ X86TargetLowering::LowerReturn(SDValue Chain,
   MachineFunction &MF = DAG.getMachineFunction();
   X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();

+  if (CallConv == CallingConv::X86_INTR && !Outs.empty())
+    report_fatal_error("X86 interrupts may not return any value");
+
   SmallVector<CCValAssign, 16> RVLocs;
   CCState CCInfo(CallConv, isVarArg, MF, RVLocs, *DAG.getContext());
   CCInfo.AnalyzeReturn(Outs, RetCC_X86);
@@ -2301,7 +2304,10 @@ X86TargetLowering::LowerReturn(SDValue Chain,
   if (Flag.getNode())
     RetOps.push_back(Flag);

-  return DAG.getNode(X86ISD::RET_FLAG, dl, MVT::Other, RetOps);
+  X86ISD::NodeType opcode = X86ISD::RET_FLAG;
+  if (CallConv == CallingConv::X86_INTR)
+    opcode = X86ISD::IRET;
+  return DAG.getNode(opcode, dl, MVT::Other, RetOps);
 }

 bool X86TargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
@@ -2541,6 +2547,19 @@ X86TargetLowering::LowerMemArgument(SDValue Chain,
   else
     ValVT = VA.getValVT();

+  // Calculate SP offset of interrupt parameter, re-arrange the slot normally
+  // taken by a return address.
+  int Offset = 0;
+  if (CallConv == CallingConv::X86_INTR) {
+    const X86Subtarget& Subtarget =
+        static_cast<const X86Subtarget&>(DAG.getSubtarget());
+    // X86 interrupts may take one or two arguments.
+    // On the stack there will be no return address as in regular call.
+    // Offset of last argument need to be set to -4/-8 bytes.
+    // Where offset of the first argument out of two, should be set to 0 bytes.
+    Offset = (Subtarget.is64Bit() ? 8 : 4) * ((i + 1) % Ins.size() - 1);
+  }
+
   // FIXME: For now, all byval parameter objects are marked mutable. This can be
   // changed with more analysis.
   // In case of tail call optimization mark all arguments mutable. Since they
@@ -2549,10 +2568,19 @@ X86TargetLowering::LowerMemArgument(SDValue Chain,
     unsigned Bytes = Flags.getByValSize();
     if (Bytes == 0) Bytes = 1; // Don't create zero-sized stack objects.
     int FI = MFI->CreateFixedObject(Bytes, VA.getLocMemOffset(), isImmutable);
+    // Adjust SP offset of interrupt parameter.
+    if (CallConv == CallingConv::X86_INTR) {
+      MFI->setObjectOffset(FI, Offset);
+    }
     return DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
   } else {
     int FI = MFI->CreateFixedObject(ValVT.getSizeInBits()/8,
                                     VA.getLocMemOffset(), isImmutable);
+    // Adjust SP offset of interrupt parameter.
+    if (CallConv == CallingConv::X86_INTR) {
+      MFI->setObjectOffset(FI, Offset);
+    }
+
     SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
     SDValue Val = DAG.getLoad(
         ValVT, dl, Chain, FIN,
@@ -2632,6 +2660,14 @@ SDValue X86TargetLowering::LowerFormalArguments(
   assert(!(isVarArg && canGuaranteeTCO(CallConv)) &&
          "Var args not supported with calling convention fastcc, ghc or hipe");

+  if (CallConv == CallingConv::X86_INTR) {
+    bool isLegal = Ins.size() == 1 ||
+                   (Ins.size() == 2 && ((Is64Bit && Ins[1].VT == MVT::i64) ||
+                                        (!Is64Bit && Ins[1].VT == MVT::i32)));
+    if (!isLegal)
+      report_fatal_error("X86 interrupts may take one or two arguments");
+  }
+
   // Assign locations to all of the incoming arguments.
   SmallVector<CCValAssign, 16> ArgLocs;
   CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
@@ -2891,6 +2927,9 @@ SDValue X86TargetLowering::LowerFormalArguments(
   if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
                        MF.getTarget().Options.GuaranteedTailCallOpt)) {
     FuncInfo->setBytesToPopOnReturn(StackSize); // Callee pops everything.
+  } else if (CallConv == CallingConv::X86_INTR && Ins.size() == 2) {
+    // X86 interrupts must pop the error code if present
+    FuncInfo->setBytesToPopOnReturn(Is64Bit ? 8 : 4);
   } else {
     FuncInfo->setBytesToPopOnReturn(0); // Callee pops nothing.
     // If this is an sret function, the return should pop the hidden pointer.
@@ -3021,6 +3060,9 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
   X86MachineFunctionInfo *X86Info = MF.getInfo<X86MachineFunctionInfo>();
   auto Attr = MF.getFunction()->getFnAttribute("disable-tail-calls");

+  if (CallConv == CallingConv::X86_INTR)
+    report_fatal_error("X86 interrupts may not be called directly");
+
   if (Attr.getValueAsString() == "true")
     isTailCall = false;

@@ -20392,6 +20434,7 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
   case X86ISD::CMOV:               return "X86ISD::CMOV";
   case X86ISD::BRCOND:             return "X86ISD::BRCOND";
   case X86ISD::RET_FLAG:           return "X86ISD::RET_FLAG";
+  case X86ISD::IRET:               return "X86ISD::IRET";
   case X86ISD::REP_STOS:           return "X86ISD::REP_STOS";
   case X86ISD::REP_MOVS:           return "X86ISD::REP_MOVS";
   case X86ISD::GlobalBaseReg:      return "X86ISD::GlobalBaseReg";
diff --git a/lib/Target/X86/X86ISelLowering.h b/lib/Target/X86/X86ISelLowering.h
index 205b8126baf..bdec6137774 100644
--- a/lib/Target/X86/X86ISelLowering.h
+++ b/lib/Target/X86/X86ISelLowering.h
@@ -126,6 +126,9 @@ namespace llvm {
       /// 1 is the number of bytes of stack to pop.
       RET_FLAG,

+      /// Return from interrupt. Operand 0 is the number of bytes to pop.
+      IRET,
+
       /// Repeat fill, corresponds to X86::REP_STOSx.
       REP_STOS,
diff --git a/lib/Target/X86/X86InstrControl.td b/lib/Target/X86/X86InstrControl.td
index 4cd5563ce72..8c351a51c46 100644
--- a/lib/Target/X86/X86InstrControl.td
+++ b/lib/Target/X86/X86InstrControl.td
@@ -53,6 +53,19 @@ let isTerminator = 1, isReturn = 1, isBarrier = 1,
                     "{l}ret{|f}q\t$amt", [], IIC_RET>, Requires<[In64BitMode]>;
   def LRETIW : Ii16<0xCA, RawFrm, (outs), (ins i16imm:$amt),
                     "{l}ret{w|f}\t$amt", [], IIC_RET>, OpSize16;
+
+  // The machine return from interrupt instruction, but sometimes we need to
+  // perform a post-epilogue stack adjustment. Codegen emits the pseudo form
+  // which expands to include an SP adjustment if necessary.
+  def IRET16 : I   <0xcf, RawFrm, (outs), (ins), "iret{w}", [], IIC_IRET>,
+               OpSize16;
+  def IRET32 : I   <0xcf, RawFrm, (outs), (ins), "iret{l|d}", [],
+               IIC_IRET>, OpSize32;
+  def IRET64 : RI  <0xcf, RawFrm, (outs), (ins), "iretq", [],
+               IIC_IRET>, Requires<[In64BitMode]>;
+  let isCodeGenOnly = 1 in
+  def IRET : PseudoI<(outs), (ins i16imm:$adj), [(X86iret timm:$adj)]>;
+
 }

 // Unconditional branches.
diff --git a/lib/Target/X86/X86InstrInfo.td b/lib/Target/X86/X86InstrInfo.td
index 3f3fd82a6cd..217ea7b6c35 100644
--- a/lib/Target/X86/X86InstrInfo.td
+++ b/lib/Target/X86/X86InstrInfo.td
@@ -156,6 +156,8 @@ def X86cas16 : SDNode<"X86ISD::LCMPXCHG16_DAG", SDTX86caspair,
 def X86retflag : SDNode<"X86ISD::RET_FLAG", SDTX86Ret,
                         [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
+def X86iret : SDNode<"X86ISD::IRET", SDTX86Ret,
+                     [SDNPHasChain, SDNPOptInGlue]>;

 def X86vastart_save_xmm_regs :
                  SDNode<"X86ISD::VASTART_SAVE_XMM_REGS",
diff --git a/lib/Target/X86/X86InstrSystem.td b/lib/Target/X86/X86InstrSystem.td
index 8222db5922a..ce2646e8bc1 100644
--- a/lib/Target/X86/X86InstrSystem.td
+++ b/lib/Target/X86/X86InstrSystem.td
@@ -60,12 +60,6 @@ def SYSEXIT : I<0x35, RawFrm, (outs), (ins), "sysexit{l}", [],
                 IIC_SYS_ENTER_EXIT>, TB;
 def SYSEXIT64 :RI<0x35, RawFrm, (outs), (ins), "sysexit{q}", [],
                   IIC_SYS_ENTER_EXIT>, TB, Requires<[In64BitMode]>;
-
-def IRET16 : I<0xcf, RawFrm, (outs), (ins), "iret{w}", [], IIC_IRET>, OpSize16;
-def IRET32 : I<0xcf, RawFrm, (outs), (ins), "iret{l|d}", [], IIC_IRET>,
-               OpSize32;
-def IRET64 : RI<0xcf, RawFrm, (outs), (ins), "iretq", [], IIC_IRET>,
-             Requires<[In64BitMode]>;
 } // SchedRW

 def : Pat<(debugtrap),
diff --git a/lib/Target/X86/X86RegisterInfo.cpp b/lib/Target/X86/X86RegisterInfo.cpp
index 88843763478..85ed5859a83 100644
--- a/lib/Target/X86/X86RegisterInfo.cpp
+++ b/lib/Target/X86/X86RegisterInfo.cpp
@@ -229,6 +229,7 @@ X86RegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
 const MCPhysReg *
 X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
   const X86Subtarget &Subtarget = MF->getSubtarget<X86Subtarget>();
+  bool HasSSE = Subtarget.hasSSE1();
   bool HasAVX = Subtarget.hasAVX();
   bool HasAVX512 = Subtarget.hasAVX512();
   bool CallsEHReturn = MF->getMMI().callsEHReturn();
@@ -277,6 +278,18 @@ X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
     if (CallsEHReturn)
       return CSR_64EHRet_SaveList;
     return CSR_64_SaveList;
+  case CallingConv::X86_INTR:
+    if (Is64Bit) {
+      if (HasAVX)
+        return CSR_64_AllRegs_AVX_SaveList;
+      else
+        return CSR_64_AllRegs_SaveList;
+    } else {
+      if (HasSSE)
+        return CSR_32_AllRegs_SSE_SaveList;
+      else
+        return CSR_32_AllRegs_SaveList;
+    }
   default:
     break;
   }
@@ -297,6 +310,7 @@ const uint32_t *
 X86RegisterInfo::getCallPreservedMask(const MachineFunction &MF,
                                       CallingConv::ID CC) const {
   const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
+  bool HasSSE = Subtarget.hasSSE1();
   bool HasAVX = Subtarget.hasAVX();
   bool HasAVX512 = Subtarget.hasAVX512();

@@ -337,12 +351,24 @@ X86RegisterInfo::getCallPreservedMask(const MachineFunction &MF,
     if (Is64Bit)
       return CSR_64_MostRegs_RegMask;
     break;
-  default:
-    break;
   case CallingConv::X86_64_Win64:
     return CSR_Win64_RegMask;
   case CallingConv::X86_64_SysV:
     return CSR_64_RegMask;
+  case CallingConv::X86_INTR:
+    if (Is64Bit) {
+      if (HasAVX)
+        return CSR_64_AllRegs_AVX_RegMask;
+      else
+        return CSR_64_AllRegs_RegMask;
+    } else {
+      if (HasSSE)
+        return CSR_32_AllRegs_SSE_RegMask;
+      else
+        return CSR_32_AllRegs_RegMask;
+    }
+  default:
+    break;
   }

   // Unlike getCalleeSavedRegs(), we don't have MMI so we can't check
diff --git a/test/CodeGen/X86/x86-32-intrcc.ll b/test/CodeGen/X86/x86-32-intrcc.ll
new file mode 100644
index 00000000000..908da3d1120
--- /dev/null
+++ b/test/CodeGen/X86/x86-32-intrcc.ll
@@ -0,0 +1,79 @@
+; RUN: llc -mtriple=i686-unknown-unknown < %s | FileCheck %s
+; RUN: llc -mtriple=i686-unknown-unknown -O0 < %s | FileCheck %s -check-prefix=CHECK0
+
+%struct.interrupt_frame = type { i32, i32, i32, i32, i32 }
+
+@llvm.used = appending global [3 x i8*] [i8* bitcast (void (%struct.interrupt_frame*)* @test_isr_no_ecode to i8*), i8* bitcast (void (%struct.interrupt_frame*, i32)* @test_isr_ecode to i8*), i8* bitcast (void (%struct.interrupt_frame*, i32)* @test_isr_clobbers to i8*)], section "llvm.metadata"
+
+; Spills eax, putting original esp at +4.
+; No stack adjustment if declared with no error code
+define x86_intrcc void @test_isr_no_ecode(%struct.interrupt_frame* %frame) {
+ ; CHECK-LABEL: test_isr_no_ecode:
+ ; CHECK: pushl %eax
+ ; CHECK: movl 12(%esp), %eax
+ ; CHECK: popl %eax
+ ; CHECK: iretl
+ ; CHECK0-LABEL: test_isr_no_ecode:
+ ; CHECK0: pushl %eax
+ ; CHECK0: leal 4(%esp), %eax
+ ; CHECK0: movl 8(%eax), %eax
+ ; CHECK0: popl %eax
+ ; CHECK0: iretl
+ %pflags = getelementptr inbounds %struct.interrupt_frame, %struct.interrupt_frame* %frame, i32 0, i32 2
+ %flags = load i32, i32* %pflags, align 4
+ call void asm sideeffect "", "r"(i32 %flags)
+ ret void
+}
+
+; Spills eax and ecx, putting original esp at +8. Stack is adjusted up another 4 bytes
+; before return, popping the error code.
+define x86_intrcc void @test_isr_ecode(%struct.interrupt_frame* %frame, i32 %ecode) {
+ ; CHECK-LABEL: test_isr_ecode
+ ; CHECK: pushl %ecx
+ ; CHECK: pushl %eax
+ ; CHECK: movl 8(%esp), %eax
+ ; CHECK: movl 20(%esp), %ecx
+ ; CHECK: popl %eax
+ ; CHECK: popl %ecx
+ ; CHECK: addl $4, %esp
+ ; CHECK: iretl
+ ; CHECK0-LABEL: test_isr_ecode
+ ; CHECK0: pushl %ecx
+ ; CHECK0: pushl %eax
+ ; CHECK0: movl 8(%esp), %eax
+ ; CHECK0: leal 12(%esp), %ecx
+ ; CHECK0: movl 8(%ecx), %ecx
+ ; CHECK0: popl %eax
+ ; CHECK0: popl %ecx
+ ; CHECK0: addl $4, %esp
+ ; CHECK0: iretl
+ %pflags = getelementptr inbounds %struct.interrupt_frame, %struct.interrupt_frame* %frame, i32 0, i32 2
+ %flags = load i32, i32* %pflags, align 4
+ call x86_fastcallcc void asm sideeffect "", "r,r"(i32 %flags, i32 %ecode)
+ ret void
+}
+
+; All clobbered registers must be saved
+define x86_intrcc void @test_isr_clobbers(%struct.interrupt_frame* %frame, i32 %ecode) {
+ call void asm sideeffect "", "~{eax},~{ebx},~{ebp}"()
+ ; CHECK-LABEL: test_isr_clobbers
+ ; CHECK-SSE-NEXT: pushl %ebp
+ ; CHECK-SSE-NEXT: pushl %ebx
+ ; CHECK-SSE-NEXT: pushl %eax
+ ; CHECK-SSE-NEXT: popl %eax
+ ; CHECK-SSE-NEXT: popl %ebx
+ ; CHECK-SSE-NEXT: popl %ebp
+ ; CHECK-SSE-NEXT: addl $4, %esp
+ ; CHECK-SSE-NEXT: iretl
+ ; CHECK0-LABEL: test_isr_clobbers
+ ; CHECK0-SSE-NEXT: pushl %ebp
+ ; CHECK0-SSE-NEXT: pushl %ebx
+ ; CHECK0-SSE-NEXT: pushl %eax
+ ; CHECK0-SSE-NEXT: popl %eax
+ ; CHECK0-SSE-NEXT: popl %ebx
+ ; CHECK0-SSE-NEXT: popl %ebp
+ ; CHECK0-SSE-NEXT: addl $4, %esp
+ ; CHECK0-SSE-NEXT: iretl
+ ret void
+}
+
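
The offsets in the 32-bit CHECK lines above follow from the stack picture the lowering assumes on entry to a handler; the sketch below (for the error-code case) is editorial commentary, not part of the commit:

    ;   esp+20   ss       \
    ;   esp+16   esp       |
    ;   esp+12   eflags     >  %struct.interrupt_frame, byval argument 1
    ;   esp+8    cs        |   (no return-address slot, hence the negative
    ;   esp+4    eip      /     bias applied in LowerMemArgument)
    ;   esp+0    error code    argument 2; popped with "addl $4, %esp"
    ;                          before iretl via BytesToPopOnReturn

After the prologue pushes %ecx and %eax, everything shifts down by 8 bytes, which is why test_isr_ecode reads the error code at 8(%esp) and eflags at 20(%esp). The 64-bit test that follows mirrors all of this with 8-byte slots and iretq.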
diff --git a/test/CodeGen/X86/x86-64-intrcc.ll b/test/CodeGen/X86/x86-64-intrcc.ll
new file mode 100644
index 00000000000..8f70b391fa1
--- /dev/null
+++ b/test/CodeGen/X86/x86-64-intrcc.ll
@@ -0,0 +1,86 @@
+; RUN: llc -mtriple=x86_64-unknown-unknown < %s | FileCheck %s
+; RUN: llc -mtriple=x86_64-unknown-unknown -O0 < %s | FileCheck %s -check-prefix=CHECK0
+
+%struct.interrupt_frame = type { i64, i64, i64, i64, i64 }
+
+@llvm.used = appending global [3 x i8*] [i8* bitcast (void (%struct.interrupt_frame*)* @test_isr_no_ecode to i8*), i8* bitcast (void (%struct.interrupt_frame*, i64)* @test_isr_ecode to i8*), i8* bitcast (void (%struct.interrupt_frame*, i64)* @test_isr_clobbers to i8*)], section "llvm.metadata"
+
+; Spills rax, putting original esp at +8.
+; No stack adjustment if declared with no error code
+define x86_intrcc void @test_isr_no_ecode(%struct.interrupt_frame* %frame) {
+ ; CHECK-LABEL: test_isr_no_ecode:
+ ; CHECK: pushq %rax
+ ; CHECK: movq 24(%rsp), %rax
+ ; CHECK: popq %rax
+ ; CHECK: iretq
+ ; CHECK0-LABEL: test_isr_no_ecode:
+ ; CHECK0: pushq %rax
+ ; CHECK0: leaq 8(%rsp), %rax
+ ; CHECK0: movq 16(%rax), %rax
+ ; CHECK0: popq %rax
+ ; CHECK0: iretq
+ %pflags = getelementptr inbounds %struct.interrupt_frame, %struct.interrupt_frame* %frame, i32 0, i32 2
+ %flags = load i64, i64* %pflags, align 4
+ call void asm sideeffect "", "r"(i64 %flags)
+ ret void
+}
+
+; Spills rax and rcx, putting original rsp at +16. Stack is adjusted up another 8 bytes
+; before return, popping the error code.
+define x86_intrcc void @test_isr_ecode(%struct.interrupt_frame* %frame, i64 %ecode) {
+ ; CHECK-LABEL: test_isr_ecode
+ ; CHECK: pushq %rax
+ ; CHECK: pushq %rcx
+ ; CHECK: movq 16(%rsp), %rax
+ ; CHECK: movq 40(%rsp), %rcx
+ ; CHECK: popq %rcx
+ ; CHECK: popq %rax
+ ; CHECK: addq $8, %rsp
+ ; CHECK: iretq
+ ; CHECK0-LABEL: test_isr_ecode
+ ; CHECK0: pushq %rax
+ ; CHECK0: pushq %rcx
+ ; CHECK0: movq 16(%rsp), %rax
+ ; CHECK0: leaq 24(%rsp), %rcx
+ ; CHECK0: movq 16(%rcx), %rcx
+ ; CHECK0: popq %rcx
+ ; CHECK0: popq %rax
+ ; CHECK0: addq $8, %rsp
+ ; CHECK0: iretq
+ %pflags = getelementptr inbounds %struct.interrupt_frame, %struct.interrupt_frame* %frame, i32 0, i32 2
+ %flags = load i64, i64* %pflags, align 4
+ call void asm sideeffect "", "r,r"(i64 %flags, i64 %ecode)
+ ret void
+}
+
+; All clobbered registers must be saved
+define x86_intrcc void @test_isr_clobbers(%struct.interrupt_frame* %frame, i64 %ecode) {
+ call void asm sideeffect "", "~{rax},~{rbx},~{rbp},~{r11},~{xmm0}"()
+ ; CHECK-LABEL: test_isr_clobbers
+ ; CHECK-SSE-NEXT: pushq %rax
+ ; CHECK-SSE-NEXT: pushq %r11
+ ; CHECK-SSE-NEXT: pushq %rbp
+ ; CHECK-SSE-NEXT: pushq %rbx
+ ; CHECK-SSE-NEXT: movaps %xmm0
+ ; CHECK-SSE-NEXT: movaps %xmm0
+ ; CHECK-SSE-NEXT: popq %rbx
+ ; CHECK-SSE-NEXT: popq %rbp
+ ; CHECK-SSE-NEXT: popq %r11
+ ; CHECK-SSE-NEXT: popq %rax
+ ; CHECK-SSE-NEXT: addq $8, %rsp
+ ; CHECK-SSE-NEXT: iretq
+ ; CHECK0-LABEL: test_isr_clobbers
+ ; CHECK0-SSE-NEXT: pushq %rax
+ ; CHECK0-SSE-NEXT: pushq %r11
+ ; CHECK0-SSE-NEXT: pushq %rbp
+ ; CHECK0-SSE-NEXT: pushq %rbx
+ ; CHECK0-SSE-NEXT: movaps %xmm0
+ ; CHECK0-SSE-NEXT: movaps %xmm0
+ ; CHECK0-SSE-NEXT: popq %rbx
+ ; CHECK0-SSE-NEXT: popq %rbp
+ ; CHECK0-SSE-NEXT: popq %r11
+ ; CHECK0-SSE-NEXT: popq %rax
+ ; CHECK0-SSE-NEXT: addq $8, %rsp
+ ; CHECK0-SSE-NEXT: iretq
+ ret void
+}
\ No newline at end of file