| author | Micah Villmow <villmow@gmail.com> | 2012-10-11 21:27:41 +0000 |
|---|---|---|
| committer | Micah Villmow <villmow@gmail.com> | 2012-10-11 21:27:41 +0000 |
| commit | fb384d61c78b60787ed65475d8403aee65023962 (patch) | |
| tree | 7bf869f21ac85ae4ddbeea3069784c88381bd07e | |
| parent | d9a3bad4487dee0b9ed1a0f5555dffe605826158 (diff) | |
Revert 165732 for further review.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@165747 91177308-0d34-0410-b5e6-96231b3b80d8
40 files changed, 109 insertions, 188 deletions
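For readers skimming the patch below: the change being reverted (r165732) had threaded an explicit address-space argument through the `DataLayout` pointer queries and their callers, and this revert restores the defaulted form (`AS = 0`). The following is an illustrative sketch only, not part of the patch; the data-layout string is a hypothetical example, while the header path and method signature come from the `DataLayout.h` hunk below.

```cpp
// Illustrative sketch, not part of the patch. The data-layout string is a
// hypothetical example; the header path matches this source tree.
#include "llvm/DataLayout.h"

unsigned defaultPointerWidth() {
  // 64-bit pointers in the default address space (address space 0).
  llvm::DataLayout TD("e-p:64:64:64");

  // With the defaults restored by this revert, callers may omit the
  // address-space argument and implicitly query address space 0 ...
  unsigned Bits = TD.getPointerSizeInBits();   // 64 for this layout

  // ... whereas r165732 required the address space to be passed explicitly
  // at each call site, e.g. TD.getPointerSizeInBits(SomeAS).
  return Bits;
}
```

The same `AS = 0` default reappears on getPointerSize, getPointerABIAlignment, and getPointerPrefAlignment in the include/llvm/DataLayout.h hunk below.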
diff --git a/include/llvm-c/Target.h b/include/llvm-c/Target.h index 57abfa0207f..92228701e31 100644 --- a/include/llvm-c/Target.h +++ b/include/llvm-c/Target.h @@ -172,20 +172,10 @@ enum LLVMByteOrdering LLVMByteOrder(LLVMTargetDataRef); See the method llvm::DataLayout::getPointerSize. */ unsigned LLVMPointerSize(LLVMTargetDataRef); -/** Returns the pointer size in bytes for a target for a specified - address space. - See the method llvm::DataLayout::getPointerSize. */ -unsigned LLVMPointerSizeForAS(LLVMTargetDataRef, unsigned AS); - /** Returns the integer type that is the same size as a pointer on a target. See the method llvm::DataLayout::getIntPtrType. */ LLVMTypeRef LLVMIntPtrType(LLVMTargetDataRef); -/** Returns the integer type that is the same size as a pointer on a target. - This version allows the address space to be specified. - See the method llvm::DataLayout::getIntPtrType. */ -LLVMTypeRef LLVMIntPtrTypeForAS(LLVMTargetDataRef, unsigned AS); - /** Computes the size of a type in bytes for a target. See the method llvm::DataLayout::getTypeSizeInBits. */ unsigned long long LLVMSizeOfTypeInBits(LLVMTargetDataRef, LLVMTypeRef); diff --git a/include/llvm/DataLayout.h b/include/llvm/DataLayout.h index c9ac0b7feaa..a24737e842b 100644 --- a/include/llvm/DataLayout.h +++ b/include/llvm/DataLayout.h @@ -231,7 +231,9 @@ public: } /// Layout pointer alignment - unsigned getPointerABIAlignment(unsigned AS) const { + /// FIXME: The defaults need to be removed once all of + /// the backends/clients are updated. + unsigned getPointerABIAlignment(unsigned AS = 0) const { DenseMap<unsigned, PointerAlignElem>::const_iterator val = Pointers.find(AS); if (val == Pointers.end()) { val = Pointers.find(0); @@ -239,7 +241,9 @@ public: return val->second.ABIAlign; } /// Return target's alignment for stack-based pointers - unsigned getPointerPrefAlignment(unsigned AS) const { + /// FIXME: The defaults need to be removed once all of + /// the backends/clients are updated. + unsigned getPointerPrefAlignment(unsigned AS = 0) const { DenseMap<unsigned, PointerAlignElem>::const_iterator val = Pointers.find(AS); if (val == Pointers.end()) { val = Pointers.find(0); @@ -247,7 +251,9 @@ public: return val->second.PrefAlign; } /// Layout pointer size - unsigned getPointerSize(unsigned AS) const { + /// FIXME: The defaults need to be removed once all of + /// the backends/clients are updated. + unsigned getPointerSize(unsigned AS = 0) const { DenseMap<unsigned, PointerAlignElem>::const_iterator val = Pointers.find(AS); if (val == Pointers.end()) { val = Pointers.find(0); @@ -255,7 +261,9 @@ public: return val->second.TypeBitWidth; } /// Layout pointer size, in bits - unsigned getPointerSizeInBits(unsigned AS) const { + /// FIXME: The defaults need to be removed once all of + /// the backends/clients are updated. 
+ unsigned getPointerSizeInBits(unsigned AS = 0) const { DenseMap<unsigned, PointerAlignElem>::const_iterator val = Pointers.find(AS); if (val == Pointers.end()) { val = Pointers.find(0); diff --git a/include/llvm/Instructions.h b/include/llvm/Instructions.h index 9c526bd97bb..6837608b2c0 100644 --- a/include/llvm/Instructions.h +++ b/include/llvm/Instructions.h @@ -350,16 +350,7 @@ public: static unsigned getPointerOperandIndex() { return 1U; } unsigned getPointerAddressSpace() const { - if (getPointerOperand()->getType()->isPointerTy()) - return cast<PointerType>(getPointerOperand()->getType()) - ->getAddressSpace(); - if (getPointerOperand()->getType()->isVectorTy() - && cast<VectorType>(getPointerOperand()->getType())->isPointerTy()) - return cast<PointerType>(cast<VectorType>( - getPointerOperand()->getType())->getElementType()) - ->getAddressSpace(); - llvm_unreachable("Only a vector of pointers or pointers can be used!"); - return 0; + return cast<PointerType>(getPointerOperand()->getType())->getAddressSpace(); } // Methods for support type inquiry through isa, cast, and dyn_cast: @@ -3662,15 +3653,7 @@ public: /// @brief return the address space of the pointer. unsigned getAddressSpace() const { - if (getType()->isPointerTy()) - return cast<PointerType>(getType())->getAddressSpace(); - if (getType()->isVectorTy() && - cast<VectorType>(getType())->getElementType()->isPointerTy()) - return cast<PointerType>( - cast<VectorType>(getType())->getElementType()) - ->getAddressSpace(); - llvm_unreachable("Must be a pointer or a vector of pointers."); - return 0; + return cast<PointerType>(getType())->getAddressSpace(); } // Methods for support type inquiry through isa, cast, and dyn_cast: @@ -3712,16 +3695,7 @@ public: /// @brief return the address space of the pointer. unsigned getPointerAddressSpace() const { - Type *Ty = getOperand(0)->getType(); - if (Ty->isPointerTy()) - return cast<PointerType>(Ty)->getAddressSpace(); - if (Ty->isVectorTy() - && cast<VectorType>(Ty)->getElementType()->isPointerTy()) - return cast<PointerType>( - cast<VectorType>(Ty)->getElementType()) - ->getAddressSpace(); - llvm_unreachable("Must be a pointer or a vector of pointers."); - return 0; + return cast<PointerType>(getOperand(0)->getType())->getAddressSpace(); } // Methods for support type inquiry through isa, cast, and dyn_cast: diff --git a/include/llvm/Target/TargetLowering.h b/include/llvm/Target/TargetLowering.h index 183ccbd48ec..b3149e960a8 100644 --- a/include/llvm/Target/TargetLowering.h +++ b/include/llvm/Target/TargetLowering.h @@ -146,7 +146,7 @@ public: // Return the pointer type for the given address space, defaults to // the pointer type from the data layout. // FIXME: The default needs to be removed once all the code is updated. - virtual MVT getPointerTy(uint32_t AS = 0) const { return PointerTy; } + virtual MVT getPointerTy(uint32_t addrspace = 0) const { return PointerTy; } virtual MVT getShiftAmountTy(EVT LHSTy) const; /// isSelectExpensive - Return true if the select operation is expensive for diff --git a/include/llvm/Transforms/Utils/Local.h b/include/llvm/Transforms/Utils/Local.h index fd1b5556ef2..21dd3fbe110 100644 --- a/include/llvm/Transforms/Utils/Local.h +++ b/include/llvm/Transforms/Utils/Local.h @@ -186,8 +186,7 @@ Value *EmitGEPOffset(IRBuilderTy *Builder, const DataLayout &TD, User *GEP, bool isInBounds = cast<GEPOperator>(GEP)->isInBounds() && !NoAssumptions; // Build a mask for high order bits. 
- unsigned AS = cast<GEPOperator>(GEP)->getPointerAddressSpace(); - unsigned IntPtrWidth = TD.getPointerSizeInBits(AS); + unsigned IntPtrWidth = TD.getPointerSizeInBits(); uint64_t PtrSizeMask = ~0ULL >> (64-IntPtrWidth); for (User::op_iterator i = GEP->op_begin() + 1, e = GEP->op_end(); i != e; diff --git a/lib/Analysis/BasicAliasAnalysis.cpp b/lib/Analysis/BasicAliasAnalysis.cpp index 36903f94e25..263bfc031fc 100644 --- a/lib/Analysis/BasicAliasAnalysis.cpp +++ b/lib/Analysis/BasicAliasAnalysis.cpp @@ -286,8 +286,7 @@ DecomposeGEPExpression(const Value *V, int64_t &BaseOffs, V = GEPOp->getOperand(0); continue; } - - unsigned AS = GEPOp->getPointerAddressSpace(); + // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices. gep_type_iterator GTI = gep_type_begin(GEPOp); for (User::const_op_iterator I = GEPOp->op_begin()+1, @@ -316,7 +315,7 @@ DecomposeGEPExpression(const Value *V, int64_t &BaseOffs, // If the integer type is smaller than the pointer size, it is implicitly // sign extended to pointer size. unsigned Width = cast<IntegerType>(Index->getType())->getBitWidth(); - if (TD->getPointerSizeInBits(AS) > Width) + if (TD->getPointerSizeInBits() > Width) Extension = EK_SignExt; // Use GetLinearExpression to decompose the index into a C1*V+C2 form. @@ -345,7 +344,7 @@ DecomposeGEPExpression(const Value *V, int64_t &BaseOffs, // Make sure that we have a scale that makes sense for this target's // pointer size. - if (unsigned ShiftBits = 64-TD->getPointerSizeInBits(AS)) { + if (unsigned ShiftBits = 64-TD->getPointerSizeInBits()) { Scale <<= ShiftBits; Scale = (int64_t)Scale >> ShiftBits; } diff --git a/lib/Analysis/CodeMetrics.cpp b/lib/Analysis/CodeMetrics.cpp index d6692684960..651a54be1b9 100644 --- a/lib/Analysis/CodeMetrics.cpp +++ b/lib/Analysis/CodeMetrics.cpp @@ -91,16 +91,14 @@ bool llvm::isInstructionFree(const Instruction *I, const DataLayout *TD) { // which doesn't contain values outside the range of a pointer. if (isa<IntToPtrInst>(CI) && TD && TD->isLegalInteger(Op->getType()->getScalarSizeInBits()) && - Op->getType()->getScalarSizeInBits() <= TD->getPointerSizeInBits( - cast<IntToPtrInst>(CI)->getAddressSpace())) + Op->getType()->getScalarSizeInBits() <= TD->getPointerSizeInBits()) return true; // A ptrtoint cast is free so long as the result is large enough to store // the pointer, and a legal integer type. 
if (isa<PtrToIntInst>(CI) && TD && TD->isLegalInteger(Op->getType()->getScalarSizeInBits()) && - Op->getType()->getScalarSizeInBits() >= TD->getPointerSizeInBits( - cast<PtrToIntInst>(CI)->getPointerAddressSpace())) + Op->getType()->getScalarSizeInBits() >= TD->getPointerSizeInBits()) return true; // trunc to a native type is free (assuming the target has compare and diff --git a/lib/Analysis/ConstantFolding.cpp b/lib/Analysis/ConstantFolding.cpp index 146897ad675..b7bf044a368 100644 --- a/lib/Analysis/ConstantFolding.cpp +++ b/lib/Analysis/ConstantFolding.cpp @@ -916,11 +916,10 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy, if (TD && CE->getOpcode() == Instruction::IntToPtr) { Constant *Input = CE->getOperand(0); unsigned InWidth = Input->getType()->getScalarSizeInBits(); - unsigned AS = cast<PointerType>(CE->getType())->getAddressSpace(); - if (TD->getPointerSizeInBits(AS) < InWidth) { + if (TD->getPointerSizeInBits() < InWidth) { Constant *Mask = ConstantInt::get(CE->getContext(), APInt::getLowBitsSet(InWidth, - TD->getPointerSizeInBits(AS))); + TD->getPointerSizeInBits())); Input = ConstantExpr::getAnd(Input, Mask); } // Do a zext or trunc to get to the dest size. @@ -933,10 +932,9 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy, // the int size is >= the ptr size. This requires knowing the width of a // pointer, so it can't be done in ConstantExpr::getCast. if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ops[0])) - if (TD && CE->getOpcode() == Instruction::PtrToInt && - TD->getPointerSizeInBits( - cast<PointerType>(CE->getOperand(0)->getType())->getAddressSpace()) - <= CE->getType()->getScalarSizeInBits()) + if (TD && + TD->getPointerSizeInBits() <= CE->getType()->getScalarSizeInBits() && + CE->getOpcode() == Instruction::PtrToInt) return FoldBitCast(CE->getOperand(0), DestTy, *TD); return ConstantExpr::getCast(Opcode, Ops[0], DestTy); diff --git a/lib/Analysis/InlineCost.cpp b/lib/Analysis/InlineCost.cpp index 95e58022ca1..5f51f775f14 100644 --- a/lib/Analysis/InlineCost.cpp +++ b/lib/Analysis/InlineCost.cpp @@ -243,8 +243,7 @@ bool CallAnalyzer::accumulateGEPOffset(GEPOperator &GEP, APInt &Offset) { if (!TD) return false; - unsigned AS = GEP.getPointerAddressSpace(); - unsigned IntPtrWidth = TD->getPointerSizeInBits(AS); + unsigned IntPtrWidth = TD->getPointerSizeInBits(); assert(IntPtrWidth == Offset.getBitWidth()); for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP); @@ -392,8 +391,7 @@ bool CallAnalyzer::visitPtrToInt(PtrToIntInst &I) { // Track base/offset pairs when converted to a plain integer provided the // integer is large enough to represent the pointer. unsigned IntegerSize = I.getType()->getScalarSizeInBits(); - unsigned AS = I.getPointerAddressSpace(); - if (TD && IntegerSize >= TD->getPointerSizeInBits(AS)) { + if (TD && IntegerSize >= TD->getPointerSizeInBits()) { std::pair<Value *, APInt> BaseAndOffset = ConstantOffsetPtrs.lookup(I.getOperand(0)); if (BaseAndOffset.first) @@ -427,8 +425,7 @@ bool CallAnalyzer::visitIntToPtr(IntToPtrInst &I) { // modifications provided the integer is not too large. 
Value *Op = I.getOperand(0); unsigned IntegerSize = Op->getType()->getScalarSizeInBits(); - unsigned AS = I.getAddressSpace(); - if (TD && IntegerSize <= TD->getPointerSizeInBits(AS)) { + if (TD && IntegerSize <= TD->getPointerSizeInBits()) { std::pair<Value *, APInt> BaseAndOffset = ConstantOffsetPtrs.lookup(Op); if (BaseAndOffset.first) ConstantOffsetPtrs[&I] = BaseAndOffset; @@ -763,8 +760,7 @@ ConstantInt *CallAnalyzer::stripAndComputeInBoundsConstantOffsets(Value *&V) { if (!TD || !V->getType()->isPointerTy()) return 0; - unsigned AS = cast<PointerType>(V->getType())->getAddressSpace();; - unsigned IntPtrWidth = TD->getPointerSizeInBits(AS); + unsigned IntPtrWidth = TD->getPointerSizeInBits(); APInt Offset = APInt::getNullValue(IntPtrWidth); // Even though we don't look through PHI nodes, we could be called on an @@ -828,8 +824,7 @@ bool CallAnalyzer::analyzeCall(CallSite CS) { // size of the byval type by the target's pointer size. PointerType *PTy = cast<PointerType>(CS.getArgument(I)->getType()); unsigned TypeSize = TD->getTypeSizeInBits(PTy->getElementType()); - unsigned AS = PTy->getAddressSpace(); - unsigned PointerSize = TD->getPointerSizeInBits(AS); + unsigned PointerSize = TD->getPointerSizeInBits(); // Ceiling division. unsigned NumStores = (TypeSize + PointerSize - 1) / PointerSize; diff --git a/lib/Analysis/InstructionSimplify.cpp b/lib/Analysis/InstructionSimplify.cpp index 8e326122fa5..b3d62487fc1 100644 --- a/lib/Analysis/InstructionSimplify.cpp +++ b/lib/Analysis/InstructionSimplify.cpp @@ -666,8 +666,7 @@ Value *llvm::SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW, /// 'Offset' APInt must be the bitwidth of the target's pointer size. static bool accumulateGEPOffset(const DataLayout &TD, GEPOperator *GEP, APInt &Offset) { - unsigned AS = GEP->getPointerAddressSpace(); - unsigned IntPtrWidth = TD.getPointerSizeInBits(AS); + unsigned IntPtrWidth = TD.getPointerSizeInBits(); assert(IntPtrWidth == Offset.getBitWidth()); gep_type_iterator GTI = gep_type_begin(GEP); @@ -697,14 +696,12 @@ static bool accumulateGEPOffset(const DataLayout &TD, GEPOperator *GEP, /// accumulates the total constant offset applied in the returned constant. It /// returns 0 if V is not a pointer, and returns the constant '0' if there are /// no constant offsets applied. -/// FIXME: This function also exists in InlineCost.cpp. static Constant *stripAndComputeConstantOffsets(const DataLayout &TD, Value *&V) { if (!V->getType()->isPointerTy()) return 0; - unsigned AS = cast<PointerType>(V->getType())->getAddressSpace();; - unsigned IntPtrWidth = TD.getPointerSizeInBits(AS); + unsigned IntPtrWidth = TD.getPointerSizeInBits(); APInt Offset = APInt::getNullValue(IntPtrWidth); // Even though we don't look through PHI nodes, we could be called on an @@ -1880,9 +1877,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS, // Turn icmp (ptrtoint x), (ptrtoint/constant) into a compare of the input // if the integer type is the same size as the pointer type. if (MaxRecurse && Q.TD && isa<PtrToIntInst>(LI) && - Q.TD->getPointerSizeInBits( - cast<PtrToIntInst>(LI)->getPointerAddressSpace()) == - DstTy->getPrimitiveSizeInBits()) { + Q.TD->getPointerSizeInBits() == DstTy->getPrimitiveSizeInBits()) { if (Constant *RHSC = dyn_cast<Constant>(RHS)) { // Transfer the cast to the constant. 
if (Value *V = SimplifyICmpInst(Pred, SrcOp, diff --git a/lib/Analysis/ValueTracking.cpp b/lib/Analysis/ValueTracking.cpp index 1d7f0692cbe..951b442b874 100644 --- a/lib/Analysis/ValueTracking.cpp +++ b/lib/Analysis/ValueTracking.cpp @@ -40,8 +40,7 @@ static unsigned getBitWidth(Type *Ty, const DataLayout *TD) { if (unsigned BitWidth = Ty->getScalarSizeInBits()) return BitWidth; assert(isa<PointerType>(Ty) && "Expected a pointer type!"); - return TD ? - TD->getPointerSizeInBits(cast<PointerType>(Ty)->getAddressSpace()) : 0; + return TD ? TD->getPointerSizeInBits() : 0; } static void ComputeMaskedBitsAddSub(bool Add, Value *Op0, Value *Op1, bool NSW, @@ -1622,8 +1621,7 @@ Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset, // Re-sign extend from the pointer size if needed to get overflow edge cases // right. - unsigned AS = GEP->getPointerAddressSpace(); - unsigned PtrSize = TD.getPointerSizeInBits(AS); + unsigned PtrSize = TD.getPointerSizeInBits(); if (PtrSize < 64) Offset = SignExtend64(Offset, PtrSize); diff --git a/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/lib/CodeGen/AsmPrinter/AsmPrinter.cpp index 4de98da655b..d74a70362a2 100644 --- a/lib/CodeGen/AsmPrinter/AsmPrinter.cpp +++ b/lib/CodeGen/AsmPrinter/AsmPrinter.cpp @@ -385,8 +385,7 @@ void AsmPrinter::EmitGlobalVariable(const GlobalVariable *GV) { // - __tlv_bootstrap - used to make sure support exists // - spare pointer, used when mapped by the runtime // - pointer to mangled symbol above with initializer - unsigned AS = GV->getType()->getAddressSpace(); - unsigned PtrSize = TD->getPointerSizeInBits(AS)/8; + unsigned PtrSize = TD->getPointerSizeInBits()/8; OutStreamer.EmitSymbolValue(GetExternalSymbolSymbol("_tlv_bootstrap"), PtrSize, 0); OutStreamer.EmitIntValue(0, PtrSize, 0); @@ -1300,7 +1299,7 @@ void AsmPrinter::EmitXXStructorList(const Constant *List, bool isCtor) { // Emit the function pointers in the target-specific order const DataLayout *TD = TM.getDataLayout(); - unsigned Align = Log2_32(TD->getPointerPrefAlignment(0)); + unsigned Align = Log2_32(TD->getPointerPrefAlignment()); std::stable_sort(Structors.begin(), Structors.end(), priority_order); for (unsigned i = 0, e = Structors.size(); i != e; ++i) { const MCSection *OutputSection = @@ -1481,9 +1480,8 @@ static const MCExpr *lowerConstant(const Constant *CV, AsmPrinter &AP) { if (Offset == 0) return Base; - unsigned AS = cast<PointerType>(CE->getType())->getAddressSpace(); // Truncate/sext the offset to the pointer size. 
- unsigned Width = TD.getPointerSizeInBits(AS); + unsigned Width = TD.getPointerSizeInBits(); if (Width < 64) Offset = SignExtend64(Offset, Width); diff --git a/lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp b/lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp index 6c17af2e8c8..d94e1fe61bf 100644 --- a/lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp +++ b/lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp @@ -112,7 +112,7 @@ unsigned AsmPrinter::GetSizeOfEncodedValue(unsigned Encoding) const { switch (Encoding & 0x07) { default: llvm_unreachable("Invalid encoded value."); - case dwarf::DW_EH_PE_absptr: return TM.getDataLayout()->getPointerSize(0); + case dwarf::DW_EH_PE_absptr: return TM.getDataLayout()->getPointerSize(); case dwarf::DW_EH_PE_udata2: return 2; case dwarf::DW_EH_PE_udata4: return 4; case dwarf::DW_EH_PE_udata8: return 8; diff --git a/lib/CodeGen/AsmPrinter/DIE.cpp b/lib/CodeGen/AsmPrinter/DIE.cpp index 73e18cd817b..4d73b3c2226 100644 --- a/lib/CodeGen/AsmPrinter/DIE.cpp +++ b/lib/CodeGen/AsmPrinter/DIE.cpp @@ -200,7 +200,7 @@ void DIEInteger::EmitValue(AsmPrinter *Asm, unsigned Form) const { case dwarf::DW_FORM_udata: Asm->EmitULEB128(Integer); return; case dwarf::DW_FORM_sdata: Asm->EmitSLEB128(Integer); return; case dwarf::DW_FORM_addr: - Size = Asm->getDataLayout().getPointerSize(0); break; + Size = Asm->getDataLayout().getPointerSize(); break; default: llvm_unreachable("DIE Value form not supported yet"); } Asm->OutStreamer.EmitIntValue(Integer, Size, 0/*addrspace*/); @@ -222,7 +222,7 @@ unsigned DIEInteger::SizeOf(AsmPrinter *AP, unsigned Form) const { case dwarf::DW_FORM_data8: return sizeof(int64_t); case dwarf::DW_FORM_udata: return MCAsmInfo::getULEB128Size(Integer); case dwarf::DW_FORM_sdata: return MCAsmInfo::getSLEB128Size(Integer); - case dwarf::DW_FORM_addr: return AP->getDataLayout().getPointerSize(0); + case dwarf::DW_FORM_addr: return AP->getDataLayout().getPointerSize(); default: llvm_unreachable("DIE Value form not supported yet"); } } @@ -249,7 +249,7 @@ void DIELabel::EmitValue(AsmPrinter *AP, unsigned Form) const { unsigned DIELabel::SizeOf(AsmPrinter *AP, unsigned Form) const { if (Form == dwarf::DW_FORM_data4) return 4; if (Form == dwarf::DW_FORM_strp) return 4; - return AP->getDataLayout().getPointerSize(0); + return AP->getDataLayout().getPointerSize(); } #ifndef NDEBUG @@ -273,7 +273,7 @@ void DIEDelta::EmitValue(AsmPrinter *AP, unsigned Form) const { unsigned DIEDelta::SizeOf(AsmPrinter *AP, unsigned Form) const { if (Form == dwarf::DW_FORM_data4) return 4; if (Form == dwarf::DW_FORM_strp) return 4; - return AP->getDataLayout().getPointerSize(0); + return AP->getDataLayout().getPointerSize(); } #ifndef NDEBUG diff --git a/lib/CodeGen/AsmPrinter/DwarfDebug.cpp b/lib/CodeGen/AsmPrinter/DwarfDebug.cpp index df162e07a88..6acf19ee8c4 100644 --- a/lib/CodeGen/AsmPrinter/DwarfDebug.cpp +++ b/lib/CodeGen/AsmPrinter/DwarfDebug.cpp @@ -384,7 +384,7 @@ DIE *DwarfDebug::constructLexicalScopeDIE(CompileUnit *TheCU, // DW_AT_ranges appropriately. TheCU->addUInt(ScopeDIE, dwarf::DW_AT_ranges, dwarf::DW_FORM_data4, DebugRangeSymbols.size() - * Asm->getDataLayout().getPointerSize(0)); + * Asm->getDataLayout().getPointerSize()); for (SmallVector<InsnRange, 4>::const_iterator RI = Ranges.begin(), RE = Ranges.end(); RI != RE; ++RI) { DebugRangeSymbols.push_back(getLabelBeforeInsn(RI->first)); @@ -450,7 +450,7 @@ DIE *DwarfDebug::constructInlinedScopeDIE(CompileUnit *TheCU, // DW_AT_ranges appropriately. 
TheCU->addUInt(ScopeDIE, dwarf::DW_AT_ranges, dwarf::DW_FORM_data4, DebugRangeSymbols.size() - * Asm->getDataLayout().getPointerSize(0)); + * Asm->getDataLayout().getPointerSize()); for (SmallVector<InsnRange, 4>::const_iterator RI = Ranges.begin(), RE = Ranges.end(); RI != RE; ++RI) { DebugRangeSymbols.push_back(getLabelBeforeInsn(RI->first)); @@ -1765,7 +1765,7 @@ void DwarfDebug::emitDebugInfo() { Asm->EmitSectionOffset(Asm->GetTempSymbol("abbrev_begin"), DwarfAbbrevSectionSym); Asm->OutStreamer.AddComment("Address Size (in bytes)"); - Asm->EmitInt8(Asm->getDataLayout().getPointerSize(0)); + Asm->EmitInt8(Asm->getDataLayout().getPointerSize()); emitDIE(Die); Asm->OutStreamer.EmitLabel(Asm->GetTempSymbol("info_end", TheCU->getID())); @@ -1811,14 +1811,14 @@ void DwarfDebug::emitEndOfLineMatrix(unsigned SectionEnd) { Asm->EmitInt8(0); Asm->OutStreamer.AddComment("Op size"); - Asm->EmitInt8(Asm->getDataLayout().getPointerSize(0) + 1); + Asm->EmitInt8(Asm->getDataLayout().getPointerSize() + 1); Asm->OutStreamer.AddComment("DW_LNE_set_address"); Asm->EmitInt8(dwarf::DW_LNE_set_address); Asm->OutStreamer.AddComment("Section end label"); Asm->OutStreamer.EmitSymbolValue(Asm->GetTempSymbol("section_end",SectionEnd), - Asm->getDataLayout().getPointerSize(0), + Asm->getDataLayout().getPointerSize(), 0/*AddrSpace*/); // Mark end of matrix. @@ -2047,7 +2047,7 @@ void DwarfDebug::emitDebugLoc() { // Start the dwarf loc section. Asm->OutStreamer.SwitchSection( Asm->getObjFileLowering().getDwarfLocSection()); - unsigned char Size = Asm->getDataLayout().getPointerSize(0); + unsigned char Size = Asm->getDataLayout().getPointerSize(); Asm->OutStreamer.EmitLabel(Asm->GetTempSymbol("debug_loc", 0)); unsigned index = 1; for (SmallVector<DotDebugLocEntry, 4>::iterator @@ -2144,7 +2144,7 @@ void DwarfDebug::emitDebugRanges() { // Start the dwarf ranges section. Asm->OutStreamer.SwitchSection( Asm->getObjFileLowering().getDwarfRangesSection()); - unsigned char Size = Asm->getDataLayout().getPointerSize(0); + unsigned char Size = Asm->getDataLayout().getPointerSize(); for (SmallVector<const MCSymbol *, 8>::iterator I = DebugRangeSymbols.begin(), E = DebugRangeSymbols.end(); I != E; ++I) { @@ -2202,7 +2202,7 @@ void DwarfDebug::emitDebugInlineInfo() { Asm->OutStreamer.AddComment("Dwarf Version"); Asm->EmitInt16(dwarf::DWARF_VERSION); Asm->OutStreamer.AddComment("Address Size (in bytes)"); - Asm->EmitInt8(Asm->getDataLayout().getPointerSize(0)); + Asm->EmitInt8(Asm->getDataLayout().getPointerSize()); for (SmallVector<const MDNode *, 4>::iterator I = InlinedSPNodes.begin(), E = InlinedSPNodes.end(); I != E; ++I) { @@ -2233,7 +2233,7 @@ void DwarfDebug::emitDebugInlineInfo() { if (Asm->isVerbose()) Asm->OutStreamer.AddComment("low_pc"); Asm->OutStreamer.EmitSymbolValue(LI->first, - Asm->getDataLayout().getPointerSize(0),0); + Asm->getDataLayout().getPointerSize(),0); } } diff --git a/lib/CodeGen/AsmPrinter/DwarfException.cpp b/lib/CodeGen/AsmPrinter/DwarfException.cpp index 31d07141a1d..08fb6b3f52c 100644 --- a/lib/CodeGen/AsmPrinter/DwarfException.cpp +++ b/lib/CodeGen/AsmPrinter/DwarfException.cpp @@ -417,7 +417,7 @@ void DwarfException::EmitExceptionTable() { // that we're omitting that bit. TTypeEncoding = dwarf::DW_EH_PE_omit; // dwarf::DW_EH_PE_absptr - TypeFormatSize = Asm->getDataLayout().getPointerSize(0); + TypeFormatSize = Asm->getDataLayout().getPointerSize(); } else { // Okay, we have actual filters or typeinfos to emit. As such, we need to // pick a type encoding for them. 
We're about to emit a list of pointers to diff --git a/lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp b/lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp index d0e27d1d04d..f7c011968c2 100644 --- a/lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp +++ b/lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp @@ -91,7 +91,7 @@ void OcamlGCMetadataPrinter::beginAssembly(AsmPrinter &AP) { /// either condition is detected in a function which uses the GC. /// void OcamlGCMetadataPrinter::finishAssembly(AsmPrinter &AP) { - unsigned IntPtrSize = AP.TM.getDataLayout()->getPointerSize(0); + unsigned IntPtrSize = AP.TM.getDataLayout()->getPointerSize(); AP.OutStreamer.SwitchSection(AP.getObjFileLowering().getTextSection()); EmitCamlGlobal(getModule(), AP, "code_end"); diff --git a/lib/CodeGen/MachineFunction.cpp b/lib/CodeGen/MachineFunction.cpp index f11785070bb..91d52118576 100644 --- a/lib/CodeGen/MachineFunction.cpp +++ b/lib/CodeGen/MachineFunction.cpp @@ -550,7 +550,7 @@ unsigned MachineJumpTableInfo::getEntrySize(const DataLayout &TD) const { // address of a block, in which case it is the pointer size. switch (getEntryKind()) { case MachineJumpTableInfo::EK_BlockAddress: - return TD.getPointerSize(0); + return TD.getPointerSize(); case MachineJumpTableInfo::EK_GPRel64BlockAddress: return 8; case MachineJumpTableInfo::EK_GPRel32BlockAddress: @@ -570,7 +570,7 @@ unsigned MachineJumpTableInfo::getEntryAlignment(const DataLayout &TD) const { // alignment. switch (getEntryKind()) { case MachineJumpTableInfo::EK_BlockAddress: - return TD.getPointerABIAlignment(0); + return TD.getPointerABIAlignment(); case MachineJumpTableInfo::EK_GPRel64BlockAddress: return TD.getABIIntegerTypeAlignment(64); case MachineJumpTableInfo::EK_GPRel32BlockAddress: diff --git a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp index 183416f3fd2..79cfcdfe0ea 100644 --- a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp +++ b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp @@ -3449,12 +3449,9 @@ static bool FindOptimalMemOpLowering(std::vector<EVT> &MemOps, EVT VT = TLI.getOptimalMemOpType(Size, DstAlign, SrcAlign, IsZeroVal, MemcpyStrSrc, DAG.getMachineFunction()); - Type *vtType = VT.isExtended() ? VT.getTypeForEVT(*DAG.getContext()) : NULL; - unsigned AS = (vtType && vtType->isPointerTy()) ? 
- cast<PointerType>(vtType)->getAddressSpace() : 0; if (VT == MVT::Other) { - if (DstAlign >= TLI.getDataLayout()->getPointerPrefAlignment(AS) || + if (DstAlign >= TLI.getDataLayout()->getPointerPrefAlignment() || TLI.allowsUnalignedMemoryAccesses(VT)) { VT = TLI.getPointerTy(); } else { diff --git a/lib/CodeGen/TargetLoweringObjectFileImpl.cpp b/lib/CodeGen/TargetLoweringObjectFileImpl.cpp index bf26a6d5920..8f5d770f665 100644 --- a/lib/CodeGen/TargetLoweringObjectFileImpl.cpp +++ b/lib/CodeGen/TargetLoweringObjectFileImpl.cpp @@ -77,9 +77,9 @@ void TargetLoweringObjectFileELF::emitPersonalityValue(MCStreamer &Streamer, Flags, SectionKind::getDataRel(), 0, Label->getName()); - unsigned Size = TM.getDataLayout()->getPointerSize(0); + unsigned Size = TM.getDataLayout()->getPointerSize(); Streamer.SwitchSection(Sec); - Streamer.EmitValueToAlignment(TM.getDataLayout()->getPointerABIAlignment(0)); + Streamer.EmitValueToAlignment(TM.getDataLayout()->getPointerABIAlignment()); Streamer.EmitSymbolAttribute(Label, MCSA_ELF_TypeObject); const MCExpr *E = MCConstantExpr::Create(Size, getContext()); Streamer.EmitELFSize(Label, E); diff --git a/lib/ExecutionEngine/ExecutionEngine.cpp b/lib/ExecutionEngine/ExecutionEngine.cpp index 480f2c99705..c5c46815a28 100644 --- a/lib/ExecutionEngine/ExecutionEngine.cpp +++ b/lib/ExecutionEngine/ExecutionEngine.cpp @@ -17,7 +17,6 @@ #include "llvm/Constants.h" #include "llvm/DerivedTypes.h" -#include "llvm/Instructions.h" #include "llvm/Module.h" #include "llvm/ExecutionEngine/GenericValue.h" #include "llvm/ADT/SmallString.h" @@ -268,7 +267,7 @@ public: void *ArgvArray::reset(LLVMContext &C, ExecutionEngine *EE, const std::vector<std::string> &InputArgv) { clear(); // Free the old contents. - unsigned PtrSize = EE->getDataLayout()->getPointerSize(0); + unsigned PtrSize = EE->getDataLayout()->getPointerSize(); Array = new char[(InputArgv.size()+1)*PtrSize]; DEBUG(dbgs() << "JIT: ARGV = " << (void*)Array << "\n"); @@ -343,7 +342,7 @@ void ExecutionEngine::runStaticConstructorsDestructors(bool isDtors) { #ifndef NDEBUG /// isTargetNullPtr - Return whether the target pointer stored at Loc is null. 
static bool isTargetNullPtr(ExecutionEngine *EE, void *Loc) { - unsigned PtrSize = EE->getDataLayout()->getPointerSize(0); + unsigned PtrSize = EE->getDataLayout()->getPointerSize(); for (unsigned i = 0; i < PtrSize; ++i) if (*(i + (uint8_t*)Loc)) return false; @@ -645,15 +644,13 @@ GenericValue ExecutionEngine::getConstantValue(const Constant *C) { } case Instruction::PtrToInt: { GenericValue GV = getConstantValue(Op0); - unsigned AS = cast<PtrToIntInst>(CE)->getPointerAddressSpace(); - uint32_t PtrWidth = TD->getPointerSizeInBits(AS); + uint32_t PtrWidth = TD->getPointerSizeInBits(); GV.IntVal = APInt(PtrWidth, uintptr_t(GV.PointerVal)); return GV; } case Instruction::IntToPtr: { GenericValue GV = getConstantValue(Op0); - unsigned AS = cast<IntToPtrInst>(CE)->getAddressSpace(); - uint32_t PtrWidth = TD->getPointerSizeInBits(AS); + uint32_t PtrWidth = TD->getPointerSizeInBits(); if (PtrWidth != GV.IntVal.getBitWidth()) GV.IntVal = GV.IntVal.zextOrTrunc(PtrWidth); assert(GV.IntVal.getBitWidth() <= 64 && "Bad pointer width"); diff --git a/lib/ExecutionEngine/Interpreter/Execution.cpp b/lib/ExecutionEngine/Interpreter/Execution.cpp index 326bf79c589..5202b091654 100644 --- a/lib/ExecutionEngine/Interpreter/Execution.cpp +++ b/lib/ExecutionEngine/Interpreter/Execution.cpp @@ -1054,8 +1054,7 @@ GenericValue Interpreter::executeIntToPtrInst(Value *SrcVal, Type *DstTy, GenericValue Dest, Src = getOperandValue(SrcVal, SF); assert(DstTy->isPointerTy() && "Invalid PtrToInt instruction"); - unsigned AS = cast<PointerType>(DstTy)->getAddressSpace(); - uint32_t PtrSize = TD.getPointerSizeInBits(AS); + uint32_t PtrSize = TD.getPointerSizeInBits(); if (PtrSize != Src.IntVal.getBitWidth()) Src.IntVal = Src.IntVal.zextOrTrunc(PtrSize); diff --git a/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp b/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp index f58adbe1e1a..e16e2d112a9 100644 --- a/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp +++ b/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp @@ -376,7 +376,7 @@ GenericValue lle_X_sprintf(FunctionType *FT, case 'x': case 'X': if (HowLong >= 1) { if (HowLong == 1 && - TheInterpreter->getDataLayout()->getPointerSizeInBits(0) == 64 && + TheInterpreter->getDataLayout()->getPointerSizeInBits() == 64 && sizeof(long) < sizeof(int64_t)) { // Make sure we use %lld with a 64 bit argument because we might be // compiling LLI on a 32 bit compiler. diff --git a/lib/ExecutionEngine/JIT/JITDwarfEmitter.cpp b/lib/ExecutionEngine/JIT/JITDwarfEmitter.cpp index bcd5b263654..19c197903a6 100644 --- a/lib/ExecutionEngine/JIT/JITDwarfEmitter.cpp +++ b/lib/ExecutionEngine/JIT/JITDwarfEmitter.cpp @@ -14,9 +14,7 @@ #include "JIT.h" #include "JITDwarfEmitter.h" -#include "llvm/DerivedTypes.h" #include "llvm/Function.h" -#include "llvm/GlobalVariable.h" #include "llvm/ADT/DenseMap.h" #include "llvm/CodeGen/JITCodeEmitter.h" #include "llvm/CodeGen/MachineFunction.h" @@ -68,7 +66,7 @@ unsigned char* JITDwarfEmitter::EmitDwarfTable(MachineFunction& F, void JITDwarfEmitter::EmitFrameMoves(intptr_t BaseLabelPtr, const std::vector<MachineMove> &Moves) const { - unsigned PointerSize = TD->getPointerSize(0); + unsigned PointerSize = TD->getPointerSize(); int stackGrowth = stackGrowthDirection == TargetFrameLowering::StackGrowsUp ? 
PointerSize : -PointerSize; MCSymbol *BaseLabel = 0; @@ -380,7 +378,7 @@ unsigned char* JITDwarfEmitter::EmitExceptionTable(MachineFunction* MF, for (unsigned i = 0, e = CallSites.size(); i < e; ++i) SizeSites += MCAsmInfo::getULEB128Size(CallSites[i].Action); - unsigned SizeTypes = TypeInfos.size() * TD->getPointerSize(0); + unsigned SizeTypes = TypeInfos.size() * TD->getPointerSize(); unsigned TypeOffset = sizeof(int8_t) + // Call site format // Call-site table length @@ -456,12 +454,12 @@ unsigned char* JITDwarfEmitter::EmitExceptionTable(MachineFunction* MF, const GlobalVariable *GV = TypeInfos[M - 1]; if (GV) { - if (TD->getPointerSize(GV->getType()->getAddressSpace()) == sizeof(int32_t)) + if (TD->getPointerSize() == sizeof(int32_t)) JCE->emitInt32((intptr_t)Jit.getOrEmitGlobalVariable(GV)); else JCE->emitInt64((intptr_t)Jit.getOrEmitGlobalVariable(GV)); } else { - if (TD->getPointerSize(0) == sizeof(int32_t)) + if (TD->getPointerSize() == sizeof(int32_t)) JCE->emitInt32(0); else JCE->emitInt64(0); @@ -483,7 +481,7 @@ unsigned char* JITDwarfEmitter::EmitExceptionTable(MachineFunction* MF, unsigned char* JITDwarfEmitter::EmitCommonEHFrame(const Function* Personality) const { - unsigned PointerSize = TD->getPointerSize(0); + unsigned PointerSize = TD->getPointerSize(); int stackGrowth = stackGrowthDirection == TargetFrameLowering::StackGrowsUp ? PointerSize : -PointerSize; @@ -543,7 +541,7 @@ JITDwarfEmitter::EmitEHFrame(const Function* Personality, unsigned char* StartFunction, unsigned char* EndFunction, unsigned char* ExceptionTable) const { - unsigned PointerSize = TD->getPointerSize(0); + unsigned PointerSize = TD->getPointerSize(); // EH frame header. unsigned char* StartEHPtr = (unsigned char*)JCE->getCurrentPCValue(); diff --git a/lib/Target/ARM/ARMELFWriterInfo.cpp b/lib/Target/ARM/ARMELFWriterInfo.cpp index 7bca0edf915..d88bf0c8fa1 100644 --- a/lib/Target/ARM/ARMELFWriterInfo.cpp +++ b/lib/Target/ARM/ARMELFWriterInfo.cpp @@ -26,7 +26,7 @@ using namespace llvm; //===----------------------------------------------------------------------===// ARMELFWriterInfo::ARMELFWriterInfo(TargetMachine &TM) - : TargetELFWriterInfo(TM.getDataLayout()->getPointerSizeInBits(0) == 64, + : TargetELFWriterInfo(TM.getDataLayout()->getPointerSizeInBits() == 64, TM.getDataLayout()->isLittleEndian()) { } diff --git a/lib/Target/MBlaze/MBlazeELFWriterInfo.cpp b/lib/Target/MBlaze/MBlazeELFWriterInfo.cpp index 6b575099e59..4ca30ba81f7 100644 --- a/lib/Target/MBlaze/MBlazeELFWriterInfo.cpp +++ b/lib/Target/MBlaze/MBlazeELFWriterInfo.cpp @@ -26,7 +26,7 @@ using namespace llvm; //===----------------------------------------------------------------------===// MBlazeELFWriterInfo::MBlazeELFWriterInfo(TargetMachine &TM) - : TargetELFWriterInfo(TM.getDataLayout()->getPointerSizeInBits(0) == 64, + : TargetELFWriterInfo(TM.getDataLayout()->getPointerSizeInBits() == 64, TM.getDataLayout()->isLittleEndian()) { } diff --git a/lib/Target/MSP430/MSP430ISelLowering.cpp b/lib/Target/MSP430/MSP430ISelLowering.cpp index 113378a5f31..fc677aec38e 100644 --- a/lib/Target/MSP430/MSP430ISelLowering.cpp +++ b/lib/Target/MSP430/MSP430ISelLowering.cpp @@ -881,7 +881,7 @@ MSP430TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const { if (ReturnAddrIndex == 0) { // Set up a frame object for the return address. 
- uint64_t SlotSize = TD->getPointerSize(0); + uint64_t SlotSize = TD->getPointerSize(); ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(SlotSize, -SlotSize, true); FuncInfo->setRAIndex(ReturnAddrIndex); @@ -901,7 +901,7 @@ SDValue MSP430TargetLowering::LowerRETURNADDR(SDValue Op, if (Depth > 0) { SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); SDValue Offset = - DAG.getConstant(TD->getPointerSize(0), MVT::i16); + DAG.getConstant(TD->getPointerSize(), MVT::i16); return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), DAG.getNode(ISD::ADD, dl, getPointerTy(), FrameAddr, Offset), diff --git a/lib/Target/NVPTX/NVPTXAsmPrinter.cpp b/lib/Target/NVPTX/NVPTXAsmPrinter.cpp index 7146f6f7f80..d3dfb35e261 100644 --- a/lib/Target/NVPTX/NVPTXAsmPrinter.cpp +++ b/lib/Target/NVPTX/NVPTXAsmPrinter.cpp @@ -126,9 +126,8 @@ const MCExpr *nvptx::LowerConstant(const Constant *CV, AsmPrinter &AP) { return Base; // Truncate/sext the offset to the pointer size. - unsigned AS = cast<GetElementPtrInst>(CE)->getPointerAddressSpace(); - if (TD.getPointerSizeInBits(AS) != 64) { - int SExtAmount = 64-TD.getPointerSizeInBits(AS); + if (TD.getPointerSizeInBits() != 64) { + int SExtAmount = 64-TD.getPointerSizeInBits(); Offset = (Offset << SExtAmount) >> SExtAmount; } @@ -1379,7 +1378,7 @@ getOpenCLAlignment(const DataLayout *TD, const FunctionType *FTy = dyn_cast<FunctionType>(Ty); if (FTy) - return TD->getPointerPrefAlignment(0); + return TD->getPointerPrefAlignment(); return TD->getPrefTypeAlignment(Ty); } diff --git a/lib/Target/PowerPC/PPCAsmPrinter.cpp b/lib/Target/PowerPC/PPCAsmPrinter.cpp index 58a3d2ca342..914a9b0dcea 100644 --- a/lib/Target/PowerPC/PPCAsmPrinter.cpp +++ b/lib/Target/PowerPC/PPCAsmPrinter.cpp @@ -439,7 +439,7 @@ void PPCLinuxAsmPrinter::EmitFunctionEntryLabel() { bool PPCLinuxAsmPrinter::doFinalization(Module &M) { const DataLayout *TD = TM.getDataLayout(); - bool isPPC64 = TD->getPointerSizeInBits(0) == 64; + bool isPPC64 = TD->getPointerSizeInBits() == 64; if (isPPC64 && !TOC.empty()) { const MCSectionELF *Section = OutStreamer.getContext().getELFSection(".toc", @@ -545,7 +545,7 @@ static MCSymbol *GetAnonSym(MCSymbol *Sym, MCContext &Ctx) { void PPCDarwinAsmPrinter:: EmitFunctionStubs(const MachineModuleInfoMachO::SymbolListTy &Stubs) { - bool isPPC64 = TM.getDataLayout()->getPointerSizeInBits(0) == 64; + bool isPPC64 = TM.getDataLayout()->getPointerSizeInBits() == 64; const TargetLoweringObjectFileMachO &TLOFMacho = static_cast<const TargetLoweringObjectFileMachO &>(getObjFileLowering()); @@ -640,7 +640,7 @@ EmitFunctionStubs(const MachineModuleInfoMachO::SymbolListTy &Stubs) { bool PPCDarwinAsmPrinter::doFinalization(Module &M) { - bool isPPC64 = TM.getDataLayout()->getPointerSizeInBits(0) == 64; + bool isPPC64 = TM.getDataLayout()->getPointerSizeInBits() == 64; // Darwin/PPC always uses mach-o. 
const TargetLoweringObjectFileMachO &TLOFMacho = diff --git a/lib/Target/PowerPC/PPCRegisterInfo.cpp b/lib/Target/PowerPC/PPCRegisterInfo.cpp index d1232114732..459c3589d3f 100644 --- a/lib/Target/PowerPC/PPCRegisterInfo.cpp +++ b/lib/Target/PowerPC/PPCRegisterInfo.cpp @@ -498,7 +498,7 @@ PPCRegisterInfo::hasReservedSpillSlot(const MachineFunction &MF, } else if (CRSpillFrameIdx) { FrameIdx = CRSpillFrameIdx; } else { - MachineFrameInfo *MFI = (const_cast<MachineFunction &>(MF)).getFrameInfo(); + MachineFrameInfo *MFI = ((MachineFunction &)MF).getFrameInfo(); FrameIdx = MFI->CreateFixedObject((uint64_t)4, (int64_t)-4, true); CRSpillFrameIdx = FrameIdx; } diff --git a/lib/Target/Target.cpp b/lib/Target/Target.cpp index 393178a4692..219cbf1afc9 100644 --- a/lib/Target/Target.cpp +++ b/lib/Target/Target.cpp @@ -56,21 +56,13 @@ LLVMByteOrdering LLVMByteOrder(LLVMTargetDataRef TD) { } unsigned LLVMPointerSize(LLVMTargetDataRef TD) { - return unwrap(TD)->getPointerSize(0); -} - -unsigned LLVMPointerSizeForAS(LLVMTargetDataRef TD, unsigned AS) { - return unwrap(TD)->getPointerSize(AS); + return unwrap(TD)->getPointerSize(); } LLVMTypeRef LLVMIntPtrType(LLVMTargetDataRef TD) { return wrap(unwrap(TD)->getIntPtrType(getGlobalContext())); } -LLVMTypeRef LLVMIntPtrTypeForAS(LLVMTargetDataRef TD, unsigned AS) { - return wrap(unwrap(TD)->getIntPtrType(getGlobalContext(), AS)); -} - unsigned long long LLVMSizeOfTypeInBits(LLVMTargetDataRef TD, LLVMTypeRef Ty) { return unwrap(TD)->getTypeSizeInBits(unwrap(Ty)); } diff --git a/lib/Target/X86/X86AsmPrinter.cpp b/lib/Target/X86/X86AsmPrinter.cpp index 92afac62e65..c704ca17013 100644 --- a/lib/Target/X86/X86AsmPrinter.cpp +++ b/lib/Target/X86/X86AsmPrinter.cpp @@ -693,7 +693,7 @@ void X86AsmPrinter::EmitEndOfAsmFile(Module &M) { for (unsigned i = 0, e = Stubs.size(); i != e; ++i) { OutStreamer.EmitLabel(Stubs[i].first); OutStreamer.EmitSymbolValue(Stubs[i].second.getPointer(), - TD->getPointerSize(0), 0); + TD->getPointerSize(), 0); } Stubs.clear(); } diff --git a/lib/Target/X86/X86FrameLowering.cpp b/lib/Target/X86/X86FrameLowering.cpp index e7f817e3a98..8acef9dc7ba 100644 --- a/lib/Target/X86/X86FrameLowering.cpp +++ b/lib/Target/X86/X86FrameLowering.cpp @@ -317,7 +317,7 @@ void X86FrameLowering::emitCalleeSavedFrameMoves(MachineFunction &MF, bool HasFP = hasFP(MF); // Calculate amount of bytes used for return address storing. - int stackGrowth = -TD->getPointerSize(0); + int stackGrowth = -TD->getPointerSize(); // FIXME: This is dirty hack. The code itself is pretty mess right now. // It should be rewritten from scratch and generalized sometimes. @@ -717,7 +717,7 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF) const { std::vector<MachineMove> &Moves = MMI.getFrameMoves(); const DataLayout *TD = MF.getTarget().getDataLayout(); uint64_t NumBytes = 0; - int stackGrowth = -TD->getPointerSize(0); + int stackGrowth = -TD->getPointerSize(); if (HasFP) { // Calculate required stack adjustment. 
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 1293bbf3774..1580935fb2f 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -2652,7 +2652,7 @@ X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize, unsigned StackAlignment = TFI.getStackAlignment(); uint64_t AlignMask = StackAlignment - 1; int64_t Offset = StackSize; - uint64_t SlotSize = TD->getPointerSize(0); + uint64_t SlotSize = TD->getPointerSize(); if ( (Offset & AlignMask) <= (StackAlignment - SlotSize) ) { // Number smaller than 12 so just add the difference. Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask)); @@ -3020,7 +3020,7 @@ SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const { if (ReturnAddrIndex == 0) { // Set up a frame object for the return address. - uint64_t SlotSize = TD->getPointerSize(0); + uint64_t SlotSize = TD->getPointerSize(); ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(SlotSize, -SlotSize, false); FuncInfo->setRAIndex(ReturnAddrIndex); @@ -7643,7 +7643,7 @@ X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const { IDX = DAG.getLoad(getPointerTy(), dl, Chain, IDX, MachinePointerInfo(), false, false, false, 0); - SDValue Scale = DAG.getConstant(Log2_64_Ceil(TD->getPointerSize(0)), + SDValue Scale = DAG.getConstant(Log2_64_Ceil(TD->getPointerSize()), getPointerTy()); IDX = DAG.getNode(ISD::SHL, dl, getPointerTy(), IDX, Scale); @@ -10278,7 +10278,7 @@ SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op, if (Depth > 0) { SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); SDValue Offset = - DAG.getConstant(TD->getPointerSize(0), + DAG.getConstant(TD->getPointerSize(), Subtarget->is64Bit() ? MVT::i64 : MVT::i32); return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), DAG.getNode(ISD::ADD, dl, getPointerTy(), @@ -10310,7 +10310,7 @@ SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const { SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op, SelectionDAG &DAG) const { - return DAG.getIntPtrConstant(2*TD->getPointerSize(0)); + return DAG.getIntPtrConstant(2*TD->getPointerSize()); } SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const { @@ -10325,7 +10325,7 @@ SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const { unsigned StoreAddrReg = (Subtarget->is64Bit() ? X86::RCX : X86::ECX); SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), Frame, - DAG.getIntPtrConstant(TD->getPointerSize(0))); + DAG.getIntPtrConstant(TD->getPointerSize())); StoreAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), StoreAddr, Offset); Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo(), false, false, 0); diff --git a/lib/Transforms/InstCombine/InstCombineCasts.cpp b/lib/Transforms/InstCombine/InstCombineCasts.cpp index f3f3f8f585d..b59210a9df1 100644 --- a/lib/Transforms/InstCombine/InstCombineCasts.cpp +++ b/lib/Transforms/InstCombine/InstCombineCasts.cpp @@ -1293,16 +1293,15 @@ Instruction *InstCombiner::visitIntToPtr(IntToPtrInst &CI) { // If the source integer type is not the intptr_t type for this target, do a // trunc or zext to the intptr_t type, then inttoptr of it. This allows the // cast to be exposed to other transforms. 
- unsigned AS = CI.getAddressSpace(); if (TD) { if (CI.getOperand(0)->getType()->getScalarSizeInBits() > - TD->getPointerSizeInBits(AS)) { + TD->getPointerSizeInBits()) { Value *P = Builder->CreateTrunc(CI.getOperand(0), TD->getIntPtrType(CI.getContext())); return new IntToPtrInst(P, CI.getType()); } if (CI.getOperand(0)->getType()->getScalarSizeInBits() < - TD->getPointerSizeInBits(AS)) { + TD->getPointerSizeInBits()) { Value *P = Builder->CreateZExt(CI.getOperand(0), TD->getIntPtrType(CI.getContext())); return new IntToPtrInst(P, CI.getType()); @@ -1369,14 +1368,13 @@ Instruction *InstCombiner::visitPtrToInt(PtrToIntInst &CI) { // If the destination integer type is not the intptr_t type for this target, // do a ptrtoint to intptr_t then do a trunc or zext. This allows the cast // to be exposed to other transforms. - unsigned AS = CI.getPointerAddressSpace(); if (TD) { - if (CI.getType()->getScalarSizeInBits() < TD->getPointerSizeInBits(AS)) { + if (CI.getType()->getScalarSizeInBits() < TD->getPointerSizeInBits()) { Value *P = Builder->CreatePtrToInt(CI.getOperand(0), TD->getIntPtrType(CI.getContext())); return new TruncInst(P, CI.getType()); } - if (CI.getType()->getScalarSizeInBits() > TD->getPointerSizeInBits(AS)) { + if (CI.getType()->getScalarSizeInBits() > TD->getPointerSizeInBits()) { Value *P = Builder->CreatePtrToInt(CI.getOperand(0), TD->getIntPtrType(CI.getContext())); return new ZExtInst(P, CI.getType()); diff --git a/lib/Transforms/InstCombine/InstCombineCompares.cpp b/lib/Transforms/InstCombine/InstCombineCompares.cpp index e3e5ddae80b..4d5ffddc4c7 100644 --- a/lib/Transforms/InstCombine/InstCombineCompares.cpp +++ b/lib/Transforms/InstCombine/InstCombineCompares.cpp @@ -365,12 +365,11 @@ FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV, // order the state machines in complexity of the generated code. Value *Idx = GEP->getOperand(2); - unsigned AS = GEP->getPointerAddressSpace(); // If the index is larger than the pointer size of the target, truncate the // index down like the GEP would do implicitly. We don't have to do this for // an inbounds GEP because the index can't be out of range. if (!GEP->isInBounds() && - Idx->getType()->getPrimitiveSizeInBits() > TD->getPointerSizeInBits(AS)) + Idx->getType()->getPrimitiveSizeInBits() > TD->getPointerSizeInBits()) Idx = Builder->CreateTrunc(Idx, TD->getIntPtrType(Idx->getContext())); // If the comparison is only true for one or two elements, emit direct @@ -529,11 +528,10 @@ static Value *EvaluateGEPOffsetExpression(User *GEP, InstCombiner &IC) { } } - unsigned AS = cast<GetElementPtrInst>(GEP)->getPointerAddressSpace(); // Okay, we know we have a single variable index, which must be a // pointer/array/vector index. If there is no offset, life is simple, return // the index. - unsigned IntPtrWidth = TD.getPointerSizeInBits(AS); + unsigned IntPtrWidth = TD.getPointerSizeInBits(); if (Offset == 0) { // Cast to intptrty in case a truncation occurs. If an extension is needed, // we don't need to bother extending: the extension won't affect where the @@ -1554,8 +1552,7 @@ Instruction *InstCombiner::visitICmpInstWithCastAndCast(ICmpInst &ICI) { // Turn icmp (ptrtoint x), (ptrtoint/c) into a compare of the input if the // integer type is the same size as the pointer type. 
if (TD && LHSCI->getOpcode() == Instruction::PtrToInt && - TD->getPointerSizeInBits( - cast<PtrToIntInst>(LHSCI)->getPointerAddressSpace()) == + TD->getPointerSizeInBits() == cast<IntegerType>(DestTy)->getBitWidth()) { Value *RHSOp = 0; if (Constant *RHSC = dyn_cast<Constant>(ICI.getOperand(1))) { diff --git a/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/lib/Transforms/Instrumentation/AddressSanitizer.cpp index 2fe3178f74f..10ab9cb6039 100644 --- a/lib/Transforms/Instrumentation/AddressSanitizer.cpp +++ b/lib/Transforms/Instrumentation/AddressSanitizer.cpp @@ -742,7 +742,7 @@ bool AddressSanitizer::runOnModule(Module &M) { BL.reset(new BlackList(ClBlackListFile)); C = &(M.getContext()); - LongSize = TD->getPointerSizeInBits(0); + LongSize = TD->getPointerSizeInBits(); IntptrTy = Type::getIntNTy(*C, LongSize); IntptrPtrTy = PointerType::get(IntptrTy, 0); diff --git a/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/lib/Transforms/Scalar/MemCpyOptimizer.cpp index 97fff9edd68..517657cf526 100644 --- a/lib/Transforms/Scalar/MemCpyOptimizer.cpp +++ b/lib/Transforms/Scalar/MemCpyOptimizer.cpp @@ -174,11 +174,10 @@ bool MemsetRange::isProfitableToUseMemset(const DataLayout &TD) const { // this width can be stored. If so, check to see whether we will end up // actually reducing the number of stores used. unsigned Bytes = unsigned(End-Start); - unsigned AS = cast<StoreInst>(TheStores[0])->getPointerAddressSpace(); - unsigned NumPointerStores = Bytes/TD.getPointerSize(AS); + unsigned NumPointerStores = Bytes/TD.getPointerSize(); // Assume the remaining bytes if any are done a byte at a time. - unsigned NumByteStores = Bytes - NumPointerStores*TD.getPointerSize(AS); + unsigned NumByteStores = Bytes - NumPointerStores*TD.getPointerSize(); // If we will reduce the # stores (according to this heuristic), do the // transformation. This encourages merging 4 x i8 -> i32 and 2 x i16 -> i32 diff --git a/lib/Transforms/Scalar/SROA.cpp b/lib/Transforms/Scalar/SROA.cpp index f43bb96facc..ca762514929 100644 --- a/lib/Transforms/Scalar/SROA.cpp +++ b/lib/Transforms/Scalar/SROA.cpp @@ -447,7 +447,6 @@ protected: bool computeConstantGEPOffset(GetElementPtrInst &GEPI, int64_t &GEPOffset) { GEPOffset = Offset; - unsigned int AS = GEPI.getPointerAddressSpace(); for (gep_type_iterator GTI = gep_type_begin(GEPI), GTE = gep_type_end(GEPI); GTI != GTE; ++GTI) { ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand()); @@ -477,7 +476,7 @@ protected: continue; } - APInt Index = OpC->getValue().sextOrTrunc(TD.getPointerSizeInBits(AS)); + APInt Index = OpC->getValue().sextOrTrunc(TD.getPointerSizeInBits()); Index *= APInt(Index.getBitWidth(), TD.getTypeAllocSize(GTI.getIndexedType())); Index += APInt(Index.getBitWidth(), (uint64_t)GEPOffset, @@ -1785,9 +1784,7 @@ static Value *getNaturalGEPWithType(IRBuilder<> &IRB, const DataLayout &TD, break; if (SequentialType *SeqTy = dyn_cast<SequentialType>(ElementTy)) { ElementTy = SeqTy->getElementType(); - Indices.push_back(IRB.getInt(APInt(TD.getPointerSizeInBits( - ElementTy->isPointerTy() ? - cast<PointerType>(ElementTy)->getAddressSpace(): 0), 0))); + Indices.push_back(IRB.getInt(APInt(TD.getPointerSizeInBits(), 0))); } else if (StructType *STy = dyn_cast<StructType>(ElementTy)) { if (STy->element_begin() == STy->element_end()) break; // Nothing left to descend into. 
@@ -2242,8 +2239,7 @@ private: Value *getAdjustedAllocaPtr(IRBuilder<> &IRB, Type *PointerTy) { assert(BeginOffset >= NewAllocaBeginOffset); - unsigned AS = cast<PointerType>(PointerTy)->getAddressSpace(); - APInt Offset(TD.getPointerSizeInBits(AS), BeginOffset - NewAllocaBeginOffset); + APInt Offset(TD.getPointerSizeInBits(), BeginOffset - NewAllocaBeginOffset); return getAdjustedPtr(IRB, TD, &NewAI, Offset, PointerTy, getName("")); } @@ -2582,10 +2578,8 @@ private: const AllocaPartitioning::MemTransferOffsets &MTO = P.getMemTransferOffsets(II); - assert(OldPtr->getType()->isPointerTy() && "Must be a pointer type!"); - unsigned AS = cast<PointerType>(OldPtr->getType())->getAddressSpace(); // Compute the relative offset within the transfer. - unsigned IntPtrWidth = TD.getPointerSizeInBits(AS); + unsigned IntPtrWidth = TD.getPointerSizeInBits(); APInt RelOffset(IntPtrWidth, BeginOffset - (IsDest ? MTO.DestBegin : MTO.SourceBegin)); diff --git a/lib/Transforms/Utils/Local.cpp b/lib/Transforms/Utils/Local.cpp index 9729687a83e..a954d82c05b 100644 --- a/lib/Transforms/Utils/Local.cpp +++ b/lib/Transforms/Utils/Local.cpp @@ -806,8 +806,7 @@ unsigned llvm::getOrEnforceKnownAlignment(Value *V, unsigned PrefAlign, const DataLayout *TD) { assert(V->getType()->isPointerTy() && "getOrEnforceKnownAlignment expects a pointer!"); - unsigned AS = cast<PointerType>(V->getType())->getAddressSpace(); - unsigned BitWidth = TD ? TD->getPointerSizeInBits(AS) : 64; + unsigned BitWidth = TD ? TD->getPointerSizeInBits() : 64; APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0); ComputeMaskedBits(V, KnownZero, KnownOne, TD); unsigned TrailZ = KnownZero.countTrailingOnes(); |
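The llvm-c surface follows the same direction: the include/llvm-c/Target.h and lib/Target/Target.cpp hunks above drop the ForAS entry points, leaving only default-address-space queries for C API clients. A hedged usage sketch, assuming `TD` is a valid LLVMTargetDataRef created elsewhere (for example with LLVMCreateTargetData):

```cpp
// Sketch only; `TD` is assumed to be a live LLVMTargetDataRef.
#include "llvm-c/Target.h"

unsigned pointerSizeViaCAPI(LLVMTargetDataRef TD) {
  // After this commit only the default-address-space forms remain;
  // LLVMPointerSizeForAS and LLVMIntPtrTypeForAS are removed above.
  LLVMTypeRef IntPtrTy = LLVMIntPtrType(TD);  // integer type as wide as a pointer
  (void)IntPtrTy;
  return LLVMPointerSize(TD);                 // pointer size in bytes
}
```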