File clang-mainline-backports.patch of Package llvm
Backports of (parts of) the following clang mainline SVN revisions:
199308, 202088, 204613, 204626, 204627, 204669, 210279, 210337, 210340,
210384, 210389, 210391, 210449, 210657, 210928, 211359, 211360, 211370,
211778, 212734, 212743, 212757, 213494, 213495, 214801
Index: llvm-suse/tools/clang/lib/AST/ASTContext.cpp
===================================================================
--- llvm-suse.orig/tools/clang/lib/AST/ASTContext.cpp
+++ llvm-suse/tools/clang/lib/AST/ASTContext.cpp
@@ -1761,13 +1761,18 @@ unsigned ASTContext::getPreferredTypeAli
if (Target->getTriple().getArch() == llvm::Triple::xcore)
return ABIAlign; // Never overalign on XCore.
+ const TypedefType *TT = T->getAs<TypedefType>();
+
// Double and long long should be naturally aligned if possible.
if (const ComplexType* CT = T->getAs<ComplexType>())
T = CT->getElementType().getTypePtr();
if (T->isSpecificBuiltinType(BuiltinType::Double) ||
T->isSpecificBuiltinType(BuiltinType::LongLong) ||
T->isSpecificBuiltinType(BuiltinType::ULongLong))
- return std::max(ABIAlign, (unsigned)getTypeSize(T));
+ // Don't increase the alignment if an alignment attribute was specified on a
+ // typedef declaration.
+ if (!TT || !TT->getDecl()->getMaxAlignment())
+ return std::max(ABIAlign, (unsigned)getTypeSize(T));
return ABIAlign;
}
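For reference, a minimal sketch of the behavior the ASTContext.cpp hunk
changes; this is commentary, not part of the patch. The typedef name is
hypothetical, and it assumes an i386-like target where the ABI alignment
of double is 4 bytes but the preferred alignment is 8:

  /* With the fix, an explicit alignment attribute on a typedef is
     honored by __alignof__ instead of being bumped to the 8-byte
     preferred alignment for double. */
  typedef double four_byte_double __attribute__((aligned(4)));
  _Static_assert(__alignof__(four_byte_double) == 4,
                 "typedef alignment attribute suppresses overalignment");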
Index: llvm-suse/tools/clang/lib/Basic/Targets.cpp
===================================================================
--- llvm-suse.orig/tools/clang/lib/Basic/Targets.cpp
+++ llvm-suse/tools/clang/lib/Basic/Targets.cpp
@@ -691,8 +691,9 @@ public:
ArchDefinePwr6 = 1 << 9,
ArchDefinePwr6x = 1 << 10,
ArchDefinePwr7 = 1 << 11,
- ArchDefineA2 = 1 << 12,
- ArchDefineA2q = 1 << 13
+ ArchDefinePwr8 = 1 << 12,
+ ArchDefineA2 = 1 << 13,
+ ArchDefineA2q = 1 << 14
} ArchDefineTypes;
// Note: GCC recognizes the following additional cpus:
@@ -739,6 +740,8 @@ public:
.Case("pwr6x", true)
.Case("power7", true)
.Case("pwr7", true)
+ .Case("power8", true)
+ .Case("pwr8", true)
.Case("powerpc", true)
.Case("ppc", true)
.Case("powerpc64", true)
@@ -934,6 +937,7 @@ void PPCTargetInfo::getTargetDefines(con
if (getTriple().getArch() == llvm::Triple::ppc64le) {
Builder.defineMacro("_LITTLE_ENDIAN");
Builder.defineMacro("__LITTLE_ENDIAN__");
+ Builder.defineMacro("_CALL_ELF","2");
} else {
if (getTriple().getOS() != llvm::Triple::NetBSD &&
getTriple().getOS() != llvm::Triple::OpenBSD)
@@ -987,7 +991,10 @@ void PPCTargetInfo::getTargetDefines(con
| ArchDefinePpcsq)
.Case("pwr7", ArchDefineName | ArchDefinePwr6x | ArchDefinePwr6
| ArchDefinePwr5x | ArchDefinePwr5 | ArchDefinePwr4
- | ArchDefinePwr6 | ArchDefinePpcgr | ArchDefinePpcsq)
+ | ArchDefinePpcgr | ArchDefinePpcsq)
+ .Case("pwr8", ArchDefineName | ArchDefinePwr7 | ArchDefinePwr6x
+ | ArchDefinePwr6 | ArchDefinePwr5x | ArchDefinePwr5
+ | ArchDefinePwr4 | ArchDefinePpcgr | ArchDefinePpcsq)
.Case("power3", ArchDefinePpcgr)
.Case("power4", ArchDefinePwr4 | ArchDefinePpcgr | ArchDefinePpcsq)
.Case("power5", ArchDefinePwr5 | ArchDefinePwr4 | ArchDefinePpcgr
@@ -1001,7 +1008,10 @@ void PPCTargetInfo::getTargetDefines(con
| ArchDefinePpcsq)
.Case("power7", ArchDefinePwr7 | ArchDefinePwr6x | ArchDefinePwr6
| ArchDefinePwr5x | ArchDefinePwr5 | ArchDefinePwr4
- | ArchDefinePwr6 | ArchDefinePpcgr | ArchDefinePpcsq)
+ | ArchDefinePpcgr | ArchDefinePpcsq)
+ .Case("power8", ArchDefinePwr8 | ArchDefinePwr7 | ArchDefinePwr6x
+ | ArchDefinePwr6 | ArchDefinePwr5x | ArchDefinePwr5
+ | ArchDefinePwr4 | ArchDefinePpcgr | ArchDefinePpcsq)
.Default(ArchDefineNone);
if (defs & ArchDefineName)
@@ -1028,6 +1038,8 @@ void PPCTargetInfo::getTargetDefines(con
Builder.defineMacro("_ARCH_PWR6X");
if (defs & ArchDefinePwr7)
Builder.defineMacro("_ARCH_PWR7");
+ if (defs & ArchDefinePwr8)
+ Builder.defineMacro("_ARCH_PWR8");
if (defs & ArchDefineA2)
Builder.defineMacro("_ARCH_A2");
if (defs & ArchDefineA2q) {
@@ -1076,6 +1088,7 @@ void PPCTargetInfo::getDefaultFeatures(l
.Case("g5", true)
.Case("pwr6", true)
.Case("pwr7", true)
+ .Case("pwr8", true)
.Case("ppc64", true)
.Case("ppc64le", true)
.Default(false);
@@ -1244,10 +1257,17 @@ public:
DescriptionString = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-"
"i64:64:64-f32:32:32-f64:64:64-"
"v128:128:128-n32:64";
- } else
- DescriptionString = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-"
- "i64:64:64-f32:32:32-f64:64:64-f128:128:128-"
- "v128:128:128-n32:64";
+ } else {
+ if (Triple.getArch() == llvm::Triple::ppc64le)
+ DescriptionString = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-"
+ "i64:64:64-f32:32:32-f64:64:64-f128:128:128-"
+ "v128:128:128-n32:64";
+ else
+ DescriptionString = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-"
+ "i64:64:64-f32:32:32-f64:64:64-f128:128:128-"
+ "v128:128:128-n32:64";
+ }
+
// PPC64 supports atomics up to 8 bytes.
MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64;
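Illustration only (not part of the patch): how source code might test the
new predefines added above. The guarded bodies are placeholders.

  /* On powerpc64le, _CALL_ELF is now defined to 2, matching GCC, and
     -mcpu=pwr8 defines _ARCH_PWR8 alongside the older _ARCH_PWR* macros. */
  #if defined(_CALL_ELF) && _CALL_ELF == 2
    /* ELFv2 ABI specifics (no function descriptors, etc.) go here. */
  #endif
  #ifdef _ARCH_PWR8
    /* POWER8-specific code paths go here. */
  #endif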
Index: llvm-suse/tools/clang/lib/CodeGen/TargetInfo.cpp
===================================================================
--- llvm-suse.orig/tools/clang/lib/CodeGen/TargetInfo.cpp
+++ llvm-suse/tools/clang/lib/CodeGen/TargetInfo.cpp
@@ -2796,11 +2796,24 @@ PPC32TargetCodeGenInfo::initDwarfEHRegSi
namespace {
/// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information.
class PPC64_SVR4_ABIInfo : public DefaultABIInfo {
+public:
+ enum ABIKind {
+ ELFv1 = 0,
+ ELFv2
+ };
+
+private:
+ static const unsigned GPRBits = 64;
+ ABIKind Kind;
public:
- PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
+ PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, ABIKind Kind)
+ : DefaultABIInfo(CGT), Kind(Kind) {}
bool isPromotableTypeForABI(QualType Ty) const;
+ bool isAlignedParamType(QualType Ty) const;
+ bool isHomogeneousAggregate(QualType Ty, const Type *&Base,
+ uint64_t &Members) const;
ABIArgInfo classifyReturnType(QualType RetTy) const;
ABIArgInfo classifyArgumentType(QualType Ty) const;
@@ -2821,7 +2834,8 @@ public:
const Type *T = isSingleElementStruct(it->type, getContext());
if (T) {
const BuiltinType *BT = T->getAs<BuiltinType>();
- if (T->isVectorType() || (BT && BT->isFloatingPoint())) {
+ if ((T->isVectorType() && getContext().getTypeSize(T) == 128) ||
+ (BT && BT->isFloatingPoint())) {
QualType QT(T, 0);
it->info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT));
continue;
@@ -2838,8 +2852,9 @@ public:
class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo {
public:
- PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT)
- : TargetCodeGenInfo(new PPC64_SVR4_ABIInfo(CGT)) {}
+ PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT,
+ PPC64_SVR4_ABIInfo::ABIKind Kind)
+ : TargetCodeGenInfo(new PPC64_SVR4_ABIInfo(CGT, Kind)) {}
int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
// This is recovered from gcc output.
@@ -2891,16 +2906,207 @@ PPC64_SVR4_ABIInfo::isPromotableTypeForA
return false;
}
+/// isAlignedParamType - Determine whether a type requires 16-byte
+/// alignment in the parameter area.
+bool
+PPC64_SVR4_ABIInfo::isAlignedParamType(QualType Ty) const {
+ // Complex types are passed just like their elements.
+ if (const ComplexType *CTy = Ty->getAs<ComplexType>())
+ Ty = CTy->getElementType();
+
+ // Only vector types of size 16 bytes need alignment (larger types are
+ // passed via reference, smaller types are not aligned).
+ if (Ty->isVectorType())
+ return getContext().getTypeSize(Ty) == 128;
+
+ // For single-element float/vector structs, we consider the whole type
+ // to have the same alignment requirements as its single element.
+ const Type *AlignAsType = NULL;
+ const Type *EltType = isSingleElementStruct(Ty, getContext());
+ if (EltType) {
+ const BuiltinType *BT = EltType->getAs<BuiltinType>();
+ if ((EltType->isVectorType() &&
+ getContext().getTypeSize(EltType) == 128) ||
+ (BT && BT->isFloatingPoint()))
+ AlignAsType = EltType;
+ }
+
+ // Likewise for ELFv2 homogeneous aggregates.
+ const Type *Base = NULL;
+ uint64_t Members = 0;
+ if (!AlignAsType && Kind == ELFv2 &&
+ isAggregateTypeForABI(Ty) && isHomogeneousAggregate(Ty, Base, Members))
+ AlignAsType = Base;
+
+  // With special-case aggregates, only vector base types need alignment.
+ if (AlignAsType)
+ return AlignAsType->isVectorType();
+
+ // Otherwise, we only need alignment for any aggregate type that
+ // has an alignment requirement of >= 16 bytes.
+ if (isAggregateTypeForABI(Ty) && getContext().getTypeAlign(Ty) >= 128)
+ return true;
+
+ return false;
+}
+
+/// isHomogeneousAggregate - Return true if a type is an ELFv2 homogeneous
+/// aggregate. Base is set to the base element type, and Members is set
+/// to the number of base elements.
+bool
+PPC64_SVR4_ABIInfo::isHomogeneousAggregate(QualType Ty, const Type *&Base,
+ uint64_t &Members) const {
+ if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
+ uint64_t NElements = AT->getSize().getZExtValue();
+ if (NElements == 0)
+ return false;
+ if (!isHomogeneousAggregate(AT->getElementType(), Base, Members))
+ return false;
+ Members *= NElements;
+ } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ const RecordDecl *RD = RT->getDecl();
+ if (RD->hasFlexibleArrayMember())
+ return false;
+
+ Members = 0;
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i) {
+ const FieldDecl *FD = *i;
+ // Ignore (non-zero arrays of) empty records.
+ QualType FT = FD->getType();
+ while (const ConstantArrayType *AT =
+ getContext().getAsConstantArrayType(FT)) {
+ if (AT->getSize().getZExtValue() == 0)
+ return false;
+ FT = AT->getElementType();
+ }
+ if (isEmptyRecord(getContext(), FT, true))
+ continue;
+
+ // For compatibility with GCC, ignore empty bitfields in C++ mode.
+ if (getContext().getLangOpts().CPlusPlus &&
+ FD->isBitField() && FD->getBitWidthValue(getContext()) == 0)
+ continue;
+
+ uint64_t FldMembers;
+ if (!isHomogeneousAggregate(FD->getType(), Base, FldMembers))
+ return false;
+
+ Members = (RD->isUnion() ?
+ std::max(Members, FldMembers) : Members + FldMembers);
+ }
+
+ if (!Base)
+ return false;
+
+ // Ensure there is no padding.
+ if (getContext().getTypeSize(Base) * Members !=
+ getContext().getTypeSize(Ty))
+ return false;
+ } else {
+ Members = 1;
+ if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
+ Members = 2;
+ Ty = CT->getElementType();
+ }
+
+ // Homogeneous aggregates for ELFv2 must have base types of float,
+ // double, long double, or 128-bit vectors.
+ if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
+ if (BT->getKind() != BuiltinType::Float &&
+ BT->getKind() != BuiltinType::Double &&
+ BT->getKind() != BuiltinType::LongDouble)
+ return false;
+ } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
+ if (getContext().getTypeSize(VT) != 128)
+ return false;
+ } else {
+ return false;
+ }
+
+ // The base type must be the same for all members. Types that
+ // agree in both total size and mode (float vs. vector) are
+ // treated as being equivalent here.
+ const Type *TyPtr = Ty.getTypePtr();
+ if (!Base)
+ Base = TyPtr;
+
+ if (Base->isVectorType() != TyPtr->isVectorType() ||
+ getContext().getTypeSize(Base) != getContext().getTypeSize(TyPtr))
+ return false;
+ }
+
+  // Vector types require one register; floating-point types require one
+  // or two registers, depending on their size.
+ uint32_t NumRegs = Base->isVectorType() ? 1 :
+ (getContext().getTypeSize(Base) + 63) / 64;
+
+ // Homogeneous Aggregates may occupy at most 8 registers.
+ return (Members > 0 && Members * NumRegs <= 8);
+}
+
ABIArgInfo
PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const {
if (Ty->isAnyComplexType())
return ABIArgInfo::getDirect();
+ // Non-Altivec vector types are passed in GPRs (smaller than 16 bytes)
+ // or via reference (larger than 16 bytes).
+ if (Ty->isVectorType()) {
+ uint64_t Size = getContext().getTypeSize(Ty);
+ if (Size > 128)
+ return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+ else if (Size < 128) {
+ llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
+ return ABIArgInfo::getDirect(CoerceTy);
+ }
+ }
+
if (isAggregateTypeForABI(Ty)) {
if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
- return ABIArgInfo::getIndirect(0);
+ uint64_t ABIAlign = isAlignedParamType(Ty)? 16 : 8;
+ uint64_t TyAlign = getContext().getTypeAlign(Ty) / 8;
+
+ // ELFv2 homogeneous aggregates are passed as array types.
+ const Type *Base = NULL;
+ uint64_t Members = 0;
+ if (Kind == ELFv2 &&
+ isHomogeneousAggregate(Ty, Base, Members)) {
+ llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
+ llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
+ return ABIArgInfo::getDirect(CoerceTy);
+ }
+
+ // If an aggregate may end up fully in registers, we do not
+ // use the ByVal method, but pass the aggregate as array.
+ // This is usually beneficial since we avoid forcing the
+ // back-end to store the argument to memory.
+ uint64_t Bits = getContext().getTypeSize(Ty);
+ if (Bits > 0 && Bits <= 8 * GPRBits) {
+ llvm::Type *CoerceTy;
+
+ // Types up to 8 bytes are passed as integer type (which will be
+ // properly aligned in the argument save area doubleword).
+ if (Bits <= GPRBits)
+ CoerceTy = llvm::IntegerType::get(getVMContext(),
+ llvm::RoundUpToAlignment(Bits, 8));
+ // Larger types are passed as arrays, with the base type selected
+ // according to the required alignment in the save area.
+ else {
+ uint64_t RegBits = ABIAlign * 8;
+ uint64_t NumRegs = llvm::RoundUpToAlignment(Bits, RegBits) / RegBits;
+ llvm::Type *RegTy = llvm::IntegerType::get(getVMContext(), RegBits);
+ CoerceTy = llvm::ArrayType::get(RegTy, NumRegs);
+ }
+
+ return ABIArgInfo::getDirect(CoerceTy);
+ }
+
+ // All other aggregates are passed ByVal.
+ return ABIArgInfo::getIndirect(ABIAlign, /*ByVal=*/true,
+ /*Realign=*/TyAlign > ABIAlign);
}
return (isPromotableTypeForABI(Ty) ?
@@ -2915,8 +3121,52 @@ PPC64_SVR4_ABIInfo::classifyReturnType(Q
if (RetTy->isAnyComplexType())
return ABIArgInfo::getDirect();
- if (isAggregateTypeForABI(RetTy))
+ // Non-Altivec vector types are returned in GPRs (smaller than 16 bytes)
+ // or via reference (larger than 16 bytes).
+ if (RetTy->isVectorType()) {
+ uint64_t Size = getContext().getTypeSize(RetTy);
+ if (Size > 128)
+ return ABIArgInfo::getIndirect(0);
+ else if (Size < 128) {
+ llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
+ return ABIArgInfo::getDirect(CoerceTy);
+ }
+ }
+
+ if (isAggregateTypeForABI(RetTy)) {
+ if (const RecordType *RT = RetTy->getAs<RecordType>())
+ if (isRecordReturnIndirect(RT, getCXXABI()))
+ return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+
+ // ELFv2 homogeneous aggregates are returned as array types.
+ const Type *Base = NULL;
+ uint64_t Members = 0;
+ if (Kind == ELFv2 &&
+ isHomogeneousAggregate(RetTy, Base, Members)) {
+ llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
+ llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
+ return ABIArgInfo::getDirect(CoerceTy);
+ }
+
+ // ELFv2 small aggregates are returned in up to two registers.
+ uint64_t Bits = getContext().getTypeSize(RetTy);
+ if (Kind == ELFv2 && Bits <= 2 * GPRBits) {
+ if (Bits == 0)
+ return ABIArgInfo::getIgnore();
+
+ llvm::Type *CoerceTy;
+ if (Bits > GPRBits) {
+ CoerceTy = llvm::IntegerType::get(getVMContext(), GPRBits);
+ CoerceTy = llvm::StructType::get(CoerceTy, CoerceTy, NULL);
+ } else
+ CoerceTy = llvm::IntegerType::get(getVMContext(),
+ llvm::RoundUpToAlignment(Bits, 8));
+ return ABIArgInfo::getDirect(CoerceTy);
+ }
+
+ // All other aggregates are returned indirectly.
return ABIArgInfo::getIndirect(0);
+ }
return (isPromotableTypeForABI(RetTy) ?
ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
@@ -2933,6 +3183,14 @@ llvm::Value *PPC64_SVR4_ABIInfo::EmitVAA
llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
+ // Handle types that require 16-byte alignment in the parameter save area.
+ if (isAlignedParamType(Ty)) {
+ llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int64Ty);
+ AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt64(15));
+ AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt64(-16));
+ Addr = Builder.CreateIntToPtr(AddrAsInt, BP, "ap.align");
+ }
+
// Update the va_list pointer. The pointer should be bumped by the
// size of the object. We can trust getTypeSize() except for a complex
// type whose base type is smaller than a doubleword. For these, the
@@ -2963,8 +3221,12 @@ llvm::Value *PPC64_SVR4_ABIInfo::EmitVAA
if (CplxBaseSize && CplxBaseSize < 8) {
llvm::Value *RealAddr = Builder.CreatePtrToInt(Addr, CGF.Int64Ty);
llvm::Value *ImagAddr = RealAddr;
- RealAddr = Builder.CreateAdd(RealAddr, Builder.getInt64(8 - CplxBaseSize));
- ImagAddr = Builder.CreateAdd(ImagAddr, Builder.getInt64(16 - CplxBaseSize));
+ if (CGF.CGM.getDataLayout().isBigEndian()) {
+ RealAddr = Builder.CreateAdd(RealAddr, Builder.getInt64(8 - CplxBaseSize));
+ ImagAddr = Builder.CreateAdd(ImagAddr, Builder.getInt64(16 - CplxBaseSize));
+ } else {
+ ImagAddr = Builder.CreateAdd(ImagAddr, Builder.getInt64(8));
+ }
llvm::Type *PBaseTy = llvm::PointerType::getUnqual(CGF.ConvertType(BaseTy));
RealAddr = Builder.CreateIntToPtr(RealAddr, PBaseTy);
ImagAddr = Builder.CreateIntToPtr(ImagAddr, PBaseTy);
@@ -2982,7 +3244,7 @@ llvm::Value *PPC64_SVR4_ABIInfo::EmitVAA
// If the argument is smaller than 8 bytes, it is right-adjusted in
// its doubleword slot. Adjust the pointer to pick it up from the
// correct offset.
- if (SizeInBytes < 8) {
+ if (SizeInBytes < 8 && CGF.CGM.getDataLayout().isBigEndian()) {
llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int64Ty);
AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt64(8 - SizeInBytes));
Addr = Builder.CreateIntToPtr(AddrAsInt, BP);
@@ -5550,13 +5812,20 @@ const TargetCodeGenInfo &CodeGenModule::
case llvm::Triple::ppc:
return *(TheTargetCodeGenInfo = new PPC32TargetCodeGenInfo(Types));
case llvm::Triple::ppc64:
- if (Triple.isOSBinFormatELF())
- return *(TheTargetCodeGenInfo = new PPC64_SVR4_TargetCodeGenInfo(Types));
- else
+ if (Triple.isOSBinFormatELF()) {
+ // FIXME: Should be switchable via command-line option.
+ PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv1;
+ return *(TheTargetCodeGenInfo =
+ new PPC64_SVR4_TargetCodeGenInfo(Types, Kind));
+ } else
return *(TheTargetCodeGenInfo = new PPC64TargetCodeGenInfo(Types));
- case llvm::Triple::ppc64le:
+ case llvm::Triple::ppc64le: {
assert(Triple.isOSBinFormatELF() && "PPC64 LE non-ELF not supported!");
- return *(TheTargetCodeGenInfo = new PPC64_SVR4_TargetCodeGenInfo(Types));
+ // FIXME: Should be switchable via command-line option.
+ PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv2;
+ return *(TheTargetCodeGenInfo =
+ new PPC64_SVR4_TargetCodeGenInfo(Types, Kind));
+ }
case llvm::Triple::nvptx:
case llvm::Triple::nvptx64:
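To make the ELFv2 classification above concrete, a hedged sketch
(commentary, not part of the patch; the struct names are hypothetical):

  /* Base = double, Members = 4: a homogeneous aggregate, so under ELFv2
     it is passed and returned directly as [4 x double]. */
  struct hfa { double a, b, c, d; };

  /* A complex float member counts as two float members, so this is also
     homogeneous (Base = float, Members = 4). */
  struct chfa { float _Complex x, y; };

  /* Nine doubles need nine registers; that exceeds the
     Members * NumRegs <= 8 limit, so this one takes the ordinary
     aggregate path instead. */
  struct not_hfa { double v[9]; };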
Index: llvm-suse/tools/clang/lib/Driver/ToolChains.cpp
===================================================================
--- llvm-suse.orig/tools/clang/lib/Driver/ToolChains.cpp
+++ llvm-suse/tools/clang/lib/Driver/ToolChains.cpp
@@ -2669,6 +2669,9 @@ void Linux::AddClangSystemIncludeArgs(co
const StringRef PPC64MultiarchIncludeDirs[] = {
"/usr/include/powerpc64-linux-gnu"
};
+ const StringRef PPC64LEMultiarchIncludeDirs[] = {
+ "/usr/include/powerpc64le-linux-gnu"
+ };
ArrayRef<StringRef> MultiarchIncludeDirs;
if (getTriple().getArch() == llvm::Triple::x86_64) {
MultiarchIncludeDirs = X86_64MultiarchIncludeDirs;
@@ -2689,6 +2692,8 @@ void Linux::AddClangSystemIncludeArgs(co
MultiarchIncludeDirs = PPCMultiarchIncludeDirs;
} else if (getTriple().getArch() == llvm::Triple::ppc64) {
MultiarchIncludeDirs = PPC64MultiarchIncludeDirs;
+ } else if (getTriple().getArch() == llvm::Triple::ppc64le) {
+ MultiarchIncludeDirs = PPC64LEMultiarchIncludeDirs;
}
for (ArrayRef<StringRef>::iterator I = MultiarchIncludeDirs.begin(),
E = MultiarchIncludeDirs.end();
Index: llvm-suse/tools/clang/lib/Driver/Tools.cpp
===================================================================
--- llvm-suse.orig/tools/clang/lib/Driver/Tools.cpp
+++ llvm-suse/tools/clang/lib/Driver/Tools.cpp
@@ -1159,6 +1159,7 @@ static std::string getPPCTargetCPU(const
.Case("power6", "pwr6")
.Case("power6x", "pwr6x")
.Case("power7", "pwr7")
+ .Case("power8", "pwr8")
.Case("pwr3", "pwr3")
.Case("pwr4", "pwr4")
.Case("pwr5", "pwr5")
@@ -1166,6 +1167,7 @@ static std::string getPPCTargetCPU(const
.Case("pwr6", "pwr6")
.Case("pwr6x", "pwr6x")
.Case("pwr7", "pwr7")
+ .Case("pwr8", "pwr8")
.Case("powerpc", "ppc")
.Case("powerpc64", "ppc64")
.Case("powerpc64le", "ppc64le")
@@ -6210,8 +6212,9 @@ void gnutools::Assemble::ConstructJob(Co
CmdArgs.push_back("-many");
} else if (getToolChain().getArch() == llvm::Triple::ppc64le) {
CmdArgs.push_back("-a64");
- CmdArgs.push_back("-mppc64le");
+ CmdArgs.push_back("-mppc64");
CmdArgs.push_back("-many");
+ CmdArgs.push_back("-mlittle-endian");
} else if (getToolChain().getArch() == llvm::Triple::arm) {
StringRef MArch = getToolChain().getArchName();
if (MArch == "armv7" || MArch == "armv7a" || MArch == "armv7-a")
@@ -6378,9 +6381,10 @@ static StringRef getLinuxDynamicLinker(c
} else if (ToolChain.getArch() == llvm::Triple::ppc)
return "/lib/ld.so.1";
else if (ToolChain.getArch() == llvm::Triple::ppc64 ||
- ToolChain.getArch() == llvm::Triple::ppc64le ||
ToolChain.getArch() == llvm::Triple::systemz)
return "/lib64/ld64.so.1";
+ else if (ToolChain.getArch() == llvm::Triple::ppc64le)
+ return "/lib64/ld64.so.2";
else
return "/lib64/ld-linux-x86-64.so.2";
}
@@ -6443,6 +6447,8 @@ void gnutools::Link::ConstructJob(Compil
CmdArgs.push_back("elf32ppclinux");
else if (ToolChain.getArch() == llvm::Triple::ppc64)
CmdArgs.push_back("elf64ppc");
+ else if (ToolChain.getArch() == llvm::Triple::ppc64le)
+ CmdArgs.push_back("elf64lppc");
else if (ToolChain.getArch() == llvm::Triple::mips)
CmdArgs.push_back("elf32btsmip");
else if (ToolChain.getArch() == llvm::Triple::mipsel)
Index: llvm-suse/tools/clang/lib/Headers/altivec.h
===================================================================
--- llvm-suse.orig/tools/clang/lib/Headers/altivec.h
+++ llvm-suse/tools/clang/lib/Headers/altivec.h
@@ -73,6 +73,9 @@ vec_perm(vector bool int __a, vector boo
static vector float __ATTRS_o_ai
vec_perm(vector float __a, vector float __b, vector unsigned char __c);
+static vector unsigned char __ATTRS_o_ai
+vec_xor(vector unsigned char __a, vector unsigned char __b);
+
/* vec_abs */
#define __builtin_altivec_abs_v16qi vec_abs
@@ -3485,30 +3488,49 @@ vec_mtvscr(vector float __a)
__builtin_altivec_mtvscr((vector int)__a);
}
+/* The vmulos* and vmules* instructions have a big-endian bias, so
+ we must reverse the meaning of "even" and "odd" for little endian. */
+
/* vec_mule */
static vector short __ATTRS_o_ai
vec_mule(vector signed char __a, vector signed char __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmulosb(__a, __b);
+#else
return __builtin_altivec_vmulesb(__a, __b);
+#endif
}
static vector unsigned short __ATTRS_o_ai
vec_mule(vector unsigned char __a, vector unsigned char __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmuloub(__a, __b);
+#else
return __builtin_altivec_vmuleub(__a, __b);
+#endif
}
static vector int __ATTRS_o_ai
vec_mule(vector short __a, vector short __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmulosh(__a, __b);
+#else
return __builtin_altivec_vmulesh(__a, __b);
+#endif
}
static vector unsigned int __ATTRS_o_ai
vec_mule(vector unsigned short __a, vector unsigned short __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmulouh(__a, __b);
+#else
return __builtin_altivec_vmuleuh(__a, __b);
+#endif
}
/* vec_vmulesb */
@@ -3516,7 +3538,11 @@ vec_mule(vector unsigned short __a, vect
static vector short __attribute__((__always_inline__))
vec_vmulesb(vector signed char __a, vector signed char __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmulosb(__a, __b);
+#else
return __builtin_altivec_vmulesb(__a, __b);
+#endif
}
/* vec_vmuleub */
@@ -3524,7 +3550,11 @@ vec_vmulesb(vector signed char __a, vect
static vector unsigned short __attribute__((__always_inline__))
vec_vmuleub(vector unsigned char __a, vector unsigned char __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmuloub(__a, __b);
+#else
return __builtin_altivec_vmuleub(__a, __b);
+#endif
}
/* vec_vmulesh */
@@ -3532,7 +3562,11 @@ vec_vmuleub(vector unsigned char __a, ve
static vector int __attribute__((__always_inline__))
vec_vmulesh(vector short __a, vector short __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmulosh(__a, __b);
+#else
return __builtin_altivec_vmulesh(__a, __b);
+#endif
}
/* vec_vmuleuh */
@@ -3540,7 +3574,11 @@ vec_vmulesh(vector short __a, vector sho
static vector unsigned int __attribute__((__always_inline__))
vec_vmuleuh(vector unsigned short __a, vector unsigned short __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmulouh(__a, __b);
+#else
return __builtin_altivec_vmuleuh(__a, __b);
+#endif
}
/* vec_mulo */
@@ -3548,25 +3586,41 @@ vec_vmuleuh(vector unsigned short __a, v
static vector short __ATTRS_o_ai
vec_mulo(vector signed char __a, vector signed char __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmulesb(__a, __b);
+#else
return __builtin_altivec_vmulosb(__a, __b);
+#endif
}
static vector unsigned short __ATTRS_o_ai
vec_mulo(vector unsigned char __a, vector unsigned char __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmuleub(__a, __b);
+#else
return __builtin_altivec_vmuloub(__a, __b);
+#endif
}
static vector int __ATTRS_o_ai
vec_mulo(vector short __a, vector short __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmulesh(__a, __b);
+#else
return __builtin_altivec_vmulosh(__a, __b);
+#endif
}
static vector unsigned int __ATTRS_o_ai
vec_mulo(vector unsigned short __a, vector unsigned short __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmuleuh(__a, __b);
+#else
return __builtin_altivec_vmulouh(__a, __b);
+#endif
}
/* vec_vmulosb */
@@ -3574,7 +3628,11 @@ vec_mulo(vector unsigned short __a, vect
static vector short __attribute__((__always_inline__))
vec_vmulosb(vector signed char __a, vector signed char __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmulesb(__a, __b);
+#else
return __builtin_altivec_vmulosb(__a, __b);
+#endif
}
/* vec_vmuloub */
@@ -3582,7 +3640,11 @@ vec_vmulosb(vector signed char __a, vect
static vector unsigned short __attribute__((__always_inline__))
vec_vmuloub(vector unsigned char __a, vector unsigned char __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmuleub(__a, __b);
+#else
return __builtin_altivec_vmuloub(__a, __b);
+#endif
}
/* vec_vmulosh */
@@ -3590,7 +3652,11 @@ vec_vmuloub(vector unsigned char __a, ve
static vector int __attribute__((__always_inline__))
vec_vmulosh(vector short __a, vector short __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmulesh(__a, __b);
+#else
return __builtin_altivec_vmulosh(__a, __b);
+#endif
}
/* vec_vmulouh */
@@ -3598,7 +3664,11 @@ vec_vmulosh(vector short __a, vector sho
static vector unsigned int __attribute__((__always_inline__))
vec_vmulouh(vector unsigned short __a, vector unsigned short __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmuleuh(__a, __b);
+#else
return __builtin_altivec_vmulouh(__a, __b);
+#endif
}
/* vec_nmsub */
@@ -4047,52 +4117,91 @@ vec_vor(vector float __a, vector bool in
/* vec_pack */
+/* The various vector pack instructions have a big-endian bias, so for
+ little endian we must handle reversed element numbering. */
+
static vector signed char __ATTRS_o_ai
vec_pack(vector signed short __a, vector signed short __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return (vector signed char)vec_perm(__a, __b, (vector unsigned char)
+ (0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E,
+ 0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E));
+#else
return (vector signed char)vec_perm(__a, __b, (vector unsigned char)
(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F,
0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F));
+#endif
}
static vector unsigned char __ATTRS_o_ai
vec_pack(vector unsigned short __a, vector unsigned short __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return (vector unsigned char)vec_perm(__a, __b, (vector unsigned char)
+ (0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E,
+ 0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E));
+#else
return (vector unsigned char)vec_perm(__a, __b, (vector unsigned char)
(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F,
0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F));
+#endif
}
static vector bool char __ATTRS_o_ai
vec_pack(vector bool short __a, vector bool short __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool char)vec_perm(__a, __b, (vector unsigned char)
+ (0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E,
+ 0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E));
+#else
return (vector bool char)vec_perm(__a, __b, (vector unsigned char)
(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F,
0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F));
+#endif
}
static vector short __ATTRS_o_ai
vec_pack(vector int __a, vector int __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return (vector short)vec_perm(__a, __b, (vector unsigned char)
+ (0x00, 0x01, 0x04, 0x05, 0x08, 0x09, 0x0C, 0x0D,
+ 0x10, 0x11, 0x14, 0x15, 0x18, 0x19, 0x1C, 0x1D));
+#else
return (vector short)vec_perm(__a, __b, (vector unsigned char)
(0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F,
0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F));
+#endif
}
static vector unsigned short __ATTRS_o_ai
vec_pack(vector unsigned int __a, vector unsigned int __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return (vector unsigned short)vec_perm(__a, __b, (vector unsigned char)
+ (0x00, 0x01, 0x04, 0x05, 0x08, 0x09, 0x0C, 0x0D,
+ 0x10, 0x11, 0x14, 0x15, 0x18, 0x19, 0x1C, 0x1D));
+#else
return (vector unsigned short)vec_perm(__a, __b, (vector unsigned char)
(0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F,
0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F));
+#endif
}
static vector bool short __ATTRS_o_ai
vec_pack(vector bool int __a, vector bool int __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool short)vec_perm(__a, __b, (vector unsigned char)
+ (0x00, 0x01, 0x04, 0x05, 0x08, 0x09, 0x0C, 0x0D,
+ 0x10, 0x11, 0x14, 0x15, 0x18, 0x19, 0x1C, 0x1D));
+#else
return (vector bool short)vec_perm(__a, __b, (vector unsigned char)
(0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F,
0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F));
+#endif
}
/* vec_vpkuhum */
@@ -4102,25 +4211,43 @@ vec_pack(vector bool int __a, vector boo
static vector signed char __ATTRS_o_ai
vec_vpkuhum(vector signed short __a, vector signed short __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return (vector signed char)vec_perm(__a, __b, (vector unsigned char)
+ (0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E,
+ 0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E));
+#else
return (vector signed char)vec_perm(__a, __b, (vector unsigned char)
(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F,
0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F));
+#endif
}
static vector unsigned char __ATTRS_o_ai
vec_vpkuhum(vector unsigned short __a, vector unsigned short __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return (vector unsigned char)vec_perm(__a, __b, (vector unsigned char)
+ (0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E,
+ 0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E));
+#else
return (vector unsigned char)vec_perm(__a, __b, (vector unsigned char)
(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F,
0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F));
+#endif
}
static vector bool char __ATTRS_o_ai
vec_vpkuhum(vector bool short __a, vector bool short __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool char)vec_perm(__a, __b, (vector unsigned char)
+ (0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E,
+ 0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E));
+#else
return (vector bool char)vec_perm(__a, __b, (vector unsigned char)
(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F,
0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F));
+#endif
}
/* vec_vpkuwum */
@@ -4130,25 +4257,43 @@ vec_vpkuhum(vector bool short __a, vecto
static vector short __ATTRS_o_ai
vec_vpkuwum(vector int __a, vector int __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return (vector short)vec_perm(__a, __b, (vector unsigned char)
+ (0x00, 0x01, 0x04, 0x05, 0x08, 0x09, 0x0C, 0x0D,
+ 0x10, 0x11, 0x14, 0x15, 0x18, 0x19, 0x1C, 0x1D));
+#else
return (vector short)vec_perm(__a, __b, (vector unsigned char)
(0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F,
0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F));
+#endif
}
static vector unsigned short __ATTRS_o_ai
vec_vpkuwum(vector unsigned int __a, vector unsigned int __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return (vector unsigned short)vec_perm(__a, __b, (vector unsigned char)
+ (0x00, 0x01, 0x04, 0x05, 0x08, 0x09, 0x0C, 0x0D,
+ 0x10, 0x11, 0x14, 0x15, 0x18, 0x19, 0x1C, 0x1D));
+#else
return (vector unsigned short)vec_perm(__a, __b, (vector unsigned char)
(0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F,
0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F));
+#endif
}
static vector bool short __ATTRS_o_ai
vec_vpkuwum(vector bool int __a, vector bool int __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool short)vec_perm(__a, __b, (vector unsigned char)
+ (0x00, 0x01, 0x04, 0x05, 0x08, 0x09, 0x0C, 0x0D,
+ 0x10, 0x11, 0x14, 0x15, 0x18, 0x19, 0x1C, 0x1D));
+#else
return (vector bool short)vec_perm(__a, __b, (vector unsigned char)
(0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F,
0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F));
+#endif
}
/* vec_packpx */
@@ -4156,7 +4301,11 @@ vec_vpkuwum(vector bool int __a, vector
static vector pixel __attribute__((__always_inline__))
vec_packpx(vector unsigned int __a, vector unsigned int __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return (vector pixel)__builtin_altivec_vpkpx(__b, __a);
+#else
return (vector pixel)__builtin_altivec_vpkpx(__a, __b);
+#endif
}
/* vec_vpkpx */
@@ -4164,7 +4313,11 @@ vec_packpx(vector unsigned int __a, vect
static vector pixel __attribute__((__always_inline__))
vec_vpkpx(vector unsigned int __a, vector unsigned int __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return (vector pixel)__builtin_altivec_vpkpx(__b, __a);
+#else
return (vector pixel)__builtin_altivec_vpkpx(__a, __b);
+#endif
}
/* vec_packs */
@@ -4172,25 +4325,41 @@ vec_vpkpx(vector unsigned int __a, vecto
static vector signed char __ATTRS_o_ai
vec_packs(vector short __a, vector short __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkshss(__b, __a);
+#else
return __builtin_altivec_vpkshss(__a, __b);
+#endif
}
static vector unsigned char __ATTRS_o_ai
vec_packs(vector unsigned short __a, vector unsigned short __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkuhus(__b, __a);
+#else
return __builtin_altivec_vpkuhus(__a, __b);
+#endif
}
static vector signed short __ATTRS_o_ai
vec_packs(vector int __a, vector int __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkswss(__b, __a);
+#else
return __builtin_altivec_vpkswss(__a, __b);
+#endif
}
static vector unsigned short __ATTRS_o_ai
vec_packs(vector unsigned int __a, vector unsigned int __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkuwus(__b, __a);
+#else
return __builtin_altivec_vpkuwus(__a, __b);
+#endif
}
/* vec_vpkshss */
@@ -4198,7 +4367,11 @@ vec_packs(vector unsigned int __a, vecto
static vector signed char __attribute__((__always_inline__))
vec_vpkshss(vector short __a, vector short __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkshss(__b, __a);
+#else
return __builtin_altivec_vpkshss(__a, __b);
+#endif
}
/* vec_vpkuhus */
@@ -4206,7 +4379,11 @@ vec_vpkshss(vector short __a, vector sho
static vector unsigned char __attribute__((__always_inline__))
vec_vpkuhus(vector unsigned short __a, vector unsigned short __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkuhus(__b, __a);
+#else
return __builtin_altivec_vpkuhus(__a, __b);
+#endif
}
/* vec_vpkswss */
@@ -4214,7 +4391,11 @@ vec_vpkuhus(vector unsigned short __a, v
static vector signed short __attribute__((__always_inline__))
vec_vpkswss(vector int __a, vector int __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkswss(__b, __a);
+#else
return __builtin_altivec_vpkswss(__a, __b);
+#endif
}
/* vec_vpkuwus */
@@ -4222,7 +4403,11 @@ vec_vpkswss(vector int __a, vector int _
static vector unsigned short __attribute__((__always_inline__))
vec_vpkuwus(vector unsigned int __a, vector unsigned int __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkuwus(__b, __a);
+#else
return __builtin_altivec_vpkuwus(__a, __b);
+#endif
}
/* vec_packsu */
@@ -4230,25 +4415,41 @@ vec_vpkuwus(vector unsigned int __a, vec
static vector unsigned char __ATTRS_o_ai
vec_packsu(vector short __a, vector short __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkshus(__b, __a);
+#else
return __builtin_altivec_vpkshus(__a, __b);
+#endif
}
static vector unsigned char __ATTRS_o_ai
vec_packsu(vector unsigned short __a, vector unsigned short __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkuhus(__b, __a);
+#else
return __builtin_altivec_vpkuhus(__a, __b);
+#endif
}
static vector unsigned short __ATTRS_o_ai
vec_packsu(vector int __a, vector int __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkswus(__b, __a);
+#else
return __builtin_altivec_vpkswus(__a, __b);
+#endif
}
static vector unsigned short __ATTRS_o_ai
vec_packsu(vector unsigned int __a, vector unsigned int __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkuwus(__b, __a);
+#else
return __builtin_altivec_vpkuwus(__a, __b);
+#endif
}
/* vec_vpkshus */
@@ -4256,13 +4457,21 @@ vec_packsu(vector unsigned int __a, vect
static vector unsigned char __ATTRS_o_ai
vec_vpkshus(vector short __a, vector short __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkshus(__b, __a);
+#else
return __builtin_altivec_vpkshus(__a, __b);
+#endif
}
static vector unsigned char __ATTRS_o_ai
vec_vpkshus(vector unsigned short __a, vector unsigned short __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkuhus(__b, __a);
+#else
return __builtin_altivec_vpkuhus(__a, __b);
+#endif
}
/* vec_vpkswus */
@@ -4270,22 +4479,46 @@ vec_vpkshus(vector unsigned short __a, v
static vector unsigned short __ATTRS_o_ai
vec_vpkswus(vector int __a, vector int __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkswus(__b, __a);
+#else
return __builtin_altivec_vpkswus(__a, __b);
+#endif
}
static vector unsigned short __ATTRS_o_ai
vec_vpkswus(vector unsigned int __a, vector unsigned int __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkuwus(__b, __a);
+#else
return __builtin_altivec_vpkuwus(__a, __b);
+#endif
}
/* vec_perm */
+// The vperm instruction is defined architecturally with a big-endian bias.
+// For little endian, we swap the input operands and invert the permute
+// control vector. Only the rightmost 5 bits matter, so we could use
+// a vector of all 31s instead of all 255s to perform the inversion.
+// However, when the PCV is not a constant, using 255 has an advantage
+// in that the vec_xor can be recognized as a vec_nor (and for P8 and
+// later, possibly a vec_nand).
+
vector signed char __ATTRS_o_ai
vec_perm(vector signed char __a, vector signed char __b, vector unsigned char __c)
{
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255,255,255,255,255,255,255,255,
+ 255,255,255,255,255,255,255,255};
+ __d = vec_xor(__c, __d);
+ return (vector signed char)
+ __builtin_altivec_vperm_4si((vector int)__b, (vector int)__a, __d);
+#else
return (vector signed char)
__builtin_altivec_vperm_4si((vector int)__a, (vector int)__b, __c);
+#endif
}
vector unsigned char __ATTRS_o_ai
@@ -4293,22 +4526,46 @@ vec_perm(vector unsigned char __a,
vector unsigned char __b,
vector unsigned char __c)
{
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255,255,255,255,255,255,255,255,
+ 255,255,255,255,255,255,255,255};
+ __d = vec_xor(__c, __d);
+ return (vector unsigned char)
+ __builtin_altivec_vperm_4si((vector int)__b, (vector int)__a, __d);
+#else
return (vector unsigned char)
__builtin_altivec_vperm_4si((vector int)__a, (vector int)__b, __c);
+#endif
}
vector bool char __ATTRS_o_ai
vec_perm(vector bool char __a, vector bool char __b, vector unsigned char __c)
{
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255,255,255,255,255,255,255,255,
+ 255,255,255,255,255,255,255,255};
+ __d = vec_xor(__c, __d);
+ return (vector bool char)
+ __builtin_altivec_vperm_4si((vector int)__b, (vector int)__a, __d);
+#else
return (vector bool char)
__builtin_altivec_vperm_4si((vector int)__a, (vector int)__b, __c);
+#endif
}
vector short __ATTRS_o_ai
vec_perm(vector short __a, vector short __b, vector unsigned char __c)
{
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255,255,255,255,255,255,255,255,
+ 255,255,255,255,255,255,255,255};
+ __d = vec_xor(__c, __d);
+ return (vector short)
+ __builtin_altivec_vperm_4si((vector int)__b, (vector int)__a, __d);
+#else
return (vector short)
__builtin_altivec_vperm_4si((vector int)__a, (vector int)__b, __c);
+#endif
}
vector unsigned short __ATTRS_o_ai
@@ -4316,49 +4573,104 @@ vec_perm(vector unsigned short __a,
vector unsigned short __b,
vector unsigned char __c)
{
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255,255,255,255,255,255,255,255,
+ 255,255,255,255,255,255,255,255};
+ __d = vec_xor(__c, __d);
+ return (vector unsigned short)
+ __builtin_altivec_vperm_4si((vector int)__b, (vector int)__a, __d);
+#else
return (vector unsigned short)
__builtin_altivec_vperm_4si((vector int)__a, (vector int)__b, __c);
+#endif
}
vector bool short __ATTRS_o_ai
vec_perm(vector bool short __a, vector bool short __b, vector unsigned char __c)
{
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255,255,255,255,255,255,255,255,
+ 255,255,255,255,255,255,255,255};
+ __d = vec_xor(__c, __d);
+ return (vector bool short)
+ __builtin_altivec_vperm_4si((vector int)__b, (vector int)__a, __d);
+#else
return (vector bool short)
__builtin_altivec_vperm_4si((vector int)__a, (vector int)__b, __c);
+#endif
}
vector pixel __ATTRS_o_ai
vec_perm(vector pixel __a, vector pixel __b, vector unsigned char __c)
{
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255,255,255,255,255,255,255,255,
+ 255,255,255,255,255,255,255,255};
+ __d = vec_xor(__c, __d);
+ return (vector pixel)
+ __builtin_altivec_vperm_4si((vector int)__b, (vector int)__a, __d);
+#else
return (vector pixel)
__builtin_altivec_vperm_4si((vector int)__a, (vector int)__b, __c);
+#endif
}
vector int __ATTRS_o_ai
vec_perm(vector int __a, vector int __b, vector unsigned char __c)
{
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255,255,255,255,255,255,255,255,
+ 255,255,255,255,255,255,255,255};
+ __d = vec_xor(__c, __d);
+ return (vector int)__builtin_altivec_vperm_4si(__b, __a, __d);
+#else
return (vector int)__builtin_altivec_vperm_4si(__a, __b, __c);
+#endif
}
vector unsigned int __ATTRS_o_ai
vec_perm(vector unsigned int __a, vector unsigned int __b, vector unsigned char __c)
{
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255,255,255,255,255,255,255,255,
+ 255,255,255,255,255,255,255,255};
+ __d = vec_xor(__c, __d);
+ return (vector unsigned int)
+ __builtin_altivec_vperm_4si((vector int)__b, (vector int)__a, __d);
+#else
return (vector unsigned int)
__builtin_altivec_vperm_4si((vector int)__a, (vector int)__b, __c);
+#endif
}
vector bool int __ATTRS_o_ai
vec_perm(vector bool int __a, vector bool int __b, vector unsigned char __c)
{
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255,255,255,255,255,255,255,255,
+ 255,255,255,255,255,255,255,255};
+ __d = vec_xor(__c, __d);
+ return (vector bool int)
+ __builtin_altivec_vperm_4si((vector int)__b, (vector int)__a, __d);
+#else
return (vector bool int)
__builtin_altivec_vperm_4si((vector int)__a, (vector int)__b, __c);
+#endif
}
vector float __ATTRS_o_ai
vec_perm(vector float __a, vector float __b, vector unsigned char __c)
{
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255,255,255,255,255,255,255,255,
+ 255,255,255,255,255,255,255,255};
+ __d = vec_xor(__c, __d);
+ return (vector float)
+ __builtin_altivec_vperm_4si((vector int)__b, (vector int)__a, __d);
+#else
return (vector float)
__builtin_altivec_vperm_4si((vector int)__a, (vector int)__b, __c);
+#endif
}
/* vec_vperm */
@@ -4366,8 +4678,7 @@ vec_perm(vector float __a, vector float
static vector signed char __ATTRS_o_ai
vec_vperm(vector signed char __a, vector signed char __b, vector unsigned char __c)
{
- return (vector signed char)
- __builtin_altivec_vperm_4si((vector int)__a, (vector int)__b, __c);
+ return vec_perm(__a, __b, __c);
}
static vector unsigned char __ATTRS_o_ai
@@ -4375,22 +4686,19 @@ vec_vperm(vector unsigned char __a,
vector unsigned char __b,
vector unsigned char __c)
{
- return (vector unsigned char)
- __builtin_altivec_vperm_4si((vector int)__a, (vector int)__b, __c);
+ return vec_perm(__a, __b, __c);
}
static vector bool char __ATTRS_o_ai
vec_vperm(vector bool char __a, vector bool char __b, vector unsigned char __c)
{
- return (vector bool char)
- __builtin_altivec_vperm_4si((vector int)__a, (vector int)__b, __c);
+ return vec_perm(__a, __b, __c);
}
static vector short __ATTRS_o_ai
vec_vperm(vector short __a, vector short __b, vector unsigned char __c)
{
- return (vector short)
- __builtin_altivec_vperm_4si((vector int)__a, (vector int)__b, __c);
+ return vec_perm(__a, __b, __c);
}
static vector unsigned short __ATTRS_o_ai
@@ -4398,49 +4706,43 @@ vec_vperm(vector unsigned short __a,
vector unsigned short __b,
vector unsigned char __c)
{
- return (vector unsigned short)
- __builtin_altivec_vperm_4si((vector int)__a, (vector int)__b, __c);
+ return vec_perm(__a, __b, __c);
}
static vector bool short __ATTRS_o_ai
vec_vperm(vector bool short __a, vector bool short __b, vector unsigned char __c)
{
- return (vector bool short)
- __builtin_altivec_vperm_4si((vector int)__a, (vector int)__b, __c);
+ return vec_perm(__a, __b, __c);
}
static vector pixel __ATTRS_o_ai
vec_vperm(vector pixel __a, vector pixel __b, vector unsigned char __c)
{
- return (vector pixel)
- __builtin_altivec_vperm_4si((vector int)__a, (vector int)__b, __c);
+ return vec_perm(__a, __b, __c);
}
static vector int __ATTRS_o_ai
vec_vperm(vector int __a, vector int __b, vector unsigned char __c)
{
- return (vector int)__builtin_altivec_vperm_4si(__a, __b, __c);
+ return vec_perm(__a, __b, __c);
}
static vector unsigned int __ATTRS_o_ai
vec_vperm(vector unsigned int __a, vector unsigned int __b, vector unsigned char __c)
{
- return (vector unsigned int)
- __builtin_altivec_vperm_4si((vector int)__a, (vector int)__b, __c);
+ return vec_perm(__a, __b, __c);
}
static vector bool int __ATTRS_o_ai
vec_vperm(vector bool int __a, vector bool int __b, vector unsigned char __c)
{
- return (vector bool int)
- __builtin_altivec_vperm_4si((vector int)__a, (vector int)__b, __c);
+ return vec_perm(__a, __b, __c);
}
static vector float __ATTRS_o_ai
vec_vperm(vector float __a, vector float __b, vector unsigned char __c)
{
- return (vector float)
- __builtin_altivec_vperm_4si((vector int)__a, (vector int)__b, __c);
+ return vec_perm(__a, __b, __c);
}
/* vec_re */
@@ -8054,10 +8356,26 @@ vec_vsum4shs(vector signed short __a, ve
/* vec_sum2s */
+/* The vsum2sws instruction has a big-endian bias, so that the second
+ input vector and the result always reference big-endian elements
+   1 and 3 (little-endian elements 0 and 2). For ease of porting, the
+ programmer wants elements 1 and 3 in both cases, so for little
+ endian we must perform some permutes. */
+
static vector signed int __attribute__((__always_inline__))
vec_sum2s(vector int __a, vector int __b)
{
+#ifdef __LITTLE_ENDIAN__
+ vector int __c = (vector signed int)
+ vec_perm(__b, __b, (vector unsigned char)
+ (4,5,6,7,0,1,2,3,12,13,14,15,8,9,10,11));
+ __c = __builtin_altivec_vsum2sws(__a, __c);
+ return (vector signed int)
+ vec_perm(__c, __c, (vector unsigned char)
+ (4,5,6,7,0,1,2,3,12,13,14,15,8,9,10,11));
+#else
return __builtin_altivec_vsum2sws(__a, __b);
+#endif
}
/* vec_vsum2sws */
@@ -8065,15 +8383,37 @@ vec_sum2s(vector int __a, vector int __b
static vector signed int __attribute__((__always_inline__))
vec_vsum2sws(vector int __a, vector int __b)
{
+#ifdef __LITTLE_ENDIAN__
+ vector int __c = (vector signed int)
+ vec_perm(__b, __b, (vector unsigned char)
+ (4,5,6,7,0,1,2,3,12,13,14,15,8,9,10,11));
+ __c = __builtin_altivec_vsum2sws(__a, __c);
+ return (vector signed int)
+ vec_perm(__c, __c, (vector unsigned char)
+ (4,5,6,7,0,1,2,3,12,13,14,15,8,9,10,11));
+#else
return __builtin_altivec_vsum2sws(__a, __b);
+#endif
}
/* vec_sums */
+/* The vsumsws instruction has a big-endian bias, so that the second
+ input vector and the result always reference big-endian element 3
+   (little-endian element 0). For ease of porting, the programmer
+ wants element 3 in both cases, so for little endian we must perform
+ some permutes. */
+
static vector signed int __attribute__((__always_inline__))
vec_sums(vector signed int __a, vector signed int __b)
{
+#ifdef __LITTLE_ENDIAN__
+ __b = (vector signed int)vec_splat(__b, 3);
+ __b = __builtin_altivec_vsumsws(__a, __b);
+ return (vector signed int)(0, 0, 0, __b[0]);
+#else
return __builtin_altivec_vsumsws(__a, __b);
+#endif
}
/* vec_vsumsws */
@@ -8081,7 +8421,13 @@ vec_sums(vector signed int __a, vector s
static vector signed int __attribute__((__always_inline__))
vec_vsumsws(vector signed int __a, vector signed int __b)
{
+#ifdef __LITTLE_ENDIAN__
+ __b = (vector signed int)vec_splat(__b, 3);
+ __b = __builtin_altivec_vsumsws(__a, __b);
+ return (vector signed int)(0, 0, 0, __b[0]);
+#else
return __builtin_altivec_vsumsws(__a, __b);
+#endif
}
/* vec_trunc */
@@ -8102,34 +8448,57 @@ vec_vrfiz(vector float __a)
/* vec_unpackh */
+/* The vector unpack instructions all have a big-endian bias, so for
+ little endian we must reverse the meanings of "high" and "low." */
+
static vector short __ATTRS_o_ai
vec_unpackh(vector signed char __a)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vupklsb((vector char)__a);
+#else
return __builtin_altivec_vupkhsb((vector char)__a);
+#endif
}
static vector bool short __ATTRS_o_ai
vec_unpackh(vector bool char __a)
{
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool short)__builtin_altivec_vupklsb((vector char)__a);
+#else
return (vector bool short)__builtin_altivec_vupkhsb((vector char)__a);
+#endif
}
static vector int __ATTRS_o_ai
vec_unpackh(vector short __a)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vupklsh(__a);
+#else
return __builtin_altivec_vupkhsh(__a);
+#endif
}
static vector bool int __ATTRS_o_ai
vec_unpackh(vector bool short __a)
{
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool int)__builtin_altivec_vupklsh((vector short)__a);
+#else
return (vector bool int)__builtin_altivec_vupkhsh((vector short)__a);
+#endif
}
static vector unsigned int __ATTRS_o_ai
vec_unpackh(vector pixel __a)
{
- return (vector unsigned int)__builtin_altivec_vupkhsh((vector short)__a);
+#ifdef __LITTLE_ENDIAN__
+ return (vector unsigned int)__builtin_altivec_vupklpx((vector short)__a);
+#else
+ return (vector unsigned int)__builtin_altivec_vupkhpx((vector short)__a);
+#endif
}
/* vec_vupkhsb */
@@ -8137,13 +8506,21 @@ vec_unpackh(vector pixel __a)
static vector short __ATTRS_o_ai
vec_vupkhsb(vector signed char __a)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vupklsb((vector char)__a);
+#else
return __builtin_altivec_vupkhsb((vector char)__a);
+#endif
}
static vector bool short __ATTRS_o_ai
vec_vupkhsb(vector bool char __a)
{
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool short)__builtin_altivec_vupklsb((vector char)__a);
+#else
return (vector bool short)__builtin_altivec_vupkhsb((vector char)__a);
+#endif
}
/* vec_vupkhsh */
@@ -8151,19 +8528,31 @@ vec_vupkhsb(vector bool char __a)
static vector int __ATTRS_o_ai
vec_vupkhsh(vector short __a)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vupklsh(__a);
+#else
return __builtin_altivec_vupkhsh(__a);
+#endif
}
static vector bool int __ATTRS_o_ai
vec_vupkhsh(vector bool short __a)
{
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool int)__builtin_altivec_vupklsh((vector short)__a);
+#else
return (vector bool int)__builtin_altivec_vupkhsh((vector short)__a);
+#endif
}
static vector unsigned int __ATTRS_o_ai
vec_vupkhsh(vector pixel __a)
{
- return (vector unsigned int)__builtin_altivec_vupkhsh((vector short)__a);
+#ifdef __LITTLE_ENDIAN__
+ return (vector unsigned int)__builtin_altivec_vupklpx((vector short)__a);
+#else
+ return (vector unsigned int)__builtin_altivec_vupkhpx((vector short)__a);
+#endif
}
/* vec_unpackl */
@@ -8171,31 +8560,51 @@ vec_vupkhsh(vector pixel __a)
static vector short __ATTRS_o_ai
vec_unpackl(vector signed char __a)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vupkhsb((vector char)__a);
+#else
return __builtin_altivec_vupklsb((vector char)__a);
+#endif
}
static vector bool short __ATTRS_o_ai
vec_unpackl(vector bool char __a)
{
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool short)__builtin_altivec_vupkhsb((vector char)__a);
+#else
return (vector bool short)__builtin_altivec_vupklsb((vector char)__a);
+#endif
}
static vector int __ATTRS_o_ai
vec_unpackl(vector short __a)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vupkhsh(__a);
+#else
return __builtin_altivec_vupklsh(__a);
+#endif
}
static vector bool int __ATTRS_o_ai
vec_unpackl(vector bool short __a)
{
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool int)__builtin_altivec_vupkhsh((vector short)__a);
+#else
return (vector bool int)__builtin_altivec_vupklsh((vector short)__a);
+#endif
}
static vector unsigned int __ATTRS_o_ai
vec_unpackl(vector pixel __a)
{
- return (vector unsigned int)__builtin_altivec_vupklsh((vector short)__a);
+#ifdef __LITTLE_ENDIAN__
+ return (vector unsigned int)__builtin_altivec_vupkhpx((vector short)__a);
+#else
+ return (vector unsigned int)__builtin_altivec_vupklpx((vector short)__a);
+#endif
}
/* vec_vupklsb */
@@ -8203,13 +8612,21 @@ vec_unpackl(vector pixel __a)
static vector short __ATTRS_o_ai
vec_vupklsb(vector signed char __a)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vupkhsb((vector char)__a);
+#else
return __builtin_altivec_vupklsb((vector char)__a);
+#endif
}
static vector bool short __ATTRS_o_ai
vec_vupklsb(vector bool char __a)
{
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool short)__builtin_altivec_vupkhsb((vector char)__a);
+#else
return (vector bool short)__builtin_altivec_vupklsb((vector char)__a);
+#endif
}
/* vec_vupklsh */
@@ -8217,19 +8634,31 @@ vec_vupklsb(vector bool char __a)
static vector int __ATTRS_o_ai
vec_vupklsh(vector short __a)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vupkhsh(__a);
+#else
return __builtin_altivec_vupklsh(__a);
+#endif
}
static vector bool int __ATTRS_o_ai
vec_vupklsh(vector bool short __a)
{
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool int)__builtin_altivec_vupkhsh((vector short)__a);
+#else
return (vector bool int)__builtin_altivec_vupklsh((vector short)__a);
+#endif
}
static vector unsigned int __ATTRS_o_ai
vec_vupklsh(vector pixel __a)
{
- return (vector unsigned int)__builtin_altivec_vupklsh((vector short)__a);
+#ifdef __LITTLE_ENDIAN__
+ return (vector unsigned int)__builtin_altivec_vupkhpx((vector short)__a);
+#else
+ return (vector unsigned int)__builtin_altivec_vupklpx((vector short)__a);
+#endif
}
/* vec_xor */
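A small sanity check of the vec_perm strategy described in the hunks above
(commentary, not part of the patch): with the operand swap and
control-vector inversion, identity indices keep selecting the first
operand on either endianness.

  #include <altivec.h>

  /* Returns va unchanged: indices 0..15 select elements 0..15 of a||b.
     On big endian this maps directly onto vperm; on little endian the
     patched vec_perm computes vperm(vb, va, ~idx), which picks the same
     bytes under the reversed element numbering. */
  vector signed char first_operand(vector signed char va,
                                   vector signed char vb)
  {
    vector unsigned char idx =
        { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 };
    return vec_perm(va, vb, idx);
  }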
Index: llvm-suse/tools/clang/test/CodeGen/builtins-ppc-altivec.c
===================================================================
--- llvm-suse.orig/tools/clang/test/CodeGen/builtins-ppc-altivec.c
+++ llvm-suse/tools/clang/test/CodeGen/builtins-ppc-altivec.c
@@ -1,5 +1,7 @@
// REQUIRES: ppc32-registered-target
// RUN: %clang_cc1 -faltivec -triple powerpc-unknown-unknown -emit-llvm %s -o - | FileCheck %s
+// RUN: %clang_cc1 -faltivec -triple powerpc64-unknown-unknown -emit-llvm %s -o - | FileCheck %s
+// RUN: %clang_cc1 -faltivec -triple powerpc64le-unknown-unknown -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK-LE
vector bool char vbc = { 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0 };
vector signed char vsc = { 1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12, 13, -14, 15, -16 };
@@ -45,1267 +47,3737 @@ int res_f;
void test1() {
/* vec_abs */
- vsc = vec_abs(vsc); // CHECK: sub <16 x i8> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vmaxsb
-
- vs = vec_abs(vs); // CHECK: sub <8 x i16> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vmaxsh
-
- vi = vec_abs(vi); // CHECK: sub <4 x i32> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vmaxsw
-
- vf = vec_abs(vf); // CHECK: and <4 x i32>
+ vsc = vec_abs(vsc);
+// CHECK: sub <16 x i8> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vmaxsb
+// CHECK-LE: sub <16 x i8> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vmaxsb
+
+ vs = vec_abs(vs);
+// CHECK: sub <8 x i16> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vmaxsh
+// CHECK-LE: sub <8 x i16> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vmaxsh
+
+ vi = vec_abs(vi);
+// CHECK: sub <4 x i32> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vmaxsw
+// CHECK-LE: sub <4 x i32> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vmaxsw
+
+ vf = vec_abs(vf);
+// CHECK: and <4 x i32>
+// CHECK-LE: and <4 x i32>
/* vec_abs */
- vsc = vec_abss(vsc); // CHECK: @llvm.ppc.altivec.vsubsbs
- // CHECK: @llvm.ppc.altivec.vmaxsb
-
- vs = vec_abss(vs); // CHECK: @llvm.ppc.altivec.vsubshs
- // CHECK: @llvm.ppc.altivec.vmaxsh
-
- vi = vec_abss(vi); // CHECK: @llvm.ppc.altivec.vsubsws
- // CHECK: @llvm.ppc.altivec.vmaxsw
+ vsc = vec_abss(vsc);
+// CHECK: @llvm.ppc.altivec.vsubsbs
+// CHECK: @llvm.ppc.altivec.vmaxsb
+// CHECK-LE: @llvm.ppc.altivec.vsubsbs
+// CHECK-LE: @llvm.ppc.altivec.vmaxsb
+
+ vs = vec_abss(vs);
+// CHECK: @llvm.ppc.altivec.vsubshs
+// CHECK: @llvm.ppc.altivec.vmaxsh
+// CHECK-LE: @llvm.ppc.altivec.vsubshs
+// CHECK-LE: @llvm.ppc.altivec.vmaxsh
+
+ vi = vec_abss(vi);
+// CHECK: @llvm.ppc.altivec.vsubsws
+// CHECK: @llvm.ppc.altivec.vmaxsw
+// CHECK-LE: @llvm.ppc.altivec.vsubsws
+// CHECK-LE: @llvm.ppc.altivec.vmaxsw
/* vec_add */
- res_vsc = vec_add(vsc, vsc); // CHECK: add <16 x i8>
- res_vsc = vec_add(vbc, vsc); // CHECK: add <16 x i8>
- res_vsc = vec_add(vsc, vbc); // CHECK: add <16 x i8>
- res_vuc = vec_add(vuc, vuc); // CHECK: add <16 x i8>
- res_vuc = vec_add(vbc, vuc); // CHECK: add <16 x i8>
- res_vuc = vec_add(vuc, vbc); // CHECK: add <16 x i8>
- res_vs = vec_add(vs, vs); // CHECK: add <8 x i16>
- res_vs = vec_add(vbs, vs); // CHECK: add <8 x i16>
- res_vs = vec_add(vs, vbs); // CHECK: add <8 x i16>
- res_vus = vec_add(vus, vus); // CHECK: add <8 x i16>
- res_vus = vec_add(vbs, vus); // CHECK: add <8 x i16>
- res_vus = vec_add(vus, vbs); // CHECK: add <8 x i16>
- res_vi = vec_add(vi, vi); // CHECK: add <4 x i32>
- res_vi = vec_add(vbi, vi); // CHECK: add <4 x i32>
- res_vi = vec_add(vi, vbi); // CHECK: add <4 x i32>
- res_vui = vec_add(vui, vui); // CHECK: add <4 x i32>
- res_vui = vec_add(vbi, vui); // CHECK: add <4 x i32>
- res_vui = vec_add(vui, vbi); // CHECK: add <4 x i32>
- res_vf = vec_add(vf, vf); // CHECK: fadd <4 x float>
- res_vsc = vec_vaddubm(vsc, vsc); // CHECK: add <16 x i8>
- res_vsc = vec_vaddubm(vbc, vsc); // CHECK: add <16 x i8>
- res_vsc = vec_vaddubm(vsc, vbc); // CHECK: add <16 x i8>
- res_vuc = vec_vaddubm(vuc, vuc); // CHECK: add <16 x i8>
- res_vuc = vec_vaddubm(vbc, vuc); // CHECK: add <16 x i8>
- res_vuc = vec_vaddubm(vuc, vbc); // CHECK: add <16 x i8>
- res_vs = vec_vadduhm(vs, vs); // CHECK: add <8 x i16>
- res_vs = vec_vadduhm(vbs, vs); // CHECK: add <8 x i16>
- res_vs = vec_vadduhm(vs, vbs); // CHECK: add <8 x i16>
- res_vus = vec_vadduhm(vus, vus); // CHECK: add <8 x i16>
- res_vus = vec_vadduhm(vbs, vus); // CHECK: add <8 x i16>
- res_vus = vec_vadduhm(vus, vbs); // CHECK: add <8 x i16>
- res_vi = vec_vadduwm(vi, vi); // CHECK: add <4 x i32>
- res_vi = vec_vadduwm(vbi, vi); // CHECK: add <4 x i32>
- res_vi = vec_vadduwm(vi, vbi); // CHECK: add <4 x i32>
- res_vui = vec_vadduwm(vui, vui); // CHECK: add <4 x i32>
- res_vui = vec_vadduwm(vbi, vui); // CHECK: add <4 x i32>
- res_vui = vec_vadduwm(vui, vbi); // CHECK: add <4 x i32>
- res_vf = vec_vaddfp(vf, vf); // CHECK: fadd <4 x float>
+ res_vsc = vec_add(vsc, vsc);
+// CHECK: add <16 x i8>
+// CHECK-LE: add <16 x i8>
+
+ res_vsc = vec_add(vbc, vsc);
+// CHECK: add <16 x i8>
+// CHECK-LE: add <16 x i8>
+
+ res_vsc = vec_add(vsc, vbc);
+// CHECK: add <16 x i8>
+// CHECK-LE: add <16 x i8>
+
+ res_vuc = vec_add(vuc, vuc);
+// CHECK: add <16 x i8>
+// CHECK-LE: add <16 x i8>
+
+ res_vuc = vec_add(vbc, vuc);
+// CHECK: add <16 x i8>
+// CHECK-LE: add <16 x i8>
+
+ res_vuc = vec_add(vuc, vbc);
+// CHECK: add <16 x i8>
+// CHECK-LE: add <16 x i8>
+
+ res_vs = vec_add(vs, vs);
+// CHECK: add <8 x i16>
+// CHECK-LE: add <8 x i16>
+
+ res_vs = vec_add(vbs, vs);
+// CHECK: add <8 x i16>
+// CHECK-LE: add <8 x i16>
+
+ res_vs = vec_add(vs, vbs);
+// CHECK: add <8 x i16>
+// CHECK-LE: add <8 x i16>
+
+ res_vus = vec_add(vus, vus);
+// CHECK: add <8 x i16>
+// CHECK-LE: add <8 x i16>
+
+ res_vus = vec_add(vbs, vus);
+// CHECK: add <8 x i16>
+// CHECK-LE: add <8 x i16>
+
+ res_vus = vec_add(vus, vbs);
+// CHECK: add <8 x i16>
+// CHECK-LE: add <8 x i16>
+
+ res_vi = vec_add(vi, vi);
+// CHECK: add <4 x i32>
+// CHECK-LE: add <4 x i32>
+
+ res_vi = vec_add(vbi, vi);
+// CHECK: add <4 x i32>
+// CHECK-LE: add <4 x i32>
+
+ res_vi = vec_add(vi, vbi);
+// CHECK: add <4 x i32>
+// CHECK-LE: add <4 x i32>
+
+ res_vui = vec_add(vui, vui);
+// CHECK: add <4 x i32>
+// CHECK-LE: add <4 x i32>
+
+ res_vui = vec_add(vbi, vui);
+// CHECK: add <4 x i32>
+// CHECK-LE: add <4 x i32>
+
+ res_vui = vec_add(vui, vbi);
+// CHECK: add <4 x i32>
+// CHECK-LE: add <4 x i32>
+
+ res_vf = vec_add(vf, vf);
+// CHECK: fadd <4 x float>
+// CHECK-LE: fadd <4 x float>
+
+ res_vsc = vec_vaddubm(vsc, vsc);
+// CHECK: add <16 x i8>
+// CHECK-LE: add <16 x i8>
+
+ res_vsc = vec_vaddubm(vbc, vsc);
+// CHECK: add <16 x i8>
+// CHECK-LE: add <16 x i8>
+
+ res_vsc = vec_vaddubm(vsc, vbc);
+// CHECK: add <16 x i8>
+// CHECK-LE: add <16 x i8>
+
+ res_vuc = vec_vaddubm(vuc, vuc);
+// CHECK: add <16 x i8>
+// CHECK-LE: add <16 x i8>
+
+ res_vuc = vec_vaddubm(vbc, vuc);
+// CHECK: add <16 x i8>
+// CHECK-LE: add <16 x i8>
+
+ res_vuc = vec_vaddubm(vuc, vbc);
+// CHECK: add <16 x i8>
+// CHECK-LE: add <16 x i8>
+
+ res_vs = vec_vadduhm(vs, vs);
+// CHECK: add <8 x i16>
+// CHECK-LE: add <8 x i16>
+
+ res_vs = vec_vadduhm(vbs, vs);
+// CHECK: add <8 x i16>
+// CHECK-LE: add <8 x i16>
+
+ res_vs = vec_vadduhm(vs, vbs);
+// CHECK: add <8 x i16>
+// CHECK-LE: add <8 x i16>
+
+ res_vus = vec_vadduhm(vus, vus);
+// CHECK: add <8 x i16>
+// CHECK-LE: add <8 x i16>
+
+ res_vus = vec_vadduhm(vbs, vus);
+// CHECK: add <8 x i16>
+// CHECK-LE: add <8 x i16>
+
+ res_vus = vec_vadduhm(vus, vbs);
+// CHECK: add <8 x i16>
+// CHECK-LE: add <8 x i16>
+
+ res_vi = vec_vadduwm(vi, vi);
+// CHECK: add <4 x i32>
+// CHECK-LE: add <4 x i32>
+
+ res_vi = vec_vadduwm(vbi, vi);
+// CHECK: add <4 x i32>
+// CHECK-LE: add <4 x i32>
+
+ res_vi = vec_vadduwm(vi, vbi);
+// CHECK: add <4 x i32>
+// CHECK-LE: add <4 x i32>
+
+ res_vui = vec_vadduwm(vui, vui);
+// CHECK: add <4 x i32>
+// CHECK-LE: add <4 x i32>
+
+ res_vui = vec_vadduwm(vbi, vui);
+// CHECK: add <4 x i32>
+// CHECK-LE: add <4 x i32>
+
+ res_vui = vec_vadduwm(vui, vbi);
+// CHECK: add <4 x i32>
+// CHECK-LE: add <4 x i32>
+
+ res_vf = vec_vaddfp(vf, vf);
+// CHECK: fadd <4 x float>
+// CHECK-LE: fadd <4 x float>
/* vec_addc */
- res_vui = vec_addc(vui, vui); // HECK: @llvm.ppc.altivec.vaddcuw
- res_vui = vec_vaddcuw(vui, vui); // HECK: @llvm.ppc.altivec.vaddcuw
+ res_vui = vec_addc(vui, vui);
+// CHECK: @llvm.ppc.altivec.vaddcuw
+// CHECK-LE: @llvm.ppc.altivec.vaddcuw
+
+ res_vui = vec_vaddcuw(vui, vui);
+// CHECK: @llvm.ppc.altivec.vaddcuw
+// CHECK-LE: @llvm.ppc.altivec.vaddcuw
/* vec_adds */
- res_vsc = vec_adds(vsc, vsc); // CHECK: @llvm.ppc.altivec.vaddsbs
- res_vsc = vec_adds(vbc, vsc); // CHECK: @llvm.ppc.altivec.vaddsbs
- res_vsc = vec_adds(vsc, vbc); // CHECK: @llvm.ppc.altivec.vaddsbs
- res_vuc = vec_adds(vuc, vuc); // CHECK: @llvm.ppc.altivec.vaddubs
- res_vuc = vec_adds(vbc, vuc); // CHECK: @llvm.ppc.altivec.vaddubs
- res_vuc = vec_adds(vuc, vbc); // CHECK: @llvm.ppc.altivec.vaddubs
- res_vs = vec_adds(vs, vs); // CHECK: @llvm.ppc.altivec.vaddshs
- res_vs = vec_adds(vbs, vs); // CHECK: @llvm.ppc.altivec.vaddshs
- res_vs = vec_adds(vs, vbs); // CHECK: @llvm.ppc.altivec.vaddshs
- res_vus = vec_adds(vus, vus); // CHECK: @llvm.ppc.altivec.vadduhs
- res_vus = vec_adds(vbs, vus); // CHECK: @llvm.ppc.altivec.vadduhs
- res_vus = vec_adds(vus, vbs); // CHECK: @llvm.ppc.altivec.vadduhs
- res_vi = vec_adds(vi, vi); // CHECK: @llvm.ppc.altivec.vaddsws
- res_vi = vec_adds(vbi, vi); // CHECK: @llvm.ppc.altivec.vaddsws
- res_vi = vec_adds(vi, vbi); // CHECK: @llvm.ppc.altivec.vaddsws
- res_vui = vec_adds(vui, vui); // CHECK: @llvm.ppc.altivec.vadduws
- res_vui = vec_adds(vbi, vui); // CHECK: @llvm.ppc.altivec.vadduws
- res_vui = vec_adds(vui, vbi); // CHECK: @llvm.ppc.altivec.vadduws
- res_vsc = vec_vaddsbs(vsc, vsc); // CHECK: @llvm.ppc.altivec.vaddsbs
- res_vsc = vec_vaddsbs(vbc, vsc); // CHECK: @llvm.ppc.altivec.vaddsbs
- res_vsc = vec_vaddsbs(vsc, vbc); // CHECK: @llvm.ppc.altivec.vaddsbs
- res_vuc = vec_vaddubs(vuc, vuc); // CHECK: @llvm.ppc.altivec.vaddubs
- res_vuc = vec_vaddubs(vbc, vuc); // CHECK: @llvm.ppc.altivec.vaddubs
- res_vuc = vec_vaddubs(vuc, vbc); // CHECK: @llvm.ppc.altivec.vaddubs
- res_vs = vec_vaddshs(vs, vs); // CHECK: @llvm.ppc.altivec.vaddshs
- res_vs = vec_vaddshs(vbs, vs); // CHECK: @llvm.ppc.altivec.vaddshs
- res_vs = vec_vaddshs(vs, vbs); // CHECK: @llvm.ppc.altivec.vaddshs
- res_vus = vec_vadduhs(vus, vus); // CHECK: @llvm.ppc.altivec.vadduhs
- res_vus = vec_vadduhs(vbs, vus); // CHECK: @llvm.ppc.altivec.vadduhs
- res_vus = vec_vadduhs(vus, vbs); // CHECK: @llvm.ppc.altivec.vadduhs
- res_vi = vec_vaddsws(vi, vi); // CHECK: @llvm.ppc.altivec.vaddsws
- res_vi = vec_vaddsws(vbi, vi); // CHECK: @llvm.ppc.altivec.vaddsws
- res_vi = vec_vaddsws(vi, vbi); // CHECK: @llvm.ppc.altivec.vaddsws
- res_vui = vec_vadduws(vui, vui); // CHECK: @llvm.ppc.altivec.vadduws
- res_vui = vec_vadduws(vbi, vui); // CHECK: @llvm.ppc.altivec.vadduws
- res_vui = vec_vadduws(vui, vbi); // CHECK: @llvm.ppc.altivec.vadduws
+ res_vsc = vec_adds(vsc, vsc);
+// CHECK: @llvm.ppc.altivec.vaddsbs
+// CHECK-LE: @llvm.ppc.altivec.vaddsbs
+
+ res_vsc = vec_adds(vbc, vsc);
+// CHECK: @llvm.ppc.altivec.vaddsbs
+// CHECK-LE: @llvm.ppc.altivec.vaddsbs
+
+ res_vsc = vec_adds(vsc, vbc);
+// CHECK: @llvm.ppc.altivec.vaddsbs
+// CHECK-LE: @llvm.ppc.altivec.vaddsbs
+
+ res_vuc = vec_adds(vuc, vuc);
+// CHECK: @llvm.ppc.altivec.vaddubs
+// CHECK-LE: @llvm.ppc.altivec.vaddubs
+
+ res_vuc = vec_adds(vbc, vuc);
+// CHECK: @llvm.ppc.altivec.vaddubs
+// CHECK-LE: @llvm.ppc.altivec.vaddubs
+
+ res_vuc = vec_adds(vuc, vbc);
+// CHECK: @llvm.ppc.altivec.vaddubs
+// CHECK-LE: @llvm.ppc.altivec.vaddubs
+
+ res_vs = vec_adds(vs, vs);
+// CHECK: @llvm.ppc.altivec.vaddshs
+// CHECK-LE: @llvm.ppc.altivec.vaddshs
+
+ res_vs = vec_adds(vbs, vs);
+// CHECK: @llvm.ppc.altivec.vaddshs
+// CHECK-LE: @llvm.ppc.altivec.vaddshs
+
+ res_vs = vec_adds(vs, vbs);
+// CHECK: @llvm.ppc.altivec.vaddshs
+// CHECK-LE: @llvm.ppc.altivec.vaddshs
+
+ res_vus = vec_adds(vus, vus);
+// CHECK: @llvm.ppc.altivec.vadduhs
+// CHECK-LE: @llvm.ppc.altivec.vadduhs
+
+ res_vus = vec_adds(vbs, vus);
+// CHECK: @llvm.ppc.altivec.vadduhs
+// CHECK-LE: @llvm.ppc.altivec.vadduhs
+
+ res_vus = vec_adds(vus, vbs);
+// CHECK: @llvm.ppc.altivec.vadduhs
+// CHECK-LE: @llvm.ppc.altivec.vadduhs
+
+ res_vi = vec_adds(vi, vi);
+// CHECK: @llvm.ppc.altivec.vaddsws
+// CHECK-LE: @llvm.ppc.altivec.vaddsws
+
+ res_vi = vec_adds(vbi, vi);
+// CHECK: @llvm.ppc.altivec.vaddsws
+// CHECK-LE: @llvm.ppc.altivec.vaddsws
+
+ res_vi = vec_adds(vi, vbi);
+// CHECK: @llvm.ppc.altivec.vaddsws
+// CHECK-LE: @llvm.ppc.altivec.vaddsws
+
+ res_vui = vec_adds(vui, vui);
+// CHECK: @llvm.ppc.altivec.vadduws
+// CHECK-LE: @llvm.ppc.altivec.vadduws
+
+ res_vui = vec_adds(vbi, vui);
+// CHECK: @llvm.ppc.altivec.vadduws
+// CHECK-LE: @llvm.ppc.altivec.vadduws
+
+ res_vui = vec_adds(vui, vbi);
+// CHECK: @llvm.ppc.altivec.vadduws
+// CHECK-LE: @llvm.ppc.altivec.vadduws
+
+ res_vsc = vec_vaddsbs(vsc, vsc);
+// CHECK: @llvm.ppc.altivec.vaddsbs
+// CHECK-LE: @llvm.ppc.altivec.vaddsbs
+
+ res_vsc = vec_vaddsbs(vbc, vsc);
+// CHECK: @llvm.ppc.altivec.vaddsbs
+// CHECK-LE: @llvm.ppc.altivec.vaddsbs
+
+ res_vsc = vec_vaddsbs(vsc, vbc);
+// CHECK: @llvm.ppc.altivec.vaddsbs
+// CHECK-LE: @llvm.ppc.altivec.vaddsbs
+
+ res_vuc = vec_vaddubs(vuc, vuc);
+// CHECK: @llvm.ppc.altivec.vaddubs
+// CHECK-LE: @llvm.ppc.altivec.vaddubs
+
+ res_vuc = vec_vaddubs(vbc, vuc);
+// CHECK: @llvm.ppc.altivec.vaddubs
+// CHECK-LE: @llvm.ppc.altivec.vaddubs
+
+ res_vuc = vec_vaddubs(vuc, vbc);
+// CHECK: @llvm.ppc.altivec.vaddubs
+// CHECK-LE: @llvm.ppc.altivec.vaddubs
+
+ res_vs = vec_vaddshs(vs, vs);
+// CHECK: @llvm.ppc.altivec.vaddshs
+// CHECK-LE: @llvm.ppc.altivec.vaddshs
+
+ res_vs = vec_vaddshs(vbs, vs);
+// CHECK: @llvm.ppc.altivec.vaddshs
+// CHECK-LE: @llvm.ppc.altivec.vaddshs
+
+ res_vs = vec_vaddshs(vs, vbs);
+// CHECK: @llvm.ppc.altivec.vaddshs
+// CHECK-LE: @llvm.ppc.altivec.vaddshs
+
+ res_vus = vec_vadduhs(vus, vus);
+// CHECK: @llvm.ppc.altivec.vadduhs
+// CHECK-LE: @llvm.ppc.altivec.vadduhs
+
+ res_vus = vec_vadduhs(vbs, vus);
+// CHECK: @llvm.ppc.altivec.vadduhs
+// CHECK-LE: @llvm.ppc.altivec.vadduhs
+
+ res_vus = vec_vadduhs(vus, vbs);
+// CHECK: @llvm.ppc.altivec.vadduhs
+// CHECK-LE: @llvm.ppc.altivec.vadduhs
+
+ res_vi = vec_vaddsws(vi, vi);
+// CHECK: @llvm.ppc.altivec.vaddsws
+// CHECK-LE: @llvm.ppc.altivec.vaddsws
+
+ res_vi = vec_vaddsws(vbi, vi);
+// CHECK: @llvm.ppc.altivec.vaddsws
+// CHECK-LE: @llvm.ppc.altivec.vaddsws
+
+ res_vi = vec_vaddsws(vi, vbi);
+// CHECK: @llvm.ppc.altivec.vaddsws
+// CHECK-LE: @llvm.ppc.altivec.vaddsws
+
+ res_vui = vec_vadduws(vui, vui);
+// CHECK: @llvm.ppc.altivec.vadduws
+// CHECK-LE: @llvm.ppc.altivec.vadduws
+
+ res_vui = vec_vadduws(vbi, vui);
+// CHECK: @llvm.ppc.altivec.vadduws
+// CHECK-LE: @llvm.ppc.altivec.vadduws
+
+ res_vui = vec_vadduws(vui, vbi);
+// CHECK: @llvm.ppc.altivec.vadduws
+// CHECK-LE: @llvm.ppc.altivec.vadduws
/* vec_and */
- res_vsc = vec_and(vsc, vsc); // CHECK: and <16 x i8>
- res_vsc = vec_and(vbc, vsc); // CHECK: and <16 x i8>
- res_vsc = vec_and(vsc, vbc); // CHECK: and <16 x i8>
- res_vuc = vec_and(vuc, vuc); // CHECK: and <16 x i8>
- res_vuc = vec_and(vbc, vuc); // CHECK: and <16 x i8>
- res_vuc = vec_and(vuc, vbc); // CHECK: and <16 x i8>
- res_vbc = vec_and(vbc, vbc); // CHECK: and <16 x i8>
- res_vs = vec_and(vs, vs); // CHECK: and <8 x i16>
- res_vs = vec_and(vbs, vs); // CHECK: and <8 x i16>
- res_vs = vec_and(vs, vbs); // CHECK: and <8 x i16>
- res_vus = vec_and(vus, vus); // CHECK: and <8 x i16>
- res_vus = vec_and(vbs, vus); // CHECK: and <8 x i16>
- res_vus = vec_and(vus, vbs); // CHECK: and <8 x i16>
- res_vbs = vec_and(vbs, vbs); // CHECK: and <8 x i16>
- res_vi = vec_and(vi, vi); // CHECK: and <4 x i32>
- res_vi = vec_and(vbi, vi); // CHECK: and <4 x i32>
- res_vi = vec_and(vi, vbi); // CHECK: and <4 x i32>
- res_vui = vec_and(vui, vui); // CHECK: and <4 x i32>
- res_vui = vec_and(vbi, vui); // CHECK: and <4 x i32>
- res_vui = vec_and(vui, vbi); // CHECK: and <4 x i32>
- res_vbi = vec_and(vbi, vbi); // CHECK: and <4 x i32>
- res_vsc = vec_vand(vsc, vsc); // CHECK: and <16 x i8>
- res_vsc = vec_vand(vbc, vsc); // CHECK: and <16 x i8>
- res_vsc = vec_vand(vsc, vbc); // CHECK: and <16 x i8>
- res_vuc = vec_vand(vuc, vuc); // CHECK: and <16 x i8>
- res_vuc = vec_vand(vbc, vuc); // CHECK: and <16 x i8>
- res_vuc = vec_vand(vuc, vbc); // CHECK: and <16 x i8>
- res_vbc = vec_vand(vbc, vbc); // CHECK: and <16 x i8>
- res_vs = vec_vand(vs, vs); // CHECK: and <8 x i16>
- res_vs = vec_vand(vbs, vs); // CHECK: and <8 x i16>
- res_vs = vec_vand(vs, vbs); // CHECK: and <8 x i16>
- res_vus = vec_vand(vus, vus); // CHECK: and <8 x i16>
- res_vus = vec_vand(vbs, vus); // CHECK: and <8 x i16>
- res_vus = vec_vand(vus, vbs); // CHECK: and <8 x i16>
- res_vbs = vec_vand(vbs, vbs); // CHECK: and <8 x i16>
- res_vi = vec_vand(vi, vi); // CHECK: and <4 x i32>
- res_vi = vec_vand(vbi, vi); // CHECK: and <4 x i32>
- res_vi = vec_vand(vi, vbi); // CHECK: and <4 x i32>
- res_vui = vec_vand(vui, vui); // CHECK: and <4 x i32>
- res_vui = vec_vand(vbi, vui); // CHECK: and <4 x i32>
- res_vui = vec_vand(vui, vbi); // CHECK: and <4 x i32>
- res_vbi = vec_vand(vbi, vbi); // CHECK: and <4 x i32>
+ res_vsc = vec_and(vsc, vsc);
+// CHECK: and <16 x i8>
+// CHECK-LE: and <16 x i8>
+
+ res_vsc = vec_and(vbc, vsc);
+// CHECK: and <16 x i8>
+// CHECK-LE: and <16 x i8>
+
+ res_vsc = vec_and(vsc, vbc);
+// CHECK: and <16 x i8>
+// CHECK-LE: and <16 x i8>
+
+ res_vuc = vec_and(vuc, vuc);
+// CHECK: and <16 x i8>
+// CHECK-LE: and <16 x i8>
+
+ res_vuc = vec_and(vbc, vuc);
+// CHECK: and <16 x i8>
+// CHECK-LE: and <16 x i8>
+
+ res_vuc = vec_and(vuc, vbc);
+// CHECK: and <16 x i8>
+// CHECK-LE: and <16 x i8>
+
+ res_vbc = vec_and(vbc, vbc);
+// CHECK: and <16 x i8>
+// CHECK-LE: and <16 x i8>
+
+ res_vs = vec_and(vs, vs);
+// CHECK: and <8 x i16>
+// CHECK-LE: and <8 x i16>
+
+ res_vs = vec_and(vbs, vs);
+// CHECK: and <8 x i16>
+// CHECK-LE: and <8 x i16>
+
+ res_vs = vec_and(vs, vbs);
+// CHECK: and <8 x i16>
+// CHECK-LE: and <8 x i16>
+
+ res_vus = vec_and(vus, vus);
+// CHECK: and <8 x i16>
+// CHECK-LE: and <8 x i16>
+
+ res_vus = vec_and(vbs, vus);
+// CHECK: and <8 x i16>
+// CHECK-LE: and <8 x i16>
+
+ res_vus = vec_and(vus, vbs);
+// CHECK: and <8 x i16>
+// CHECK-LE: and <8 x i16>
+
+ res_vbs = vec_and(vbs, vbs);
+// CHECK: and <8 x i16>
+// CHECK-LE: and <8 x i16>
+
+ res_vi = vec_and(vi, vi);
+// CHECK: and <4 x i32>
+// CHECK-LE: and <4 x i32>
+
+ res_vi = vec_and(vbi, vi);
+// CHECK: and <4 x i32>
+// CHECK-LE: and <4 x i32>
+
+ res_vi = vec_and(vi, vbi);
+// CHECK: and <4 x i32>
+// CHECK-LE: and <4 x i32>
+
+ res_vui = vec_and(vui, vui);
+// CHECK: and <4 x i32>
+// CHECK-LE: and <4 x i32>
+
+ res_vui = vec_and(vbi, vui);
+// CHECK: and <4 x i32>
+// CHECK-LE: and <4 x i32>
+
+ res_vui = vec_and(vui, vbi);
+// CHECK: and <4 x i32>
+// CHECK-LE: and <4 x i32>
+
+ res_vbi = vec_and(vbi, vbi);
+// CHECK: and <4 x i32>
+// CHECK-LE: and <4 x i32>
+
+ res_vsc = vec_vand(vsc, vsc);
+// CHECK: and <16 x i8>
+// CHECK-LE: and <16 x i8>
+
+ res_vsc = vec_vand(vbc, vsc);
+// CHECK: and <16 x i8>
+// CHECK-LE: and <16 x i8>
+
+ res_vsc = vec_vand(vsc, vbc);
+// CHECK: and <16 x i8>
+// CHECK-LE: and <16 x i8>
+
+ res_vuc = vec_vand(vuc, vuc);
+// CHECK: and <16 x i8>
+// CHECK-LE: and <16 x i8>
+
+ res_vuc = vec_vand(vbc, vuc);
+// CHECK: and <16 x i8>
+// CHECK-LE: and <16 x i8>
+
+ res_vuc = vec_vand(vuc, vbc);
+// CHECK: and <16 x i8>
+// CHECK-LE: and <16 x i8>
+
+ res_vbc = vec_vand(vbc, vbc);
+// CHECK: and <16 x i8>
+// CHECK-LE: and <16 x i8>
+
+ res_vs = vec_vand(vs, vs);
+// CHECK: and <8 x i16>
+// CHECK-LE: and <8 x i16>
+
+ res_vs = vec_vand(vbs, vs);
+// CHECK: and <8 x i16>
+// CHECK-LE: and <8 x i16>
+
+ res_vs = vec_vand(vs, vbs);
+// CHECK: and <8 x i16>
+// CHECK-LE: and <8 x i16>
+
+ res_vus = vec_vand(vus, vus);
+// CHECK: and <8 x i16>
+// CHECK-LE: and <8 x i16>
+
+ res_vus = vec_vand(vbs, vus);
+// CHECK: and <8 x i16>
+// CHECK-LE: and <8 x i16>
+
+ res_vus = vec_vand(vus, vbs);
+// CHECK: and <8 x i16>
+// CHECK-LE: and <8 x i16>
+
+ res_vbs = vec_vand(vbs, vbs);
+// CHECK: and <8 x i16>
+// CHECK-LE: and <8 x i16>
+
+ res_vi = vec_vand(vi, vi);
+// CHECK: and <4 x i32>
+// CHECK-LE: and <4 x i32>
+
+ res_vi = vec_vand(vbi, vi);
+// CHECK: and <4 x i32>
+// CHECK-LE: and <4 x i32>
+
+ res_vi = vec_vand(vi, vbi);
+// CHECK: and <4 x i32>
+// CHECK-LE: and <4 x i32>
+
+ res_vui = vec_vand(vui, vui);
+// CHECK: and <4 x i32>
+// CHECK-LE: and <4 x i32>
+
+ res_vui = vec_vand(vbi, vui);
+// CHECK: and <4 x i32>
+// CHECK-LE: and <4 x i32>
+
+ res_vui = vec_vand(vui, vbi);
+// CHECK: and <4 x i32>
+// CHECK-LE: and <4 x i32>
+
+ res_vbi = vec_vand(vbi, vbi);
+// CHECK: and <4 x i32>
+// CHECK-LE: and <4 x i32>
/* vec_andc */
- res_vsc = vec_andc(vsc, vsc); // CHECK: xor <16 x i8>
- // CHECK: and <16 x i8>
-
- res_vsc = vec_andc(vbc, vsc); // CHECK: xor <16 x i8>
- // CHECK: and <16 x i8>
-
- res_vsc = vec_andc(vsc, vbc); // CHECK: xor <16 x i8>
- // CHECK: and <16 x i8>
-
- res_vuc = vec_andc(vuc, vuc); // CHECK: xor <16 x i8>
- // CHECK: and <16 x i8>
-
- res_vuc = vec_andc(vbc, vuc); // CHECK: xor <16 x i8>
- // CHECK: and <16 x i8>
-
- res_vuc = vec_andc(vuc, vbc); // CHECK: xor <16 x i8>
- // CHECK: and <16 x i8>
-
- res_vbc = vec_andc(vbc, vbc); // CHECK: xor <16 x i8>
- // CHECK: and <16 x i8>
-
- res_vs = vec_andc(vs, vs); // CHECK: xor <8 x i16>
- // CHECK: and <8 x i16>
-
- res_vs = vec_andc(vbs, vs); // CHECK: xor <8 x i16>
- // CHECK: and <8 x i16>
-
- res_vs = vec_andc(vs, vbs); // CHECK: xor <8 x i16>
- // CHECK: and <8 x i16>
-
- res_vus = vec_andc(vus, vus); // CHECK: xor <8 x i16>
- // CHECK: and <8 x i16>
-
- res_vus = vec_andc(vbs, vus); // CHECK: xor <8 x i16>
- // CHECK: and <8 x i16>
-
- res_vus = vec_andc(vus, vbs); // CHECK: xor <8 x i16>
- // CHECK: and <8 x i16>
-
- res_vbs = vec_andc(vbs, vbs); // CHECK: xor <8 x i16>
- // CHECK: and <8 x i16>
-
- res_vi = vec_andc(vi, vi); // CHECK: xor <4 x i32>
- // CHECK: and <4 x i32>
-
- res_vi = vec_andc(vbi, vi); // CHECK: xor <4 x i32>
- // CHECK: and <4 x i32>
-
- res_vi = vec_andc(vi, vbi); // CHECK: xor <4 x i32>
- // CHECK: and <4 x i32>
-
- res_vui = vec_andc(vui, vui); // CHECK: xor <4 x i32>
- // CHECK: and <4 x i32>
-
- res_vui = vec_andc(vbi, vui); // CHECK: xor <4 x i32>
- // CHECK: and <4 x i32>
-
- res_vui = vec_andc(vui, vbi); // CHECK: xor <4 x i32>
- // CHECK: and <4 x i32>
-
- res_vf = vec_andc(vf, vf); // CHECK: xor <4 x i32>
- // CHECK: and <4 x i32>
-
- res_vf = vec_andc(vbi, vf); // CHECK: xor <4 x i32>
- // CHECK: and <4 x i32>
-
- res_vf = vec_andc(vf, vbi); // CHECK: xor <4 x i32>
- // CHECK: and <4 x i32>
-
- res_vsc = vec_vandc(vsc, vsc); // CHECK: xor <16 x i8>
- // CHECK: and <16 x i8>
-
- res_vsc = vec_vandc(vbc, vsc); // CHECK: xor <16 x i8>
- // CHECK: and <16 x i8>
-
- res_vsc = vec_vandc(vsc, vbc); // CHECK: xor <16 x i8>
- // CHECK: and <16 x i8>
-
- res_vuc = vec_vandc(vuc, vuc); // CHECK: xor <16 x i8>
- // CHECK: and <16 x i8>
-
- res_vuc = vec_vandc(vbc, vuc); // CHECK: xor <16 x i8>
- // CHECK: and <16 x i8>
-
- res_vuc = vec_vandc(vuc, vbc); // CHECK: xor <16 x i8>
- // CHECK: and <16 x i8>
-
- res_vbc = vec_vandc(vbc, vbc); // CHECK: xor <16 x i8>
- // CHECK: and <16 x i8>
-
- res_vs = vec_vandc(vs, vs); // CHECK: xor <8 x i16>
- // CHECK: and <8 x i16>
-
- res_vs = vec_vandc(vbs, vs); // CHECK: xor <8 x i16>
- // CHECK: and <8 x i16>
-
- res_vs = vec_vandc(vs, vbs); // CHECK: xor <8 x i16>
- // CHECK: and <8 x i16>
-
- res_vus = vec_vandc(vus, vus); // CHECK: xor <8 x i16>
- // CHECK: and <8 x i16>
-
- res_vus = vec_vandc(vbs, vus); // CHECK: xor <8 x i16>
- // CHECK: and <8 x i16>
-
- res_vus = vec_vandc(vus, vbs); // CHECK: xor <8 x i16>
- // CHECK: and <8 x i16>
-
- res_vbs = vec_vandc(vbs, vbs); // CHECK: xor <8 x i16>
- // CHECK: and <8 x i16>
-
- res_vi = vec_vandc(vi, vi); // CHECK: xor <4 x i32>
- // CHECK: and <4 x i32>
-
- res_vi = vec_vandc(vbi, vi); // CHECK: xor <4 x i32>
- // CHECK: and <4 x i32>
-
- res_vi = vec_vandc(vi, vbi); // CHECK: xor <4 x i32>
- // CHECK: and <4 x i32>
-
- res_vui = vec_vandc(vui, vui); // CHECK: xor <4 x i32>
- // CHECK: and <4 x i32>
-
- res_vui = vec_vandc(vbi, vui); // CHECK: xor <4 x i32>
- // CHECK: and <4 x i32>
-
- res_vui = vec_vandc(vui, vbi); // CHECK: xor <4 x i32>
- // CHECK: and <4 x i32>
-
- res_vf = vec_vandc(vf, vf); // CHECK: xor <4 x i32>
- // CHECK: and <4 x i32>
-
- res_vf = vec_vandc(vbi, vf); // CHECK: xor <4 x i32>
- // CHECK: and <4 x i32>
-
- res_vf = vec_vandc(vf, vbi); // CHECK: xor <4 x i32>
- // CHECK: and <4 x i32>
+ res_vsc = vec_andc(vsc, vsc);
+// CHECK: xor <16 x i8>
+// CHECK: and <16 x i8>
+// CHECK-LE: xor <16 x i8>
+// CHECK-LE: and <16 x i8>
+
+ res_vsc = vec_andc(vbc, vsc);
+// CHECK: xor <16 x i8>
+// CHECK: and <16 x i8>
+// CHECK-LE: xor <16 x i8>
+// CHECK-LE: and <16 x i8>
+
+ res_vsc = vec_andc(vsc, vbc);
+// CHECK: xor <16 x i8>
+// CHECK: and <16 x i8>
+// CHECK-LE: xor <16 x i8>
+// CHECK-LE: and <16 x i8>
+
+ res_vuc = vec_andc(vuc, vuc);
+// CHECK: xor <16 x i8>
+// CHECK: and <16 x i8>
+// CHECK-LE: xor <16 x i8>
+// CHECK-LE: and <16 x i8>
+
+ res_vuc = vec_andc(vbc, vuc);
+// CHECK: xor <16 x i8>
+// CHECK: and <16 x i8>
+// CHECK-LE: xor <16 x i8>
+// CHECK-LE: and <16 x i8>
+
+ res_vuc = vec_andc(vuc, vbc);
+// CHECK: xor <16 x i8>
+// CHECK: and <16 x i8>
+// CHECK-LE: xor <16 x i8>
+// CHECK-LE: and <16 x i8>
+
+ res_vbc = vec_andc(vbc, vbc);
+// CHECK: xor <16 x i8>
+// CHECK: and <16 x i8>
+// CHECK-LE: xor <16 x i8>
+// CHECK-LE: and <16 x i8>
+
+ res_vs = vec_andc(vs, vs);
+// CHECK: xor <8 x i16>
+// CHECK: and <8 x i16>
+// CHECK-LE: xor <8 x i16>
+// CHECK-LE: and <8 x i16>
+
+ res_vs = vec_andc(vbs, vs);
+// CHECK: xor <8 x i16>
+// CHECK: and <8 x i16>
+// CHECK-LE: xor <8 x i16>
+// CHECK-LE: and <8 x i16>
+
+ res_vs = vec_andc(vs, vbs);
+// CHECK: xor <8 x i16>
+// CHECK: and <8 x i16>
+// CHECK-LE: xor <8 x i16>
+// CHECK-LE: and <8 x i16>
+
+ res_vus = vec_andc(vus, vus);
+// CHECK: xor <8 x i16>
+// CHECK: and <8 x i16>
+// CHECK-LE: xor <8 x i16>
+// CHECK-LE: and <8 x i16>
+
+ res_vus = vec_andc(vbs, vus);
+// CHECK: xor <8 x i16>
+// CHECK: and <8 x i16>
+// CHECK-LE: xor <8 x i16>
+// CHECK-LE: and <8 x i16>
+
+ res_vus = vec_andc(vus, vbs);
+// CHECK: xor <8 x i16>
+// CHECK: and <8 x i16>
+// CHECK-LE: xor <8 x i16>
+// CHECK-LE: and <8 x i16>
+
+ res_vbs = vec_andc(vbs, vbs);
+// CHECK: xor <8 x i16>
+// CHECK: and <8 x i16>
+// CHECK-LE: xor <8 x i16>
+// CHECK-LE: and <8 x i16>
+
+ res_vi = vec_andc(vi, vi);
+// CHECK: xor <4 x i32>
+// CHECK: and <4 x i32>
+// CHECK-LE: xor <4 x i32>
+// CHECK-LE: and <4 x i32>
+
+ res_vi = vec_andc(vbi, vi);
+// CHECK: xor <4 x i32>
+// CHECK: and <4 x i32>
+// CHECK-LE: xor <4 x i32>
+// CHECK-LE: and <4 x i32>
+
+ res_vi = vec_andc(vi, vbi);
+// CHECK: xor <4 x i32>
+// CHECK: and <4 x i32>
+// CHECK-LE: xor <4 x i32>
+// CHECK-LE: and <4 x i32>
+
+ res_vui = vec_andc(vui, vui);
+// CHECK: xor <4 x i32>
+// CHECK: and <4 x i32>
+// CHECK-LE: xor <4 x i32>
+// CHECK-LE: and <4 x i32>
+
+ res_vui = vec_andc(vbi, vui);
+// CHECK: xor <4 x i32>
+// CHECK: and <4 x i32>
+// CHECK-LE: xor <4 x i32>
+// CHECK-LE: and <4 x i32>
+
+ res_vui = vec_andc(vui, vbi);
+// CHECK: xor <4 x i32>
+// CHECK: and <4 x i32>
+// CHECK-LE: xor <4 x i32>
+// CHECK-LE: and <4 x i32>
+
+ res_vf = vec_andc(vf, vf);
+// CHECK: xor <4 x i32>
+// CHECK: and <4 x i32>
+// CHECK-LE: xor <4 x i32>
+// CHECK-LE: and <4 x i32>
+
+ res_vf = vec_andc(vbi, vf);
+// CHECK: xor <4 x i32>
+// CHECK: and <4 x i32>
+// CHECK-LE: xor <4 x i32>
+// CHECK-LE: and <4 x i32>
+
+ res_vf = vec_andc(vf, vbi);
+// CHECK: xor <4 x i32>
+// CHECK: and <4 x i32>
+// CHECK-LE: xor <4 x i32>
+// CHECK-LE: and <4 x i32>
+
+ res_vsc = vec_vandc(vsc, vsc);
+// CHECK: xor <16 x i8>
+// CHECK: and <16 x i8>
+// CHECK-LE: xor <16 x i8>
+// CHECK-LE: and <16 x i8>
+
+ res_vsc = vec_vandc(vbc, vsc);
+// CHECK: xor <16 x i8>
+// CHECK: and <16 x i8>
+// CHECK-LE: xor <16 x i8>
+// CHECK-LE: and <16 x i8>
+
+ res_vsc = vec_vandc(vsc, vbc);
+// CHECK: xor <16 x i8>
+// CHECK: and <16 x i8>
+// CHECK-LE: xor <16 x i8>
+// CHECK-LE: and <16 x i8>
+
+ res_vuc = vec_vandc(vuc, vuc);
+// CHECK: xor <16 x i8>
+// CHECK: and <16 x i8>
+// CHECK-LE: xor <16 x i8>
+// CHECK-LE: and <16 x i8>
+
+ res_vuc = vec_vandc(vbc, vuc);
+// CHECK: xor <16 x i8>
+// CHECK: and <16 x i8>
+// CHECK-LE: xor <16 x i8>
+// CHECK-LE: and <16 x i8>
+
+ res_vuc = vec_vandc(vuc, vbc);
+// CHECK: xor <16 x i8>
+// CHECK: and <16 x i8>
+// CHECK-LE: xor <16 x i8>
+// CHECK-LE: and <16 x i8>
+
+ res_vbc = vec_vandc(vbc, vbc);
+// CHECK: xor <16 x i8>
+// CHECK: and <16 x i8>
+// CHECK-LE: xor <16 x i8>
+// CHECK-LE: and <16 x i8>
+
+ res_vs = vec_vandc(vs, vs);
+// CHECK: xor <8 x i16>
+// CHECK: and <8 x i16>
+// CHECK-LE: xor <8 x i16>
+// CHECK-LE: and <8 x i16>
+
+ res_vs = vec_vandc(vbs, vs);
+// CHECK: xor <8 x i16>
+// CHECK: and <8 x i16>
+// CHECK-LE: xor <8 x i16>
+// CHECK-LE: and <8 x i16>
+
+ res_vs = vec_vandc(vs, vbs);
+// CHECK: xor <8 x i16>
+// CHECK: and <8 x i16>
+// CHECK-LE: xor <8 x i16>
+// CHECK-LE: and <8 x i16>
+
+ res_vus = vec_vandc(vus, vus);
+// CHECK: xor <8 x i16>
+// CHECK: and <8 x i16>
+// CHECK-LE: xor <8 x i16>
+// CHECK-LE: and <8 x i16>
+
+ res_vus = vec_vandc(vbs, vus);
+// CHECK: xor <8 x i16>
+// CHECK: and <8 x i16>
+// CHECK-LE: xor <8 x i16>
+// CHECK-LE: and <8 x i16>
+
+ res_vus = vec_vandc(vus, vbs);
+// CHECK: xor <8 x i16>
+// CHECK: and <8 x i16>
+// CHECK-LE: xor <8 x i16>
+// CHECK-LE: and <8 x i16>
+
+ res_vbs = vec_vandc(vbs, vbs);
+// CHECK: xor <8 x i16>
+// CHECK: and <8 x i16>
+// CHECK-LE: xor <8 x i16>
+// CHECK-LE: and <8 x i16>
+
+ res_vi = vec_vandc(vi, vi);
+// CHECK: xor <4 x i32>
+// CHECK: and <4 x i32>
+// CHECK-LE: xor <4 x i32>
+// CHECK-LE: and <4 x i32>
+
+ res_vi = vec_vandc(vbi, vi);
+// CHECK: xor <4 x i32>
+// CHECK: and <4 x i32>
+// CHECK-LE: xor <4 x i32>
+// CHECK-LE: and <4 x i32>
+
+ res_vi = vec_vandc(vi, vbi);
+// CHECK: xor <4 x i32>
+// CHECK: and <4 x i32>
+// CHECK-LE: xor <4 x i32>
+// CHECK-LE: and <4 x i32>
+
+ res_vui = vec_vandc(vui, vui);
+// CHECK: xor <4 x i32>
+// CHECK: and <4 x i32>
+// CHECK-LE: xor <4 x i32>
+// CHECK-LE: and <4 x i32>
+
+ res_vui = vec_vandc(vbi, vui);
+// CHECK: xor <4 x i32>
+// CHECK: and <4 x i32>
+// CHECK-LE: xor <4 x i32>
+// CHECK-LE: and <4 x i32>
+
+ res_vui = vec_vandc(vui, vbi);
+// CHECK: xor <4 x i32>
+// CHECK: and <4 x i32>
+// CHECK-LE: xor <4 x i32>
+// CHECK-LE: and <4 x i32>
+
+ res_vf = vec_vandc(vf, vf);
+// CHECK: xor <4 x i32>
+// CHECK: and <4 x i32>
+// CHECK-LE: xor <4 x i32>
+// CHECK-LE: and <4 x i32>
+
+ res_vf = vec_vandc(vbi, vf);
+// CHECK: xor <4 x i32>
+// CHECK: and <4 x i32>
+// CHECK-LE: xor <4 x i32>
+// CHECK-LE: and <4 x i32>
+
+ res_vf = vec_vandc(vf, vbi);
+// CHECK: xor <4 x i32>
+// CHECK: and <4 x i32>
+// CHECK-LE: xor <4 x i32>
+// CHECK-LE: and <4 x i32>
}
// CHECK-LABEL: define void @test2
void test2() {
/* vec_avg */
- res_vsc = vec_avg(vsc, vsc); // CHECK: @llvm.ppc.altivec.vavgsb
- res_vuc = vec_avg(vuc, vuc); // CHECK: @llvm.ppc.altivec.vavgub
- res_vs = vec_avg(vs, vs); // CHECK: @llvm.ppc.altivec.vavgsh
- res_vus = vec_avg(vus, vus); // CHECK: @llvm.ppc.altivec.vavguh
- res_vi = vec_avg(vi, vi); // CHECK: @llvm.ppc.altivec.vavgsw
- res_vui = vec_avg(vui, vui); // CHECK: @llvm.ppc.altivec.vavguw
- res_vsc = vec_vavgsb(vsc, vsc); // CHECK: @llvm.ppc.altivec.vavgsb
- res_vuc = vec_vavgub(vuc, vuc); // CHECK: @llvm.ppc.altivec.vavgub
- res_vs = vec_vavgsh(vs, vs); // CHECK: @llvm.ppc.altivec.vavgsh
- res_vus = vec_vavguh(vus, vus); // CHECK: @llvm.ppc.altivec.vavguh
- res_vi = vec_vavgsw(vi, vi); // CHECK: @llvm.ppc.altivec.vavgsw
- res_vui = vec_vavguw(vui, vui); // CHECK: @llvm.ppc.altivec.vavguw
+ res_vsc = vec_avg(vsc, vsc);
+// CHECK: @llvm.ppc.altivec.vavgsb
+// CHECK-LE: @llvm.ppc.altivec.vavgsb
+
+ res_vuc = vec_avg(vuc, vuc);
+// CHECK: @llvm.ppc.altivec.vavgub
+// CHECK-LE: @llvm.ppc.altivec.vavgub
+
+ res_vs = vec_avg(vs, vs);
+// CHECK: @llvm.ppc.altivec.vavgsh
+// CHECK-LE: @llvm.ppc.altivec.vavgsh
+
+ res_vus = vec_avg(vus, vus);
+// CHECK: @llvm.ppc.altivec.vavguh
+// CHECK-LE: @llvm.ppc.altivec.vavguh
+
+ res_vi = vec_avg(vi, vi);
+// CHECK: @llvm.ppc.altivec.vavgsw
+// CHECK-LE: @llvm.ppc.altivec.vavgsw
+
+ res_vui = vec_avg(vui, vui);
+// CHECK: @llvm.ppc.altivec.vavguw
+// CHECK-LE: @llvm.ppc.altivec.vavguw
+
+ res_vsc = vec_vavgsb(vsc, vsc);
+// CHECK: @llvm.ppc.altivec.vavgsb
+// CHECK-LE: @llvm.ppc.altivec.vavgsb
+
+ res_vuc = vec_vavgub(vuc, vuc);
+// CHECK: @llvm.ppc.altivec.vavgub
+// CHECK-LE: @llvm.ppc.altivec.vavgub
+
+ res_vs = vec_vavgsh(vs, vs);
+// CHECK: @llvm.ppc.altivec.vavgsh
+// CHECK-LE: @llvm.ppc.altivec.vavgsh
+
+ res_vus = vec_vavguh(vus, vus);
+// CHECK: @llvm.ppc.altivec.vavguh
+// CHECK-LE: @llvm.ppc.altivec.vavguh
+
+ res_vi = vec_vavgsw(vi, vi);
+// CHECK: @llvm.ppc.altivec.vavgsw
+// CHECK-LE: @llvm.ppc.altivec.vavgsw
+
+ res_vui = vec_vavguw(vui, vui);
+// CHECK: @llvm.ppc.altivec.vavguw
+// CHECK-LE: @llvm.ppc.altivec.vavguw
/* vec_ceil */
- res_vf = vec_ceil(vf); // CHECK: @llvm.ppc.altivec.vrfip
- res_vf = vec_vrfip(vf); // CHECK: @llvm.ppc.altivec.vrfip
+ res_vf = vec_ceil(vf);
+// CHECK: @llvm.ppc.altivec.vrfip
+// CHECK-LE: @llvm.ppc.altivec.vrfip
+
+ res_vf = vec_vrfip(vf);
+// CHECK: @llvm.ppc.altivec.vrfip
+// CHECK-LE: @llvm.ppc.altivec.vrfip
/* vec_cmpb */
- res_vi = vec_cmpb(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpbfp
- res_vi = vec_vcmpbfp(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpbfp
+ res_vi = vec_cmpb(vf, vf);
+// CHECK: @llvm.ppc.altivec.vcmpbfp
+// CHECK-LE: @llvm.ppc.altivec.vcmpbfp
+
+ res_vi = vec_vcmpbfp(vf, vf);
+// CHECK: @llvm.ppc.altivec.vcmpbfp
+// CHECK-LE: @llvm.ppc.altivec.vcmpbfp
/* vec_cmpeq */
- res_vbc = vec_cmpeq(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpequb
- res_vbc = vec_cmpeq(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpequb
- res_vbs = vec_cmpeq(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpequh
- res_vbs = vec_cmpeq(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpequh
- res_vbi = vec_cmpeq(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpequw
- res_vbi = vec_cmpeq(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpequw
- res_vbi = vec_cmpeq(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpeqfp
+ res_vbc = vec_cmpeq(vsc, vsc);
+// CHECK: @llvm.ppc.altivec.vcmpequb
+// CHECK-LE: @llvm.ppc.altivec.vcmpequb
+
+ res_vbc = vec_cmpeq(vuc, vuc);
+// CHECK: @llvm.ppc.altivec.vcmpequb
+// CHECK-LE: @llvm.ppc.altivec.vcmpequb
+
+ res_vbs = vec_cmpeq(vs, vs);
+// CHECK: @llvm.ppc.altivec.vcmpequh
+// CHECK-LE: @llvm.ppc.altivec.vcmpequh
+
+ res_vbs = vec_cmpeq(vus, vus);
+// CHECK: @llvm.ppc.altivec.vcmpequh
+// CHECK-LE: @llvm.ppc.altivec.vcmpequh
+
+ res_vbi = vec_cmpeq(vi, vi);
+// CHECK: @llvm.ppc.altivec.vcmpequw
+// CHECK-LE: @llvm.ppc.altivec.vcmpequw
+
+ res_vbi = vec_cmpeq(vui, vui);
+// CHECK: @llvm.ppc.altivec.vcmpequw
+// CHECK-LE: @llvm.ppc.altivec.vcmpequw
+
+ res_vbi = vec_cmpeq(vf, vf);
+// CHECK: @llvm.ppc.altivec.vcmpeqfp
+// CHECK-LE: @llvm.ppc.altivec.vcmpeqfp
/* vec_cmpge */
- res_vbi = vec_cmpge(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgefp
- res_vbi = vec_vcmpgefp(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgefp
+ res_vbi = vec_cmpge(vf, vf);
+// CHECK: @llvm.ppc.altivec.vcmpgefp
+// CHECK-LE: @llvm.ppc.altivec.vcmpgefp
+
+ res_vbi = vec_vcmpgefp(vf, vf);
+// CHECK: @llvm.ppc.altivec.vcmpgefp
+// CHECK-LE: @llvm.ppc.altivec.vcmpgefp
}
// CHECK-LABEL: define void @test5
void test5() {
-
+
/* vec_cmpgt */
- res_vbc = vec_cmpgt(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtsb
- res_vbc = vec_cmpgt(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub
- res_vbs = vec_cmpgt(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtsh
- res_vbs = vec_cmpgt(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh
- res_vbi = vec_cmpgt(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw
- res_vbi = vec_cmpgt(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw
- res_vbi = vec_cmpgt(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp
- res_vbc = vec_vcmpgtsb(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtsb
- res_vbc = vec_vcmpgtub(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub
- res_vbs = vec_vcmpgtsh(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtsh
- res_vbs = vec_vcmpgtuh(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh
- res_vbi = vec_vcmpgtsw(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw
- res_vbi = vec_vcmpgtuw(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw
- res_vbi = vec_vcmpgtfp(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp
+ res_vbc = vec_cmpgt(vsc, vsc);
+// CHECK: @llvm.ppc.altivec.vcmpgtsb
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsb
+
+ res_vbc = vec_cmpgt(vuc, vuc);
+// CHECK: @llvm.ppc.altivec.vcmpgtub
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtub
+
+ res_vbs = vec_cmpgt(vs, vs);
+// CHECK: @llvm.ppc.altivec.vcmpgtsh
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsh
+
+ res_vbs = vec_cmpgt(vus, vus);
+// CHECK: @llvm.ppc.altivec.vcmpgtuh
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuh
+
+ res_vbi = vec_cmpgt(vi, vi);
+// CHECK: @llvm.ppc.altivec.vcmpgtsw
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsw
+
+ res_vbi = vec_cmpgt(vui, vui);
+// CHECK: @llvm.ppc.altivec.vcmpgtuw
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuw
+
+ res_vbi = vec_cmpgt(vf, vf);
+// CHECK: @llvm.ppc.altivec.vcmpgtfp
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtfp
+
+ res_vbc = vec_vcmpgtsb(vsc, vsc);
+// CHECK: @llvm.ppc.altivec.vcmpgtsb
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsb
+
+ res_vbc = vec_vcmpgtub(vuc, vuc);
+// CHECK: @llvm.ppc.altivec.vcmpgtub
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtub
+
+ res_vbs = vec_vcmpgtsh(vs, vs);
+// CHECK: @llvm.ppc.altivec.vcmpgtsh
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsh
+
+ res_vbs = vec_vcmpgtuh(vus, vus);
+// CHECK: @llvm.ppc.altivec.vcmpgtuh
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuh
+
+ res_vbi = vec_vcmpgtsw(vi, vi);
+// CHECK: @llvm.ppc.altivec.vcmpgtsw
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsw
+
+ res_vbi = vec_vcmpgtuw(vui, vui);
+// CHECK: @llvm.ppc.altivec.vcmpgtuw
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuw
+
+ res_vbi = vec_vcmpgtfp(vf, vf);
+// CHECK: @llvm.ppc.altivec.vcmpgtfp
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtfp
/* vec_cmple */
- res_vbi = vec_cmple(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgefp
+ res_vbi = vec_cmple(vf, vf);
+// CHECK: @llvm.ppc.altivec.vcmpgefp
+// CHECK-LE: @llvm.ppc.altivec.vcmpgefp
}
// CHECK-LABEL: define void @test6
void test6() {
/* vec_cmplt */
- res_vbc = vec_cmplt(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtsb
- res_vbc = vec_cmplt(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub
- res_vbs = vec_cmplt(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtsh
- res_vbs = vec_cmplt(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh
- res_vbi = vec_cmplt(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw
- res_vbi = vec_cmplt(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw
- res_vbi = vec_cmplt(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp
+ res_vbc = vec_cmplt(vsc, vsc);
+// CHECK: @llvm.ppc.altivec.vcmpgtsb
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsb
+
+ res_vbc = vec_cmplt(vuc, vuc);
+// CHECK: @llvm.ppc.altivec.vcmpgtub
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtub
+
+ res_vbs = vec_cmplt(vs, vs);
+// CHECK: @llvm.ppc.altivec.vcmpgtsh
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsh
+
+ res_vbs = vec_cmplt(vus, vus);
+// CHECK: @llvm.ppc.altivec.vcmpgtuh
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuh
+
+ res_vbi = vec_cmplt(vi, vi);
+// CHECK: @llvm.ppc.altivec.vcmpgtsw
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsw
+
+ res_vbi = vec_cmplt(vui, vui);
+// CHECK: @llvm.ppc.altivec.vcmpgtuw
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuw
+
+ res_vbi = vec_cmplt(vf, vf);
+// CHECK: @llvm.ppc.altivec.vcmpgtfp
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtfp
/* vec_ctf */
- res_vf = vec_ctf(vi, param_i); // CHECK: @llvm.ppc.altivec.vcfsx
- res_vf = vec_ctf(vui, 0); // CHECK: @llvm.ppc.altivec.vcfux
- res_vf = vec_vcfsx(vi, 0); // CHECK: @llvm.ppc.altivec.vcfsx
- res_vf = vec_vcfux(vui, 0); // CHECK: @llvm.ppc.altivec.vcfux
+ res_vf = vec_ctf(vi, param_i);
+// CHECK: @llvm.ppc.altivec.vcfsx
+// CHECK-LE: @llvm.ppc.altivec.vcfsx
+
+ res_vf = vec_ctf(vui, 0);
+// CHECK: @llvm.ppc.altivec.vcfux
+// CHECK-LE: @llvm.ppc.altivec.vcfux
+
+ res_vf = vec_vcfsx(vi, 0);
+// CHECK: @llvm.ppc.altivec.vcfsx
+// CHECK-LE: @llvm.ppc.altivec.vcfsx
+
+ res_vf = vec_vcfux(vui, 0);
+// CHECK: @llvm.ppc.altivec.vcfux
+// CHECK-LE: @llvm.ppc.altivec.vcfux
/* vec_cts */
- res_vi = vec_cts(vf, 0); // CHECK: @llvm.ppc.altivec.vctsxs
- res_vi = vec_vctsxs(vf, 0); // CHECK: @llvm.ppc.altivec.vctsxs
+ res_vi = vec_cts(vf, 0);
+// CHECK: @llvm.ppc.altivec.vctsxs
+// CHECK-LE: @llvm.ppc.altivec.vctsxs
+
+ res_vi = vec_vctsxs(vf, 0);
+// CHECK: @llvm.ppc.altivec.vctsxs
+// CHECK-LE: @llvm.ppc.altivec.vctsxs
/* vec_ctu */
- res_vui = vec_ctu(vf, 0); // CHECK: @llvm.ppc.altivec.vctuxs
- res_vui = vec_vctuxs(vf, 0); // CHECK: @llvm.ppc.altivec.vctuxs
+ res_vui = vec_ctu(vf, 0);
+// CHECK: @llvm.ppc.altivec.vctuxs
+// CHECK-LE: @llvm.ppc.altivec.vctuxs
+
+ res_vui = vec_vctuxs(vf, 0);
+// CHECK: @llvm.ppc.altivec.vctuxs
+// CHECK-LE: @llvm.ppc.altivec.vctuxs
/* vec_dss */
- vec_dss(param_i); // CHECK: @llvm.ppc.altivec.dss
+ vec_dss(param_i);
+// CHECK: @llvm.ppc.altivec.dss
+// CHECK-LE: @llvm.ppc.altivec.dss
/* vec_dssall */
- vec_dssall(); // CHECK: @llvm.ppc.altivec.dssall
+ vec_dssall();
+// CHECK: @llvm.ppc.altivec.dssall
+// CHECK-LE: @llvm.ppc.altivec.dssall
/* vec_dst */
- vec_dst(&vsc, 0, 0); // CHECK: @llvm.ppc.altivec.dst
+ vec_dst(&vsc, 0, 0);
+// CHECK: @llvm.ppc.altivec.dst
+// CHECK-LE: @llvm.ppc.altivec.dst
/* vec_dstst */
- vec_dstst(&vs, 0, 0); // CHECK: @llvm.ppc.altivec.dstst
+ vec_dstst(&vs, 0, 0);
+// CHECK: @llvm.ppc.altivec.dstst
+// CHECK-LE: @llvm.ppc.altivec.dstst
/* vec_dststt */
- vec_dststt(&param_i, 0, 0); // CHECK: @llvm.ppc.altivec.dststt
+ vec_dststt(&param_i, 0, 0);
+// CHECK: @llvm.ppc.altivec.dststt
+// CHECK-LE: @llvm.ppc.altivec.dststt
/* vec_dstt */
- vec_dstt(&vf, 0, 0); // CHECK: @llvm.ppc.altivec.dstt
+ vec_dstt(&vf, 0, 0);
+// CHECK: @llvm.ppc.altivec.dstt
+// CHECK-LE: @llvm.ppc.altivec.dstt
/* vec_expte */
- res_vf = vec_expte(vf); // CHECK: @llvm.ppc.altivec.vexptefp
- res_vf = vec_vexptefp(vf); // CHECK: @llvm.ppc.altivec.vexptefp
+ res_vf = vec_expte(vf);
+// CHECK: @llvm.ppc.altivec.vexptefp
+// CHECK-LE: @llvm.ppc.altivec.vexptefp
+
+ res_vf = vec_vexptefp(vf);
+// CHECK: @llvm.ppc.altivec.vexptefp
+// CHECK-LE: @llvm.ppc.altivec.vexptefp
/* vec_floor */
- res_vf = vec_floor(vf); // CHECK: @llvm.ppc.altivec.vrfim
- res_vf = vec_vrfim(vf); // CHECK: @llvm.ppc.altivec.vrfim
+ res_vf = vec_floor(vf);
+// CHECK: @llvm.ppc.altivec.vrfim
+// CHECK-LE: @llvm.ppc.altivec.vrfim
+
+ res_vf = vec_vrfim(vf);
+// CHECK: @llvm.ppc.altivec.vrfim
+// CHECK-LE: @llvm.ppc.altivec.vrfim
/* vec_ld */
- res_vsc = vec_ld(0, &vsc); // CHECK: @llvm.ppc.altivec.lvx
- res_vsc = vec_ld(0, &param_sc); // CHECK: @llvm.ppc.altivec.lvx
- res_vuc = vec_ld(0, &vuc); // CHECK: @llvm.ppc.altivec.lvx
- res_vuc = vec_ld(0, &param_uc); // CHECK: @llvm.ppc.altivec.lvx
- res_vbc = vec_ld(0, &vbc); // CHECK: @llvm.ppc.altivec.lvx
- res_vs = vec_ld(0, &vs); // CHECK: @llvm.ppc.altivec.lvx
- res_vs = vec_ld(0, &param_s); // CHECK: @llvm.ppc.altivec.lvx
- res_vus = vec_ld(0, &vus); // CHECK: @llvm.ppc.altivec.lvx
- res_vus = vec_ld(0, &param_us); // CHECK: @llvm.ppc.altivec.lvx
- res_vbs = vec_ld(0, &vbs); // CHECK: @llvm.ppc.altivec.lvx
- res_vp = vec_ld(0, &vp); // CHECK: @llvm.ppc.altivec.lvx
- res_vi = vec_ld(0, &vi); // CHECK: @llvm.ppc.altivec.lvx
- res_vi = vec_ld(0, &param_i); // CHECK: @llvm.ppc.altivec.lvx
- res_vui = vec_ld(0, &vui); // CHECK: @llvm.ppc.altivec.lvx
- res_vui = vec_ld(0, &param_ui); // CHECK: @llvm.ppc.altivec.lvx
- res_vbi = vec_ld(0, &vbi); // CHECK: @llvm.ppc.altivec.lvx
- res_vf = vec_ld(0, &vf); // CHECK: @llvm.ppc.altivec.lvx
- res_vf = vec_ld(0, &param_f); // CHECK: @llvm.ppc.altivec.lvx
- res_vsc = vec_lvx(0, &vsc); // CHECK: @llvm.ppc.altivec.lvx
- res_vsc = vec_lvx(0, &param_sc); // CHECK: @llvm.ppc.altivec.lvx
- res_vuc = vec_lvx(0, &vuc); // CHECK: @llvm.ppc.altivec.lvx
- res_vuc = vec_lvx(0, &param_uc); // CHECK: @llvm.ppc.altivec.lvx
- res_vbc = vec_lvx(0, &vbc); // CHECK: @llvm.ppc.altivec.lvx
- res_vs = vec_lvx(0, &vs); // CHECK: @llvm.ppc.altivec.lvx
- res_vs = vec_lvx(0, &param_s); // CHECK: @llvm.ppc.altivec.lvx
- res_vus = vec_lvx(0, &vus); // CHECK: @llvm.ppc.altivec.lvx
- res_vus = vec_lvx(0, &param_us); // CHECK: @llvm.ppc.altivec.lvx
- res_vbs = vec_lvx(0, &vbs); // CHECK: @llvm.ppc.altivec.lvx
- res_vp = vec_lvx(0, &vp); // CHECK: @llvm.ppc.altivec.lvx
- res_vi = vec_lvx(0, &vi); // CHECK: @llvm.ppc.altivec.lvx
- res_vi = vec_lvx(0, &param_i); // CHECK: @llvm.ppc.altivec.lvx
- res_vui = vec_lvx(0, &vui); // CHECK: @llvm.ppc.altivec.lvx
- res_vui = vec_lvx(0, &param_ui); // CHECK: @llvm.ppc.altivec.lvx
- res_vbi = vec_lvx(0, &vbi); // CHECK: @llvm.ppc.altivec.lvx
- res_vf = vec_lvx(0, &vf); // CHECK: @llvm.ppc.altivec.lvx
- res_vf = vec_lvx(0, &param_f); // CHECK: @llvm.ppc.altivec.lvx
+ res_vsc = vec_ld(0, &vsc);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvx
+
+ res_vsc = vec_ld(0, &param_sc);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvx
+
+ res_vuc = vec_ld(0, &vuc);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvx
+
+ res_vuc = vec_ld(0, &param_uc);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvx
+
+ res_vbc = vec_ld(0, &vbc);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvx
+
+ res_vs = vec_ld(0, &vs);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvx
+
+ res_vs = vec_ld(0, &param_s);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvx
+
+ res_vus = vec_ld(0, &vus);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvx
+
+ res_vus = vec_ld(0, &param_us);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvx
+
+ res_vbs = vec_ld(0, &vbs);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvx
+
+ res_vp = vec_ld(0, &vp);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvx
+
+ res_vi = vec_ld(0, &vi);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvx
+
+ res_vi = vec_ld(0, &param_i);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvx
+
+ res_vui = vec_ld(0, &vui);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvx
+
+ res_vui = vec_ld(0, &param_ui);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvx
+
+ res_vbi = vec_ld(0, &vbi);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvx
+
+ res_vf = vec_ld(0, &vf);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvx
+
+ res_vf = vec_ld(0, &param_f);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvx
+
+ res_vsc = vec_lvx(0, &vsc);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvx
+
+ res_vsc = vec_lvx(0, &param_sc);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvx
+
+ res_vuc = vec_lvx(0, &vuc);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvx
+
+ res_vuc = vec_lvx(0, &param_uc);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvx
+
+ res_vbc = vec_lvx(0, &vbc);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvx
+
+ res_vs = vec_lvx(0, &vs);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvx
+
+ res_vs = vec_lvx(0, &param_s);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvx
+
+ res_vus = vec_lvx(0, &vus);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvx
+
+ res_vus = vec_lvx(0, &param_us);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvx
+
+ res_vbs = vec_lvx(0, &vbs);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvx
+
+ res_vp = vec_lvx(0, &vp);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvx
+
+ res_vi = vec_lvx(0, &vi);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvx
+
+ res_vi = vec_lvx(0, &param_i);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvx
+
+ res_vui = vec_lvx(0, &vui);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvx
+
+ res_vui = vec_lvx(0, &param_ui);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvx
+
+ res_vbi = vec_lvx(0, &vbi);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvx
+
+ res_vf = vec_lvx(0, &vf);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvx
+
+ res_vf = vec_lvx(0, &param_f);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvx
/* vec_lde */
- res_vsc = vec_lde(0, &param_sc); // CHECK: @llvm.ppc.altivec.lvebx
- res_vuc = vec_lde(0, &param_uc); // CHECK: @llvm.ppc.altivec.lvebx
- res_vs = vec_lde(0, &param_s); // CHECK: @llvm.ppc.altivec.lvehx
- res_vus = vec_lde(0, &param_us); // CHECK: @llvm.ppc.altivec.lvehx
- res_vi = vec_lde(0, &param_i); // CHECK: @llvm.ppc.altivec.lvewx
- res_vui = vec_lde(0, &param_ui); // CHECK: @llvm.ppc.altivec.lvewx
- res_vf = vec_lde(0, &param_f); // CHECK: @llvm.ppc.altivec.lvewx
- res_vsc = vec_lvebx(0, &param_sc); // CHECK: @llvm.ppc.altivec.lvebx
- res_vuc = vec_lvebx(0, &param_uc); // CHECK: @llvm.ppc.altivec.lvebx
- res_vs = vec_lvehx(0, &param_s); // CHECK: @llvm.ppc.altivec.lvehx
- res_vus = vec_lvehx(0, &param_us); // CHECK: @llvm.ppc.altivec.lvehx
- res_vi = vec_lvewx(0, &param_i); // CHECK: @llvm.ppc.altivec.lvewx
- res_vui = vec_lvewx(0, &param_ui); // CHECK: @llvm.ppc.altivec.lvewx
- res_vf = vec_lvewx(0, &param_f); // CHECK: @llvm.ppc.altivec.lvewx
+ res_vsc = vec_lde(0, &param_sc);
+// CHECK: @llvm.ppc.altivec.lvebx
+// CHECK-LE: @llvm.ppc.altivec.lvebx
+
+ res_vuc = vec_lde(0, &param_uc);
+// CHECK: @llvm.ppc.altivec.lvebx
+// CHECK-LE: @llvm.ppc.altivec.lvebx
+
+ res_vs = vec_lde(0, &param_s);
+// CHECK: @llvm.ppc.altivec.lvehx
+// CHECK-LE: @llvm.ppc.altivec.lvehx
+
+ res_vus = vec_lde(0, &param_us);
+// CHECK: @llvm.ppc.altivec.lvehx
+// CHECK-LE: @llvm.ppc.altivec.lvehx
+
+ res_vi = vec_lde(0, &param_i);
+// CHECK: @llvm.ppc.altivec.lvewx
+// CHECK-LE: @llvm.ppc.altivec.lvewx
+
+ res_vui = vec_lde(0, &param_ui);
+// CHECK: @llvm.ppc.altivec.lvewx
+// CHECK-LE: @llvm.ppc.altivec.lvewx
+
+ res_vf = vec_lde(0, &param_f);
+// CHECK: @llvm.ppc.altivec.lvewx
+// CHECK-LE: @llvm.ppc.altivec.lvewx
+
+ res_vsc = vec_lvebx(0, &param_sc);
+// CHECK: @llvm.ppc.altivec.lvebx
+// CHECK-LE: @llvm.ppc.altivec.lvebx
+
+ res_vuc = vec_lvebx(0, &param_uc);
+// CHECK: @llvm.ppc.altivec.lvebx
+// CHECK-LE: @llvm.ppc.altivec.lvebx
+
+ res_vs = vec_lvehx(0, &param_s);
+// CHECK: @llvm.ppc.altivec.lvehx
+// CHECK-LE: @llvm.ppc.altivec.lvehx
+
+ res_vus = vec_lvehx(0, &param_us);
+// CHECK: @llvm.ppc.altivec.lvehx
+// CHECK-LE: @llvm.ppc.altivec.lvehx
+
+ res_vi = vec_lvewx(0, &param_i);
+// CHECK: @llvm.ppc.altivec.lvewx
+// CHECK-LE: @llvm.ppc.altivec.lvewx
+
+ res_vui = vec_lvewx(0, &param_ui);
+// CHECK: @llvm.ppc.altivec.lvewx
+// CHECK-LE: @llvm.ppc.altivec.lvewx
+
+ res_vf = vec_lvewx(0, &param_f);
+// CHECK: @llvm.ppc.altivec.lvewx
+// CHECK-LE: @llvm.ppc.altivec.lvewx
/* vec_ldl */
- res_vsc = vec_ldl(0, &vsc); // CHECK: @llvm.ppc.altivec.lvxl
- res_vsc = vec_ldl(0, &param_sc); // CHECK: @llvm.ppc.altivec.lvxl
- res_vuc = vec_ldl(0, &vuc); // CHECK: @llvm.ppc.altivec.lvxl
- res_vuc = vec_ldl(0, &param_uc); // CHECK: @llvm.ppc.altivec.lvxl
- res_vbc = vec_ldl(0, &vbc); // CHECK: @llvm.ppc.altivec.lvxl
- res_vs = vec_ldl(0, &vs); // CHECK: @llvm.ppc.altivec.lvxl
- res_vs = vec_ldl(0, &param_s); // CHECK: @llvm.ppc.altivec.lvxl
- res_vus = vec_ldl(0, &vus); // CHECK: @llvm.ppc.altivec.lvxl
- res_vus = vec_ldl(0, &param_us); // CHECK: @llvm.ppc.altivec.lvxl
- res_vbs = vec_ldl(0, &vbs); // CHECK: @llvm.ppc.altivec.lvxl
- res_vp = vec_ldl(0, &vp); // CHECK: @llvm.ppc.altivec.lvxl
- res_vi = vec_ldl(0, &vi); // CHECK: @llvm.ppc.altivec.lvxl
- res_vi = vec_ldl(0, &param_i); // CHECK: @llvm.ppc.altivec.lvxl
- res_vui = vec_ldl(0, &vui); // CHECK: @llvm.ppc.altivec.lvxl
- res_vui = vec_ldl(0, &param_ui); // CHECK: @llvm.ppc.altivec.lvxl
- res_vbi = vec_ldl(0, &vbi); // CHECK: @llvm.ppc.altivec.lvxl
- res_vf = vec_ldl(0, &vf); // CHECK: @llvm.ppc.altivec.lvxl
- res_vf = vec_ldl(0, &param_f); // CHECK: @llvm.ppc.altivec.lvxl
- res_vsc = vec_lvxl(0, &vsc); // CHECK: @llvm.ppc.altivec.lvxl
- res_vsc = vec_lvxl(0, &param_sc); // CHECK: @llvm.ppc.altivec.lvxl
- res_vuc = vec_lvxl(0, &vuc); // CHECK: @llvm.ppc.altivec.lvxl
- res_vbc = vec_lvxl(0, &vbc); // CHECK: @llvm.ppc.altivec.lvxl
- res_vuc = vec_lvxl(0, &param_uc); // CHECK: @llvm.ppc.altivec.lvxl
- res_vs = vec_lvxl(0, &vs); // CHECK: @llvm.ppc.altivec.lvxl
- res_vs = vec_lvxl(0, &param_s); // CHECK: @llvm.ppc.altivec.lvxl
- res_vus = vec_lvxl(0, &vus); // CHECK: @llvm.ppc.altivec.lvxl
- res_vus = vec_lvxl(0, &param_us); // CHECK: @llvm.ppc.altivec.lvxl
- res_vbs = vec_lvxl(0, &vbs); // CHECK: @llvm.ppc.altivec.lvxl
- res_vp = vec_lvxl(0, &vp); // CHECK: @llvm.ppc.altivec.lvxl
- res_vi = vec_lvxl(0, &vi); // CHECK: @llvm.ppc.altivec.lvxl
- res_vi = vec_lvxl(0, &param_i); // CHECK: @llvm.ppc.altivec.lvxl
- res_vui = vec_lvxl(0, &vui); // CHECK: @llvm.ppc.altivec.lvxl
- res_vui = vec_lvxl(0, &param_ui); // CHECK: @llvm.ppc.altivec.lvxl
- res_vbi = vec_lvxl(0, &vbi); // CHECK: @llvm.ppc.altivec.lvxl
- res_vf = vec_lvxl(0, &vf); // CHECK: @llvm.ppc.altivec.lvxl
- res_vf = vec_lvxl(0, &param_f); // CHECK: @llvm.ppc.altivec.lvxl
+ res_vsc = vec_ldl(0, &vsc);
+// CHECK: @llvm.ppc.altivec.lvxl
+// CHECK-LE: @llvm.ppc.altivec.lvxl
+
+ res_vsc = vec_ldl(0, &param_sc);
+// CHECK: @llvm.ppc.altivec.lvxl
+// CHECK-LE: @llvm.ppc.altivec.lvxl
+
+ res_vuc = vec_ldl(0, &vuc);
+// CHECK: @llvm.ppc.altivec.lvxl
+// CHECK-LE: @llvm.ppc.altivec.lvxl
+
+ res_vuc = vec_ldl(0, &param_uc);
+// CHECK: @llvm.ppc.altivec.lvxl
+// CHECK-LE: @llvm.ppc.altivec.lvxl
+
+ res_vbc = vec_ldl(0, &vbc);
+// CHECK: @llvm.ppc.altivec.lvxl
+// CHECK-LE: @llvm.ppc.altivec.lvxl
+
+ res_vs = vec_ldl(0, &vs);
+// CHECK: @llvm.ppc.altivec.lvxl
+// CHECK-LE: @llvm.ppc.altivec.lvxl
+
+ res_vs = vec_ldl(0, &param_s);
+// CHECK: @llvm.ppc.altivec.lvxl
+// CHECK-LE: @llvm.ppc.altivec.lvxl
+
+ res_vus = vec_ldl(0, &vus);
+// CHECK: @llvm.ppc.altivec.lvxl
+// CHECK-LE: @llvm.ppc.altivec.lvxl
+
+ res_vus = vec_ldl(0, &param_us);
+// CHECK: @llvm.ppc.altivec.lvxl
+// CHECK-LE: @llvm.ppc.altivec.lvxl
+
+ res_vbs = vec_ldl(0, &vbs);
+// CHECK: @llvm.ppc.altivec.lvxl
+// CHECK-LE: @llvm.ppc.altivec.lvxl
+
+ res_vp = vec_ldl(0, &vp);
+// CHECK: @llvm.ppc.altivec.lvxl
+// CHECK-LE: @llvm.ppc.altivec.lvxl
+
+ res_vi = vec_ldl(0, &vi);
+// CHECK: @llvm.ppc.altivec.lvxl
+// CHECK-LE: @llvm.ppc.altivec.lvxl
+
+ res_vi = vec_ldl(0, &param_i);
+// CHECK: @llvm.ppc.altivec.lvxl
+// CHECK-LE: @llvm.ppc.altivec.lvxl
+
+ res_vui = vec_ldl(0, &vui);
+// CHECK: @llvm.ppc.altivec.lvxl
+// CHECK-LE: @llvm.ppc.altivec.lvxl
+
+ res_vui = vec_ldl(0, &param_ui);
+// CHECK: @llvm.ppc.altivec.lvxl
+// CHECK-LE: @llvm.ppc.altivec.lvxl
+
+ res_vbi = vec_ldl(0, &vbi);
+// CHECK: @llvm.ppc.altivec.lvxl
+// CHECK-LE: @llvm.ppc.altivec.lvxl
+
+ res_vf = vec_ldl(0, &vf);
+// CHECK: @llvm.ppc.altivec.lvxl
+// CHECK-LE: @llvm.ppc.altivec.lvxl
+
+ res_vf = vec_ldl(0, &param_f);
+// CHECK: @llvm.ppc.altivec.lvxl
+// CHECK-LE: @llvm.ppc.altivec.lvxl
+
+ res_vsc = vec_lvxl(0, &vsc);
+// CHECK: @llvm.ppc.altivec.lvxl
+// CHECK-LE: @llvm.ppc.altivec.lvxl
+
+ res_vsc = vec_lvxl(0, &param_sc);
+// CHECK: @llvm.ppc.altivec.lvxl
+// CHECK-LE: @llvm.ppc.altivec.lvxl
+
+ res_vuc = vec_lvxl(0, &vuc);
+// CHECK: @llvm.ppc.altivec.lvxl
+// CHECK-LE: @llvm.ppc.altivec.lvxl
+
+ res_vbc = vec_lvxl(0, &vbc);
+// CHECK: @llvm.ppc.altivec.lvxl
+// CHECK-LE: @llvm.ppc.altivec.lvxl
+
+ res_vuc = vec_lvxl(0, &param_uc);
+// CHECK: @llvm.ppc.altivec.lvxl
+// CHECK-LE: @llvm.ppc.altivec.lvxl
+
+ res_vs = vec_lvxl(0, &vs);
+// CHECK: @llvm.ppc.altivec.lvxl
+// CHECK-LE: @llvm.ppc.altivec.lvxl
+
+ res_vs = vec_lvxl(0, &param_s);
+// CHECK: @llvm.ppc.altivec.lvxl
+// CHECK-LE: @llvm.ppc.altivec.lvxl
+
+ res_vus = vec_lvxl(0, &vus);
+// CHECK: @llvm.ppc.altivec.lvxl
+// CHECK-LE: @llvm.ppc.altivec.lvxl
+
+ res_vus = vec_lvxl(0, &param_us);
+// CHECK: @llvm.ppc.altivec.lvxl
+// CHECK-LE: @llvm.ppc.altivec.lvxl
+
+ res_vbs = vec_lvxl(0, &vbs);
+// CHECK: @llvm.ppc.altivec.lvxl
+// CHECK-LE: @llvm.ppc.altivec.lvxl
+
+ res_vp = vec_lvxl(0, &vp);
+// CHECK: @llvm.ppc.altivec.lvxl
+// CHECK-LE: @llvm.ppc.altivec.lvxl
+
+ res_vi = vec_lvxl(0, &vi);
+// CHECK: @llvm.ppc.altivec.lvxl
+// CHECK-LE: @llvm.ppc.altivec.lvxl
+
+ res_vi = vec_lvxl(0, &param_i);
+// CHECK: @llvm.ppc.altivec.lvxl
+// CHECK-LE: @llvm.ppc.altivec.lvxl
+
+ res_vui = vec_lvxl(0, &vui);
+// CHECK: @llvm.ppc.altivec.lvxl
+// CHECK-LE: @llvm.ppc.altivec.lvxl
+
+ res_vui = vec_lvxl(0, &param_ui);
+// CHECK: @llvm.ppc.altivec.lvxl
+// CHECK-LE: @llvm.ppc.altivec.lvxl
+
+ res_vbi = vec_lvxl(0, &vbi);
+// CHECK: @llvm.ppc.altivec.lvxl
+// CHECK-LE: @llvm.ppc.altivec.lvxl
+
+ res_vf = vec_lvxl(0, &vf);
+// CHECK: @llvm.ppc.altivec.lvxl
+// CHECK-LE: @llvm.ppc.altivec.lvxl
+
+ res_vf = vec_lvxl(0, &param_f);
+// CHECK: @llvm.ppc.altivec.lvxl
+// CHECK-LE: @llvm.ppc.altivec.lvxl
/* vec_loge */
- res_vf = vec_loge(vf); // CHECK: @llvm.ppc.altivec.vlogefp
- res_vf = vec_vlogefp(vf); // CHECK: @llvm.ppc.altivec.vlogefp
+ res_vf = vec_loge(vf);
+// CHECK: @llvm.ppc.altivec.vlogefp
+// CHECK-LE: @llvm.ppc.altivec.vlogefp
+
+ res_vf = vec_vlogefp(vf);
+// CHECK: @llvm.ppc.altivec.vlogefp
+// CHECK-LE: @llvm.ppc.altivec.vlogefp
/* vec_lvsl */
- res_vuc = vec_lvsl(0, &param_i); // CHECK: @llvm.ppc.altivec.lvsl
+ res_vuc = vec_lvsl(0, &param_i);
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK-LE: @llvm.ppc.altivec.lvsl
/* vec_lvsr */
- res_vuc = vec_lvsr(0, &param_i); // CHECK: @llvm.ppc.altivec.lvsr
+ res_vuc = vec_lvsr(0, &param_i);
+// CHECK: @llvm.ppc.altivec.lvsr
+// CHECK-LE: @llvm.ppc.altivec.lvsr
/* vec_madd */
- res_vf =vec_madd(vf, vf, vf); // CHECK: @llvm.ppc.altivec.vmaddfp
- res_vf = vec_vmaddfp(vf, vf, vf); // CHECK: @llvm.ppc.altivec.vmaddfp
+ res_vf = vec_madd(vf, vf, vf);
+// CHECK: @llvm.ppc.altivec.vmaddfp
+// CHECK-LE: @llvm.ppc.altivec.vmaddfp
+
+ res_vf = vec_vmaddfp(vf, vf, vf);
+// CHECK: @llvm.ppc.altivec.vmaddfp
+// CHECK-LE: @llvm.ppc.altivec.vmaddfp
/* vec_madds */
- res_vs = vec_madds(vs, vs, vs); // CHECK: @llvm.ppc.altivec.vmhaddshs
- res_vs = vec_vmhaddshs(vs, vs, vs); // CHECK: @llvm.ppc.altivec.vmhaddshs
+ res_vs = vec_madds(vs, vs, vs);
+// CHECK: @llvm.ppc.altivec.vmhaddshs
+// CHECK-LE: @llvm.ppc.altivec.vmhaddshs
+
+ res_vs = vec_vmhaddshs(vs, vs, vs);
+// CHECK: @llvm.ppc.altivec.vmhaddshs
+// CHECK-LE: @llvm.ppc.altivec.vmhaddshs
/* vec_max */
- res_vsc = vec_max(vsc, vsc); // CHECK: @llvm.ppc.altivec.vmaxsb
- res_vsc = vec_max(vbc, vsc); // CHECK: @llvm.ppc.altivec.vmaxsb
- res_vsc = vec_max(vsc, vbc); // CHECK: @llvm.ppc.altivec.vmaxsb
- res_vuc = vec_max(vuc, vuc); // CHECK: @llvm.ppc.altivec.vmaxub
- res_vuc = vec_max(vbc, vuc); // CHECK: @llvm.ppc.altivec.vmaxub
- res_vuc = vec_max(vuc, vbc); // CHECK: @llvm.ppc.altivec.vmaxub
- res_vs = vec_max(vs, vs); // CHECK: @llvm.ppc.altivec.vmaxsh
- res_vs = vec_max(vbs, vs); // CHECK: @llvm.ppc.altivec.vmaxsh
- res_vs = vec_max(vs, vbs); // CHECK: @llvm.ppc.altivec.vmaxsh
- res_vus = vec_max(vus, vus); // CHECK: @llvm.ppc.altivec.vmaxuh
- res_vus = vec_max(vbs, vus); // CHECK: @llvm.ppc.altivec.vmaxuh
- res_vus = vec_max(vus, vbs); // CHECK: @llvm.ppc.altivec.vmaxuh
- res_vi = vec_max(vi, vi); // CHECK: @llvm.ppc.altivec.vmaxsw
- res_vi = vec_max(vbi, vi); // CHECK: @llvm.ppc.altivec.vmaxsw
- res_vi = vec_max(vi, vbi); // CHECK: @llvm.ppc.altivec.vmaxsw
- res_vui = vec_max(vui, vui); // CHECK: @llvm.ppc.altivec.vmaxuw
- res_vui = vec_max(vbi, vui); // CHECK: @llvm.ppc.altivec.vmaxuw
- res_vui = vec_max(vui, vbi); // CHECK: @llvm.ppc.altivec.vmaxuw
- res_vf = vec_max(vf, vf); // CHECK: @llvm.ppc.altivec.vmaxfp
- res_vsc = vec_vmaxsb(vsc, vsc); // CHECK: @llvm.ppc.altivec.vmaxsb
- res_vsc = vec_vmaxsb(vbc, vsc); // CHECK: @llvm.ppc.altivec.vmaxsb
- res_vsc = vec_vmaxsb(vsc, vbc); // CHECK: @llvm.ppc.altivec.vmaxsb
- res_vuc = vec_vmaxub(vuc, vuc); // CHECK: @llvm.ppc.altivec.vmaxub
- res_vuc = vec_vmaxub(vbc, vuc); // CHECK: @llvm.ppc.altivec.vmaxub
- res_vuc = vec_vmaxub(vuc, vbc); // CHECK: @llvm.ppc.altivec.vmaxub
- res_vs = vec_vmaxsh(vs, vs); // CHECK: @llvm.ppc.altivec.vmaxsh
- res_vs = vec_vmaxsh(vbs, vs); // CHECK: @llvm.ppc.altivec.vmaxsh
- res_vs = vec_vmaxsh(vs, vbs); // CHECK: @llvm.ppc.altivec.vmaxsh
- res_vus = vec_vmaxuh(vus, vus); // CHECK: @llvm.ppc.altivec.vmaxuh
- res_vus = vec_vmaxuh(vbs, vus); // CHECK: @llvm.ppc.altivec.vmaxuh
- res_vus = vec_vmaxuh(vus, vbs); // CHECK: @llvm.ppc.altivec.vmaxuh
- res_vi = vec_vmaxsw(vi, vi); // CHECK: @llvm.ppc.altivec.vmaxsw
- res_vi = vec_vmaxsw(vbi, vi); // CHECK: @llvm.ppc.altivec.vmaxsw
- res_vi = vec_vmaxsw(vi, vbi); // CHECK: @llvm.ppc.altivec.vmaxsw
- res_vui = vec_vmaxuw(vui, vui); // CHECK: @llvm.ppc.altivec.vmaxuw
- res_vui = vec_vmaxuw(vbi, vui); // CHECK: @llvm.ppc.altivec.vmaxuw
- res_vui = vec_vmaxuw(vui, vbi); // CHECK: @llvm.ppc.altivec.vmaxuw
- res_vf = vec_vmaxfp(vf, vf); // CHECK: @llvm.ppc.altivec.vmaxfp
+ res_vsc = vec_max(vsc, vsc);
+// CHECK: @llvm.ppc.altivec.vmaxsb
+// CHECK-LE: @llvm.ppc.altivec.vmaxsb
+
+ res_vsc = vec_max(vbc, vsc);
+// CHECK: @llvm.ppc.altivec.vmaxsb
+// CHECK-LE: @llvm.ppc.altivec.vmaxsb
+
+ res_vsc = vec_max(vsc, vbc);
+// CHECK: @llvm.ppc.altivec.vmaxsb
+// CHECK-LE: @llvm.ppc.altivec.vmaxsb
+
+ res_vuc = vec_max(vuc, vuc);
+// CHECK: @llvm.ppc.altivec.vmaxub
+// CHECK-LE: @llvm.ppc.altivec.vmaxub
+
+ res_vuc = vec_max(vbc, vuc);
+// CHECK: @llvm.ppc.altivec.vmaxub
+// CHECK-LE: @llvm.ppc.altivec.vmaxub
+
+ res_vuc = vec_max(vuc, vbc);
+// CHECK: @llvm.ppc.altivec.vmaxub
+// CHECK-LE: @llvm.ppc.altivec.vmaxub
+
+ res_vs = vec_max(vs, vs);
+// CHECK: @llvm.ppc.altivec.vmaxsh
+// CHECK-LE: @llvm.ppc.altivec.vmaxsh
+
+ res_vs = vec_max(vbs, vs);
+// CHECK: @llvm.ppc.altivec.vmaxsh
+// CHECK-LE: @llvm.ppc.altivec.vmaxsh
+
+ res_vs = vec_max(vs, vbs);
+// CHECK: @llvm.ppc.altivec.vmaxsh
+// CHECK-LE: @llvm.ppc.altivec.vmaxsh
+
+ res_vus = vec_max(vus, vus);
+// CHECK: @llvm.ppc.altivec.vmaxuh
+// CHECK-LE: @llvm.ppc.altivec.vmaxuh
+
+ res_vus = vec_max(vbs, vus);
+// CHECK: @llvm.ppc.altivec.vmaxuh
+// CHECK-LE: @llvm.ppc.altivec.vmaxuh
+
+ res_vus = vec_max(vus, vbs);
+// CHECK: @llvm.ppc.altivec.vmaxuh
+// CHECK-LE: @llvm.ppc.altivec.vmaxuh
+
+ res_vi = vec_max(vi, vi);
+// CHECK: @llvm.ppc.altivec.vmaxsw
+// CHECK-LE: @llvm.ppc.altivec.vmaxsw
+
+ res_vi = vec_max(vbi, vi);
+// CHECK: @llvm.ppc.altivec.vmaxsw
+// CHECK-LE: @llvm.ppc.altivec.vmaxsw
+
+ res_vi = vec_max(vi, vbi);
+// CHECK: @llvm.ppc.altivec.vmaxsw
+// CHECK-LE: @llvm.ppc.altivec.vmaxsw
+
+ res_vui = vec_max(vui, vui);
+// CHECK: @llvm.ppc.altivec.vmaxuw
+// CHECK-LE: @llvm.ppc.altivec.vmaxuw
+
+ res_vui = vec_max(vbi, vui);
+// CHECK: @llvm.ppc.altivec.vmaxuw
+// CHECK-LE: @llvm.ppc.altivec.vmaxuw
+
+ res_vui = vec_max(vui, vbi);
+// CHECK: @llvm.ppc.altivec.vmaxuw
+// CHECK-LE: @llvm.ppc.altivec.vmaxuw
+
+ res_vf = vec_max(vf, vf);
+// CHECK: @llvm.ppc.altivec.vmaxfp
+// CHECK-LE: @llvm.ppc.altivec.vmaxfp
+
+ res_vsc = vec_vmaxsb(vsc, vsc);
+// CHECK: @llvm.ppc.altivec.vmaxsb
+// CHECK-LE: @llvm.ppc.altivec.vmaxsb
+
+ res_vsc = vec_vmaxsb(vbc, vsc);
+// CHECK: @llvm.ppc.altivec.vmaxsb
+// CHECK-LE: @llvm.ppc.altivec.vmaxsb
+
+ res_vsc = vec_vmaxsb(vsc, vbc);
+// CHECK: @llvm.ppc.altivec.vmaxsb
+// CHECK-LE: @llvm.ppc.altivec.vmaxsb
+
+ res_vuc = vec_vmaxub(vuc, vuc);
+// CHECK: @llvm.ppc.altivec.vmaxub
+// CHECK-LE: @llvm.ppc.altivec.vmaxub
+
+ res_vuc = vec_vmaxub(vbc, vuc);
+// CHECK: @llvm.ppc.altivec.vmaxub
+// CHECK-LE: @llvm.ppc.altivec.vmaxub
+
+ res_vuc = vec_vmaxub(vuc, vbc);
+// CHECK: @llvm.ppc.altivec.vmaxub
+// CHECK-LE: @llvm.ppc.altivec.vmaxub
+
+ res_vs = vec_vmaxsh(vs, vs);
+// CHECK: @llvm.ppc.altivec.vmaxsh
+// CHECK-LE: @llvm.ppc.altivec.vmaxsh
+
+ res_vs = vec_vmaxsh(vbs, vs);
+// CHECK: @llvm.ppc.altivec.vmaxsh
+// CHECK-LE: @llvm.ppc.altivec.vmaxsh
+
+ res_vs = vec_vmaxsh(vs, vbs);
+// CHECK: @llvm.ppc.altivec.vmaxsh
+// CHECK-LE: @llvm.ppc.altivec.vmaxsh
+
+ res_vus = vec_vmaxuh(vus, vus);
+// CHECK: @llvm.ppc.altivec.vmaxuh
+// CHECK-LE: @llvm.ppc.altivec.vmaxuh
+
+ res_vus = vec_vmaxuh(vbs, vus);
+// CHECK: @llvm.ppc.altivec.vmaxuh
+// CHECK-LE: @llvm.ppc.altivec.vmaxuh
+
+ res_vus = vec_vmaxuh(vus, vbs);
+// CHECK: @llvm.ppc.altivec.vmaxuh
+// CHECK-LE: @llvm.ppc.altivec.vmaxuh
+
+ res_vi = vec_vmaxsw(vi, vi);
+// CHECK: @llvm.ppc.altivec.vmaxsw
+// CHECK-LE: @llvm.ppc.altivec.vmaxsw
+
+ res_vi = vec_vmaxsw(vbi, vi);
+// CHECK: @llvm.ppc.altivec.vmaxsw
+// CHECK-LE: @llvm.ppc.altivec.vmaxsw
+
+ res_vi = vec_vmaxsw(vi, vbi);
+// CHECK: @llvm.ppc.altivec.vmaxsw
+// CHECK-LE: @llvm.ppc.altivec.vmaxsw
+
+ res_vui = vec_vmaxuw(vui, vui);
+// CHECK: @llvm.ppc.altivec.vmaxuw
+// CHECK-LE: @llvm.ppc.altivec.vmaxuw
+
+ res_vui = vec_vmaxuw(vbi, vui);
+// CHECK: @llvm.ppc.altivec.vmaxuw
+// CHECK-LE: @llvm.ppc.altivec.vmaxuw
+
+ res_vui = vec_vmaxuw(vui, vbi);
+// CHECK: @llvm.ppc.altivec.vmaxuw
+// CHECK-LE: @llvm.ppc.altivec.vmaxuw
+
+ res_vf = vec_vmaxfp(vf, vf);
+// CHECK: @llvm.ppc.altivec.vmaxfp
+// CHECK-LE: @llvm.ppc.altivec.vmaxfp
/* vec_mergeh */
- res_vsc = vec_mergeh(vsc, vsc); // CHECK: @llvm.ppc.altivec.vperm
- res_vuc = vec_mergeh(vuc, vuc); // CHECK: @llvm.ppc.altivec.vperm
- res_vbc = vec_mergeh(vbc, vbc); // CHECK: @llvm.ppc.altivec.vperm
- res_vs = vec_mergeh(vs, vs); // CHECK: @llvm.ppc.altivec.vperm
- res_vp = vec_mergeh(vp, vp); // CHECK: @llvm.ppc.altivec.vperm
- res_vus = vec_mergeh(vus, vus); // CHECK: @llvm.ppc.altivec.vperm
- res_vbs = vec_mergeh(vbs, vbs); // CHECK: @llvm.ppc.altivec.vperm
- res_vi = vec_mergeh(vi, vi); // CHECK: @llvm.ppc.altivec.vperm
- res_vui = vec_mergeh(vui, vui); // CHECK: @llvm.ppc.altivec.vperm
- res_vbi = vec_mergeh(vbi, vbi); // CHECK: @llvm.ppc.altivec.vperm
- res_vf = vec_mergeh(vf, vf); // CHECK: @llvm.ppc.altivec.vperm
- res_vsc = vec_vmrghb(vsc, vsc); // CHECK: @llvm.ppc.altivec.vperm
- res_vuc = vec_vmrghb(vuc, vuc); // CHECK: @llvm.ppc.altivec.vperm
- res_vbc = vec_vmrghb(vbc, vbc); // CHECK: @llvm.ppc.altivec.vperm
- res_vs = vec_vmrghh(vs, vs); // CHECK: @llvm.ppc.altivec.vperm
- res_vp = vec_vmrghh(vp, vp); // CHECK: @llvm.ppc.altivec.vperm
- res_vus = vec_vmrghh(vus, vus); // CHECK: @llvm.ppc.altivec.vperm
- res_vbs = vec_vmrghh(vbs, vbs); // CHECK: @llvm.ppc.altivec.vperm
- res_vi = vec_vmrghw(vi, vi); // CHECK: @llvm.ppc.altivec.vperm
- res_vui = vec_vmrghw(vui, vui); // CHECK: @llvm.ppc.altivec.vperm
- res_vbi = vec_vmrghw(vbi, vbi); // CHECK: @llvm.ppc.altivec.vperm
- res_vf = vec_vmrghw(vf, vf); // CHECK: @llvm.ppc.altivec.vperm
+ res_vsc = vec_mergeh(vsc, vsc);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vuc = vec_mergeh(vuc, vuc);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vbc = vec_mergeh(vbc, vbc);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vs = vec_mergeh(vs, vs);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vp = vec_mergeh(vp, vp);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vus = vec_mergeh(vus, vus);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vbs = vec_mergeh(vbs, vbs);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vi = vec_mergeh(vi, vi);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vui = vec_mergeh(vui, vui);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vbi = vec_mergeh(vbi, vbi);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vf = vec_mergeh(vf, vf);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vsc = vec_vmrghb(vsc, vsc);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vuc = vec_vmrghb(vuc, vuc);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vbc = vec_vmrghb(vbc, vbc);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vs = vec_vmrghh(vs, vs);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vp = vec_vmrghh(vp, vp);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vus = vec_vmrghh(vus, vus);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vbs = vec_vmrghh(vbs, vbs);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vi = vec_vmrghw(vi, vi);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vui = vec_vmrghw(vui, vui);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vbi = vec_vmrghw(vbi, vbi);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vf = vec_vmrghw(vf, vf);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
/* vec_mergel */
- res_vsc = vec_mergel(vsc, vsc); // CHECK: @llvm.ppc.altivec.vperm
- res_vuc = vec_mergel(vuc, vuc); // CHECK: @llvm.ppc.altivec.vperm
- res_vbc = vec_mergel(vbc, vbc); // CHECK: @llvm.ppc.altivec.vperm
- res_vs = vec_mergel(vs, vs); // CHECK: @llvm.ppc.altivec.vperm
- res_vp = vec_mergeh(vp, vp); // CHECK: @llvm.ppc.altivec.vperm
- res_vus = vec_mergel(vus, vus); // CHECK: @llvm.ppc.altivec.vperm
- res_vbs = vec_mergel(vbs, vbs); // CHECK: @llvm.ppc.altivec.vperm
- res_vi = vec_mergel(vi, vi); // CHECK: @llvm.ppc.altivec.vperm
- res_vui = vec_mergel(vui, vui); // CHECK: @llvm.ppc.altivec.vperm
- res_vbi = vec_mergel(vbi, vbi); // CHECK: @llvm.ppc.altivec.vperm
- res_vf = vec_mergel(vf, vf); // CHECK: @llvm.ppc.altivec.vperm
- res_vsc = vec_vmrglb(vsc, vsc); // CHECK: @llvm.ppc.altivec.vperm
- res_vuc = vec_vmrglb(vuc, vuc); // CHECK: @llvm.ppc.altivec.vperm
- res_vbc = vec_vmrglb(vbc, vbc); // CHECK: @llvm.ppc.altivec.vperm
- res_vs = vec_vmrglh(vs, vs); // CHECK: @llvm.ppc.altivec.vperm
- res_vp = vec_vmrglh(vp, vp); // CHECK: @llvm.ppc.altivec.vperm
- res_vus = vec_vmrglh(vus, vus); // CHECK: @llvm.ppc.altivec.vperm
- res_vbs = vec_vmrglh(vbs, vbs); // CHECK: @llvm.ppc.altivec.vperm
- res_vi = vec_vmrglw(vi, vi); // CHECK: @llvm.ppc.altivec.vperm
- res_vui = vec_vmrglw(vui, vui); // CHECK: @llvm.ppc.altivec.vperm
- res_vbi = vec_vmrglw(vbi, vbi); // CHECK: @llvm.ppc.altivec.vperm
- res_vf = vec_vmrglw(vf, vf); // CHECK: @llvm.ppc.altivec.vperm
+ res_vsc = vec_mergel(vsc, vsc);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vuc = vec_mergel(vuc, vuc);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vbc = vec_mergel(vbc, vbc);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vs = vec_mergel(vs, vs);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vp = vec_mergeh(vp, vp);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vus = vec_mergel(vus, vus);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vbs = vec_mergel(vbs, vbs);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vi = vec_mergel(vi, vi);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vui = vec_mergel(vui, vui);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vbi = vec_mergel(vbi, vbi);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vf = vec_mergel(vf, vf);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vsc = vec_vmrglb(vsc, vsc);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vuc = vec_vmrglb(vuc, vuc);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vbc = vec_vmrglb(vbc, vbc);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vs = vec_vmrglh(vs, vs);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vp = vec_vmrglh(vp, vp);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vus = vec_vmrglh(vus, vus);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vbs = vec_vmrglh(vbs, vbs);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vi = vec_vmrglw(vi, vi);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vui = vec_vmrglw(vui, vui);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vbi = vec_vmrglw(vbi, vbi);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vf = vec_vmrglw(vf, vf);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
/* vec_mfvscr */
- vus = vec_mfvscr(); // CHECK: @llvm.ppc.altivec.mfvscr
+ vus = vec_mfvscr();
+// CHECK: @llvm.ppc.altivec.mfvscr
+// CHECK-LE: @llvm.ppc.altivec.mfvscr
/* vec_min */
- res_vsc = vec_min(vsc, vsc); // CHECK: @llvm.ppc.altivec.vminsb
- res_vsc = vec_min(vbc, vsc); // CHECK: @llvm.ppc.altivec.vminsb
- res_vsc = vec_min(vsc, vbc); // CHECK: @llvm.ppc.altivec.vminsb
- res_vuc = vec_min(vuc, vuc); // CHECK: @llvm.ppc.altivec.vminub
- res_vuc = vec_min(vbc, vuc); // CHECK: @llvm.ppc.altivec.vminub
- res_vuc = vec_min(vuc, vbc); // CHECK: @llvm.ppc.altivec.vminub
- res_vs = vec_min(vs, vs); // CHECK: @llvm.ppc.altivec.vminsh
- res_vs = vec_min(vbs, vs); // CHECK: @llvm.ppc.altivec.vminsh
- res_vs = vec_min(vs, vbs); // CHECK: @llvm.ppc.altivec.vminsh
- res_vus = vec_min(vus, vus); // CHECK: @llvm.ppc.altivec.vminuh
- res_vus = vec_min(vbs, vus); // CHECK: @llvm.ppc.altivec.vminuh
- res_vus = vec_min(vus, vbs); // CHECK: @llvm.ppc.altivec.vminuh
- res_vi = vec_min(vi, vi); // CHECK: @llvm.ppc.altivec.vminsw
- res_vi = vec_min(vbi, vi); // CHECK: @llvm.ppc.altivec.vminsw
- res_vi = vec_min(vi, vbi); // CHECK: @llvm.ppc.altivec.vminsw
- res_vui = vec_min(vui, vui); // CHECK: @llvm.ppc.altivec.vminuw
- res_vui = vec_min(vbi, vui); // CHECK: @llvm.ppc.altivec.vminuw
- res_vui = vec_min(vui, vbi); // CHECK: @llvm.ppc.altivec.vminuw
- res_vf = vec_min(vf, vf); // CHECK: @llvm.ppc.altivec.vminfp
- res_vsc = vec_vminsb(vsc, vsc); // CHECK: @llvm.ppc.altivec.vminsb
- res_vsc = vec_vminsb(vbc, vsc); // CHECK: @llvm.ppc.altivec.vminsb
- res_vsc = vec_vminsb(vsc, vbc); // CHECK: @llvm.ppc.altivec.vminsb
- res_vuc = vec_vminub(vuc, vuc); // CHECK: @llvm.ppc.altivec.vminub
- res_vuc = vec_vminub(vbc, vuc); // CHECK: @llvm.ppc.altivec.vminub
- res_vuc = vec_vminub(vuc, vbc); // CHECK: @llvm.ppc.altivec.vminub
- res_vs = vec_vminsh(vs, vs); // CHECK: @llvm.ppc.altivec.vminsh
- res_vs = vec_vminsh(vbs, vs); // CHECK: @llvm.ppc.altivec.vminsh
- res_vs = vec_vminsh(vs, vbs); // CHECK: @llvm.ppc.altivec.vminsh
- res_vus = vec_vminuh(vus, vus); // CHECK: @llvm.ppc.altivec.vminuh
- res_vus = vec_vminuh(vbs, vus); // CHECK: @llvm.ppc.altivec.vminuh
- res_vus = vec_vminuh(vus, vbs); // CHECK: @llvm.ppc.altivec.vminuh
- res_vi = vec_vminsw(vi, vi); // CHECK: @llvm.ppc.altivec.vminsw
- res_vi = vec_vminsw(vbi, vi); // CHECK: @llvm.ppc.altivec.vminsw
- res_vi = vec_vminsw(vi, vbi); // CHECK: @llvm.ppc.altivec.vminsw
- res_vui = vec_vminuw(vui, vui); // CHECK: @llvm.ppc.altivec.vminuw
- res_vui = vec_vminuw(vbi, vui); // CHECK: @llvm.ppc.altivec.vminuw
- res_vui = vec_vminuw(vui, vbi); // CHECK: @llvm.ppc.altivec.vminuw
- res_vf = vec_vminfp(vf, vf); // CHECK: @llvm.ppc.altivec.vminfp
+ res_vsc = vec_min(vsc, vsc);
+// CHECK: @llvm.ppc.altivec.vminsb
+// CHECK-LE: @llvm.ppc.altivec.vminsb
+
+ res_vsc = vec_min(vbc, vsc);
+// CHECK: @llvm.ppc.altivec.vminsb
+// CHECK-LE: @llvm.ppc.altivec.vminsb
+
+ res_vsc = vec_min(vsc, vbc);
+// CHECK: @llvm.ppc.altivec.vminsb
+// CHECK-LE: @llvm.ppc.altivec.vminsb
+
+ res_vuc = vec_min(vuc, vuc);
+// CHECK: @llvm.ppc.altivec.vminub
+// CHECK-LE: @llvm.ppc.altivec.vminub
+
+ res_vuc = vec_min(vbc, vuc);
+// CHECK: @llvm.ppc.altivec.vminub
+// CHECK-LE: @llvm.ppc.altivec.vminub
+
+ res_vuc = vec_min(vuc, vbc);
+// CHECK: @llvm.ppc.altivec.vminub
+// CHECK-LE: @llvm.ppc.altivec.vminub
+
+ res_vs = vec_min(vs, vs);
+// CHECK: @llvm.ppc.altivec.vminsh
+// CHECK-LE: @llvm.ppc.altivec.vminsh
+
+ res_vs = vec_min(vbs, vs);
+// CHECK: @llvm.ppc.altivec.vminsh
+// CHECK-LE: @llvm.ppc.altivec.vminsh
+
+ res_vs = vec_min(vs, vbs);
+// CHECK: @llvm.ppc.altivec.vminsh
+// CHECK-LE: @llvm.ppc.altivec.vminsh
+
+ res_vus = vec_min(vus, vus);
+// CHECK: @llvm.ppc.altivec.vminuh
+// CHECK-LE: @llvm.ppc.altivec.vminuh
+
+ res_vus = vec_min(vbs, vus);
+// CHECK: @llvm.ppc.altivec.vminuh
+// CHECK-LE: @llvm.ppc.altivec.vminuh
+
+ res_vus = vec_min(vus, vbs);
+// CHECK: @llvm.ppc.altivec.vminuh
+// CHECK-LE: @llvm.ppc.altivec.vminuh
+
+ res_vi = vec_min(vi, vi);
+// CHECK: @llvm.ppc.altivec.vminsw
+// CHECK-LE: @llvm.ppc.altivec.vminsw
+
+ res_vi = vec_min(vbi, vi);
+// CHECK: @llvm.ppc.altivec.vminsw
+// CHECK-LE: @llvm.ppc.altivec.vminsw
+
+ res_vi = vec_min(vi, vbi);
+// CHECK: @llvm.ppc.altivec.vminsw
+// CHECK-LE: @llvm.ppc.altivec.vminsw
+
+ res_vui = vec_min(vui, vui);
+// CHECK: @llvm.ppc.altivec.vminuw
+// CHECK-LE: @llvm.ppc.altivec.vminuw
+
+ res_vui = vec_min(vbi, vui);
+// CHECK: @llvm.ppc.altivec.vminuw
+// CHECK-LE: @llvm.ppc.altivec.vminuw
+
+ res_vui = vec_min(vui, vbi);
+// CHECK: @llvm.ppc.altivec.vminuw
+// CHECK-LE: @llvm.ppc.altivec.vminuw
+
+ res_vf = vec_min(vf, vf);
+// CHECK: @llvm.ppc.altivec.vminfp
+// CHECK-LE: @llvm.ppc.altivec.vminfp
+
+ res_vsc = vec_vminsb(vsc, vsc);
+// CHECK: @llvm.ppc.altivec.vminsb
+// CHECK-LE: @llvm.ppc.altivec.vminsb
+
+ res_vsc = vec_vminsb(vbc, vsc);
+// CHECK: @llvm.ppc.altivec.vminsb
+// CHECK-LE: @llvm.ppc.altivec.vminsb
+
+ res_vsc = vec_vminsb(vsc, vbc);
+// CHECK: @llvm.ppc.altivec.vminsb
+// CHECK-LE: @llvm.ppc.altivec.vminsb
+
+ res_vuc = vec_vminub(vuc, vuc);
+// CHECK: @llvm.ppc.altivec.vminub
+// CHECK-LE: @llvm.ppc.altivec.vminub
+
+ res_vuc = vec_vminub(vbc, vuc);
+// CHECK: @llvm.ppc.altivec.vminub
+// CHECK-LE: @llvm.ppc.altivec.vminub
+
+ res_vuc = vec_vminub(vuc, vbc);
+// CHECK: @llvm.ppc.altivec.vminub
+// CHECK-LE: @llvm.ppc.altivec.vminub
+
+ res_vs = vec_vminsh(vs, vs);
+// CHECK: @llvm.ppc.altivec.vminsh
+// CHECK-LE: @llvm.ppc.altivec.vminsh
+
+ res_vs = vec_vminsh(vbs, vs);
+// CHECK: @llvm.ppc.altivec.vminsh
+// CHECK-LE: @llvm.ppc.altivec.vminsh
+
+ res_vs = vec_vminsh(vs, vbs);
+// CHECK: @llvm.ppc.altivec.vminsh
+// CHECK-LE: @llvm.ppc.altivec.vminsh
+
+ res_vus = vec_vminuh(vus, vus);
+// CHECK: @llvm.ppc.altivec.vminuh
+// CHECK-LE: @llvm.ppc.altivec.vminuh
+
+ res_vus = vec_vminuh(vbs, vus);
+// CHECK: @llvm.ppc.altivec.vminuh
+// CHECK-LE: @llvm.ppc.altivec.vminuh
+
+ res_vus = vec_vminuh(vus, vbs);
+// CHECK: @llvm.ppc.altivec.vminuh
+// CHECK-LE: @llvm.ppc.altivec.vminuh
+
+ res_vi = vec_vminsw(vi, vi);
+// CHECK: @llvm.ppc.altivec.vminsw
+// CHECK-LE: @llvm.ppc.altivec.vminsw
+
+ res_vi = vec_vminsw(vbi, vi);
+// CHECK: @llvm.ppc.altivec.vminsw
+// CHECK-LE: @llvm.ppc.altivec.vminsw
+
+ res_vi = vec_vminsw(vi, vbi);
+// CHECK: @llvm.ppc.altivec.vminsw
+// CHECK-LE: @llvm.ppc.altivec.vminsw
+
+ res_vui = vec_vminuw(vui, vui);
+// CHECK: @llvm.ppc.altivec.vminuw
+// CHECK-LE: @llvm.ppc.altivec.vminuw
+
+ res_vui = vec_vminuw(vbi, vui);
+// CHECK: @llvm.ppc.altivec.vminuw
+// CHECK-LE: @llvm.ppc.altivec.vminuw
+
+ res_vui = vec_vminuw(vui, vbi);
+// CHECK: @llvm.ppc.altivec.vminuw
+// CHECK-LE: @llvm.ppc.altivec.vminuw
+
+ res_vf = vec_vminfp(vf, vf);
+// CHECK: @llvm.ppc.altivec.vminfp
+// CHECK-LE: @llvm.ppc.altivec.vminfp
/* vec_mladd */
- res_vus = vec_mladd(vus, vus, vus); // CHECK: mul <8 x i16>
- // CHECK: add <8 x i16>
-
- res_vs = vec_mladd(vus, vs, vs); // CHECK: mul <8 x i16>
- // CHECK: add <8 x i16>
-
- res_vs = vec_mladd(vs, vus, vus); // CHECK: mul <8 x i16>
- // CHECK: add <8 x i16>
-
- res_vs = vec_mladd(vs, vs, vs); // CHECK: mul <8 x i16>
- // CHECK: add <8 x i16>
+ res_vus = vec_mladd(vus, vus, vus);
+// CHECK: mul <8 x i16>
+// CHECK: add <8 x i16>
+// CHECK-LE: mul <8 x i16>
+// CHECK-LE: add <8 x i16>
+
+ res_vs = vec_mladd(vus, vs, vs);
+// CHECK: mul <8 x i16>
+// CHECK: add <8 x i16>
+// CHECK-LE: mul <8 x i16>
+// CHECK-LE: add <8 x i16>
+
+ res_vs = vec_mladd(vs, vus, vus);
+// CHECK: mul <8 x i16>
+// CHECK: add <8 x i16>
+// CHECK-LE: mul <8 x i16>
+// CHECK-LE: add <8 x i16>
+
+ res_vs = vec_mladd(vs, vs, vs);
+// CHECK: mul <8 x i16>
+// CHECK: add <8 x i16>
+// CHECK-LE: mul <8 x i16>
+// CHECK-LE: add <8 x i16>
/* vec_mradds */
- res_vs = vec_mradds(vs, vs, vs); // CHECK: @llvm.ppc.altivec.vmhraddshs
- res_vs = vec_vmhraddshs(vs, vs, vs); // CHECK: @llvm.ppc.altivec.vmhraddshs
-
+ res_vs = vec_mradds(vs, vs, vs);
+// CHECK: @llvm.ppc.altivec.vmhraddshs
+// CHECK-LE: @llvm.ppc.altivec.vmhraddshs
+
+ res_vs = vec_vmhraddshs(vs, vs, vs);
+// CHECK: @llvm.ppc.altivec.vmhraddshs
+// CHECK-LE: @llvm.ppc.altivec.vmhraddshs
+
/* vec_msum */
- res_vi = vec_msum(vsc, vuc, vi); // CHECK: @llvm.ppc.altivec.vmsummbm
- res_vui = vec_msum(vuc, vuc, vui); // CHECK: @llvm.ppc.altivec.vmsumubm
- res_vi = vec_msum(vs, vs, vi); // CHECK: @llvm.ppc.altivec.vmsumshm
- res_vui = vec_msum(vus, vus, vui); // CHECK: @llvm.ppc.altivec.vmsumuhm
- res_vi = vec_vmsummbm(vsc, vuc, vi); // CHECK: @llvm.ppc.altivec.vmsummbm
- res_vui = vec_vmsumubm(vuc, vuc, vui); // CHECK: @llvm.ppc.altivec.vmsumubm
- res_vi = vec_vmsumshm(vs, vs, vi); // CHECK: @llvm.ppc.altivec.vmsumshm
- res_vui = vec_vmsumuhm(vus, vus, vui); // CHECK: @llvm.ppc.altivec.vmsumuhm
+ res_vi = vec_msum(vsc, vuc, vi);
+// CHECK: @llvm.ppc.altivec.vmsummbm
+// CHECK-LE: @llvm.ppc.altivec.vmsummbm
+
+ res_vui = vec_msum(vuc, vuc, vui);
+// CHECK: @llvm.ppc.altivec.vmsumubm
+// CHECK-LE: @llvm.ppc.altivec.vmsumubm
+
+ res_vi = vec_msum(vs, vs, vi);
+// CHECK: @llvm.ppc.altivec.vmsumshm
+// CHECK-LE: @llvm.ppc.altivec.vmsumshm
+
+ res_vui = vec_msum(vus, vus, vui);
+// CHECK: @llvm.ppc.altivec.vmsumuhm
+// CHECK-LE: @llvm.ppc.altivec.vmsumuhm
+
+ res_vi = vec_vmsummbm(vsc, vuc, vi);
+// CHECK: @llvm.ppc.altivec.vmsummbm
+// CHECK-LE: @llvm.ppc.altivec.vmsummbm
+
+ res_vui = vec_vmsumubm(vuc, vuc, vui);
+// CHECK: @llvm.ppc.altivec.vmsumubm
+// CHECK-LE: @llvm.ppc.altivec.vmsumubm
+
+ res_vi = vec_vmsumshm(vs, vs, vi);
+// CHECK: @llvm.ppc.altivec.vmsumshm
+// CHECK-LE: @llvm.ppc.altivec.vmsumshm
+
+ res_vui = vec_vmsumuhm(vus, vus, vui);
+// CHECK: @llvm.ppc.altivec.vmsumuhm
+// CHECK-LE: @llvm.ppc.altivec.vmsumuhm
/* vec_msums */
- res_vi = vec_msums(vs, vs, vi); // CHECK: @llvm.ppc.altivec.vmsumshs
- res_vui = vec_msums(vus, vus, vui); // CHECK: @llvm.ppc.altivec.vmsumuhs
- res_vi = vec_vmsumshs(vs, vs, vi); // CHECK: @llvm.ppc.altivec.vmsumshs
- res_vui = vec_vmsumuhs(vus, vus, vui); // CHECK: @llvm.ppc.altivec.vmsumuhs
+ res_vi = vec_msums(vs, vs, vi);
+// CHECK: @llvm.ppc.altivec.vmsumshs
+// CHECK-LE: @llvm.ppc.altivec.vmsumshs
+
+ res_vui = vec_msums(vus, vus, vui);
+// CHECK: @llvm.ppc.altivec.vmsumuhs
+// CHECK-LE: @llvm.ppc.altivec.vmsumuhs
+
+ res_vi = vec_vmsumshs(vs, vs, vi);
+// CHECK: @llvm.ppc.altivec.vmsumshs
+// CHECK-LE: @llvm.ppc.altivec.vmsumshs
+
+ res_vui = vec_vmsumuhs(vus, vus, vui);
+// CHECK: @llvm.ppc.altivec.vmsumuhs
+// CHECK-LE: @llvm.ppc.altivec.vmsumuhs
/* vec_mtvscr */
- vec_mtvscr(vsc); // CHECK: @llvm.ppc.altivec.mtvscr
- vec_mtvscr(vuc); // CHECK: @llvm.ppc.altivec.mtvscr
- vec_mtvscr(vbc); // CHECK: @llvm.ppc.altivec.mtvscr
- vec_mtvscr(vs); // CHECK: @llvm.ppc.altivec.mtvscr
- vec_mtvscr(vus); // CHECK: @llvm.ppc.altivec.mtvscr
- vec_mtvscr(vbs); // CHECK: @llvm.ppc.altivec.mtvscr
- vec_mtvscr(vp); // CHECK: @llvm.ppc.altivec.mtvscr
- vec_mtvscr(vi); // CHECK: @llvm.ppc.altivec.mtvscr
- vec_mtvscr(vui); // CHECK: @llvm.ppc.altivec.mtvscr
- vec_mtvscr(vbi); // CHECK: @llvm.ppc.altivec.mtvscr
+ vec_mtvscr(vsc);
+// CHECK: @llvm.ppc.altivec.mtvscr
+// CHECK-LE: @llvm.ppc.altivec.mtvscr
+
+ vec_mtvscr(vuc);
+// CHECK: @llvm.ppc.altivec.mtvscr
+// CHECK-LE: @llvm.ppc.altivec.mtvscr
+
+ vec_mtvscr(vbc);
+// CHECK: @llvm.ppc.altivec.mtvscr
+// CHECK-LE: @llvm.ppc.altivec.mtvscr
+
+ vec_mtvscr(vs);
+// CHECK: @llvm.ppc.altivec.mtvscr
+// CHECK-LE: @llvm.ppc.altivec.mtvscr
+
+ vec_mtvscr(vus);
+// CHECK: @llvm.ppc.altivec.mtvscr
+// CHECK-LE: @llvm.ppc.altivec.mtvscr
+
+ vec_mtvscr(vbs);
+// CHECK: @llvm.ppc.altivec.mtvscr
+// CHECK-LE: @llvm.ppc.altivec.mtvscr
+
+ vec_mtvscr(vp);
+// CHECK: @llvm.ppc.altivec.mtvscr
+// CHECK-LE: @llvm.ppc.altivec.mtvscr
+
+ vec_mtvscr(vi);
+// CHECK: @llvm.ppc.altivec.mtvscr
+// CHECK-LE: @llvm.ppc.altivec.mtvscr
+
+ vec_mtvscr(vui);
+// CHECK: @llvm.ppc.altivec.mtvscr
+// CHECK-LE: @llvm.ppc.altivec.mtvscr
+
+ vec_mtvscr(vbi);
+// CHECK: @llvm.ppc.altivec.mtvscr
+// CHECK-LE: @llvm.ppc.altivec.mtvscr
/* vec_mule */
- res_vs = vec_mule(vsc, vsc); // CHECK: @llvm.ppc.altivec.vmulesb
- res_vus = vec_mule(vuc, vuc); // CHECK: @llvm.ppc.altivec.vmuleub
- res_vi = vec_mule(vs, vs); // CHECK: @llvm.ppc.altivec.vmulesh
- res_vui = vec_mule(vus, vus); // CHECK: @llvm.ppc.altivec.vmuleuh
- res_vs = vec_vmulesb(vsc, vsc); // CHECK: @llvm.ppc.altivec.vmulesb
- res_vus = vec_vmuleub(vuc, vuc); // CHECK: @llvm.ppc.altivec.vmuleub
- res_vi = vec_vmulesh(vs, vs); // CHECK: @llvm.ppc.altivec.vmulesh
- res_vui = vec_vmuleuh(vus, vus); // CHECK: @llvm.ppc.altivec.vmuleuh
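+/* Note: on little-endian PowerPC the vector element numbering is reversed,
+   so vec_mule is expected to lower to the "odd" multiply intrinsics and
+   vec_mulo to the "even" ones; the CHECK-LE lines below reflect that swap. */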
+ res_vs = vec_mule(vsc, vsc);
+// CHECK: @llvm.ppc.altivec.vmulesb
+// CHECK-LE: @llvm.ppc.altivec.vmulosb
+
+ res_vus = vec_mule(vuc, vuc);
+// CHECK: @llvm.ppc.altivec.vmuleub
+// CHECK-LE: @llvm.ppc.altivec.vmuloub
+
+ res_vi = vec_mule(vs, vs);
+// CHECK: @llvm.ppc.altivec.vmulesh
+// CHECK-LE: @llvm.ppc.altivec.vmulosh
+
+ res_vui = vec_mule(vus, vus);
+// CHECK: @llvm.ppc.altivec.vmuleuh
+// CHECK-LE: @llvm.ppc.altivec.vmulouh
+
+ res_vs = vec_vmulesb(vsc, vsc);
+// CHECK: @llvm.ppc.altivec.vmulesb
+// CHECK-LE: @llvm.ppc.altivec.vmulosb
+
+ res_vus = vec_vmuleub(vuc, vuc);
+// CHECK: @llvm.ppc.altivec.vmuleub
+// CHECK-LE: @llvm.ppc.altivec.vmuloub
+
+ res_vi = vec_vmulesh(vs, vs);
+// CHECK: @llvm.ppc.altivec.vmulesh
+// CHECK-LE: @llvm.ppc.altivec.vmulosh
+
+ res_vui = vec_vmuleuh(vus, vus);
+// CHECK: @llvm.ppc.altivec.vmuleuh
+// CHECK-LE: @llvm.ppc.altivec.vmulouh
/* vec_mulo */
- res_vs = vec_mulo(vsc, vsc); // CHECK: @llvm.ppc.altivec.vmulosb
- res_vus = vec_mulo(vuc, vuc); // CHECK: @llvm.ppc.altivec.vmuloub
- res_vi = vec_mulo(vs, vs); // CHECK: @llvm.ppc.altivec.vmulosh
- res_vui = vec_mulo(vus, vus); // CHECK: @llvm.ppc.altivec.vmulouh
- res_vs = vec_vmulosb(vsc, vsc); // CHECK: @llvm.ppc.altivec.vmulosb
- res_vus = vec_vmuloub(vuc, vuc); // CHECK: @llvm.ppc.altivec.vmuloub
- res_vi = vec_vmulosh(vs, vs); // CHECK: @llvm.ppc.altivec.vmulosh
- res_vui = vec_vmulouh(vus, vus); // CHECK: @llvm.ppc.altivec.vmulouh
+ res_vs = vec_mulo(vsc, vsc);
+// CHECK: @llvm.ppc.altivec.vmulosb
+// CHECK-LE: @llvm.ppc.altivec.vmulesb
+
+ res_vus = vec_mulo(vuc, vuc);
+// CHECK: @llvm.ppc.altivec.vmuloub
+// CHECK-LE: @llvm.ppc.altivec.vmuleub
+
+ res_vi = vec_mulo(vs, vs);
+// CHECK: @llvm.ppc.altivec.vmulosh
+// CHECK-LE: @llvm.ppc.altivec.vmulesh
+
+ res_vui = vec_mulo(vus, vus);
+// CHECK: @llvm.ppc.altivec.vmulouh
+// CHECK-LE: @llvm.ppc.altivec.vmuleuh
+
+ res_vs = vec_vmulosb(vsc, vsc);
+// CHECK: @llvm.ppc.altivec.vmulosb
+// CHECK-LE: @llvm.ppc.altivec.vmulesb
+
+ res_vus = vec_vmuloub(vuc, vuc);
+// CHECK: @llvm.ppc.altivec.vmuloub
+// CHECK-LE: @llvm.ppc.altivec.vmuleub
+
+ res_vi = vec_vmulosh(vs, vs);
+// CHECK: @llvm.ppc.altivec.vmulosh
+// CHECK-LE: @llvm.ppc.altivec.vmulesh
+
+ res_vui = vec_vmulouh(vus, vus);
+// CHECK: @llvm.ppc.altivec.vmulouh
+// CHECK-LE: @llvm.ppc.altivec.vmuleuh
/* vec_nmsub */
- res_vf = vec_nmsub(vf, vf, vf); // CHECK: @llvm.ppc.altivec.vnmsubfp
- res_vf = vec_vnmsubfp(vf, vf, vf); // CHECK: @llvm.ppc.altivec.vnmsubfp
+ res_vf = vec_nmsub(vf, vf, vf);
+// CHECK: @llvm.ppc.altivec.vnmsubfp
+// CHECK-LE: @llvm.ppc.altivec.vnmsubfp
+
+ res_vf = vec_vnmsubfp(vf, vf, vf);
+// CHECK: @llvm.ppc.altivec.vnmsubfp
+// CHECK-LE: @llvm.ppc.altivec.vnmsubfp
/* vec_nor */
- res_vsc = vec_nor(vsc, vsc); // CHECK: or <16 x i8>
- // CHECK: xor <16 x i8>
-
- res_vuc = vec_nor(vuc, vuc); // CHECK: or <16 x i8>
- // CHECK: xor <16 x i8>
-
- res_vuc = vec_nor(vbc, vbc); // CHECK: or <16 x i8>
- // CHECK: xor <16 x i8>
-
- res_vs = vec_nor(vs, vs); // CHECK: or <8 x i16>
- // CHECK: xor <8 x i16>
-
- res_vus = vec_nor(vus, vus); // CHECK: or <8 x i16>
- // CHECK: xor <8 x i16>
-
- res_vus = vec_nor(vbs, vbs); // CHECK: or <8 x i16>
- // CHECK: xor <8 x i16>
-
- res_vi = vec_nor(vi, vi); // CHECK: or <4 x i32>
- // CHECK: xor <4 x i32>
-
- res_vui = vec_nor(vui, vui); // CHECK: or <4 x i32>
- // CHECK: xor <4 x i32>
-
- res_vui = vec_nor(vbi, vbi); // CHECK: or <4 x i32>
- // CHECK: xor <4 x i32>
-
- res_vf = vec_nor(vf, vf); // CHECK: or <4 x i32>
- // CHECK: xor <4 x i32>
-
- res_vsc = vec_vnor(vsc, vsc); // CHECK: or <16 x i8>
- // CHECK: xor <16 x i8>
-
- res_vuc = vec_vnor(vuc, vuc); // CHECK: or <16 x i8>
- // CHECK: xor <16 x i8>
-
- res_vuc = vec_vnor(vbc, vbc); // CHECK: or <16 x i8>
- // CHECK: xor <16 x i8>
-
- res_vs = vec_vnor(vs, vs); // CHECK: or <8 x i16>
- // CHECK: xor <8 x i16>
-
- res_vus = vec_vnor(vus, vus); // CHECK: or <8 x i16>
- // CHECK: xor <8 x i16>
-
- res_vus = vec_vnor(vbs, vbs); // CHECK: or <8 x i16>
- // CHECK: xor <8 x i16>
-
- res_vi = vec_vnor(vi, vi); // CHECK: or <4 x i32>
- // CHECK: xor <4 x i32>
-
- res_vui = vec_vnor(vui, vui); // CHECK: or <4 x i32>
- // CHECK: xor <4 x i32>
-
- res_vui = vec_vnor(vbi, vbi); // CHECK: or <4 x i32>
- // CHECK: xor <4 x i32>
-
- res_vf = vec_vnor(vf, vf); // CHECK: or <4 x i32>
- // CHECK: xor <4 x i32>
+ res_vsc = vec_nor(vsc, vsc);
+// CHECK: or <16 x i8>
+// CHECK: xor <16 x i8>
+// CHECK-LE: or <16 x i8>
+// CHECK-LE: xor <16 x i8>
+
+ res_vuc = vec_nor(vuc, vuc);
+// CHECK: or <16 x i8>
+// CHECK: xor <16 x i8>
+// CHECK-LE: or <16 x i8>
+// CHECK-LE: xor <16 x i8>
+
+ res_vuc = vec_nor(vbc, vbc);
+// CHECK: or <16 x i8>
+// CHECK: xor <16 x i8>
+// CHECK-LE: or <16 x i8>
+// CHECK-LE: xor <16 x i8>
+
+ res_vs = vec_nor(vs, vs);
+// CHECK: or <8 x i16>
+// CHECK: xor <8 x i16>
+// CHECK-LE: or <8 x i16>
+// CHECK-LE: xor <8 x i16>
+
+ res_vus = vec_nor(vus, vus);
+// CHECK: or <8 x i16>
+// CHECK: xor <8 x i16>
+// CHECK-LE: or <8 x i16>
+// CHECK-LE: xor <8 x i16>
+
+ res_vus = vec_nor(vbs, vbs);
+// CHECK: or <8 x i16>
+// CHECK: xor <8 x i16>
+// CHECK-LE: or <8 x i16>
+// CHECK-LE: xor <8 x i16>
+
+ res_vi = vec_nor(vi, vi);
+// CHECK: or <4 x i32>
+// CHECK: xor <4 x i32>
+// CHECK-LE: or <4 x i32>
+// CHECK-LE: xor <4 x i32>
+
+ res_vui = vec_nor(vui, vui);
+// CHECK: or <4 x i32>
+// CHECK: xor <4 x i32>
+// CHECK-LE: or <4 x i32>
+// CHECK-LE: xor <4 x i32>
+
+ res_vui = vec_nor(vbi, vbi);
+// CHECK: or <4 x i32>
+// CHECK: xor <4 x i32>
+// CHECK-LE: or <4 x i32>
+// CHECK-LE: xor <4 x i32>
+
+ res_vf = vec_nor(vf, vf);
+// CHECK: or <4 x i32>
+// CHECK: xor <4 x i32>
+// CHECK-LE: or <4 x i32>
+// CHECK-LE: xor <4 x i32>
+
+ res_vsc = vec_vnor(vsc, vsc);
+// CHECK: or <16 x i8>
+// CHECK: xor <16 x i8>
+// CHECK-LE: or <16 x i8>
+// CHECK-LE: xor <16 x i8>
+
+ res_vuc = vec_vnor(vuc, vuc);
+// CHECK: or <16 x i8>
+// CHECK: xor <16 x i8>
+// CHECK-LE: or <16 x i8>
+// CHECK-LE: xor <16 x i8>
+
+ res_vuc = vec_vnor(vbc, vbc);
+// CHECK: or <16 x i8>
+// CHECK: xor <16 x i8>
+// CHECK-LE: or <16 x i8>
+// CHECK-LE: xor <16 x i8>
+
+ res_vs = vec_vnor(vs, vs);
+// CHECK: or <8 x i16>
+// CHECK: xor <8 x i16>
+// CHECK-LE: or <8 x i16>
+// CHECK-LE: xor <8 x i16>
+
+ res_vus = vec_vnor(vus, vus);
+// CHECK: or <8 x i16>
+// CHECK: xor <8 x i16>
+// CHECK-LE: or <8 x i16>
+// CHECK-LE: xor <8 x i16>
+
+ res_vus = vec_vnor(vbs, vbs);
+// CHECK: or <8 x i16>
+// CHECK: xor <8 x i16>
+// CHECK-LE: or <8 x i16>
+// CHECK-LE: xor <8 x i16>
+
+ res_vi = vec_vnor(vi, vi);
+// CHECK: or <4 x i32>
+// CHECK: xor <4 x i32>
+// CHECK-LE: or <4 x i32>
+// CHECK-LE: xor <4 x i32>
+
+ res_vui = vec_vnor(vui, vui);
+// CHECK: or <4 x i32>
+// CHECK: xor <4 x i32>
+// CHECK-LE: or <4 x i32>
+// CHECK-LE: xor <4 x i32>
+
+ res_vui = vec_vnor(vbi, vbi);
+// CHECK: or <4 x i32>
+// CHECK: xor <4 x i32>
+// CHECK-LE: or <4 x i32>
+// CHECK-LE: xor <4 x i32>
+
+ res_vf = vec_vnor(vf, vf);
+// CHECK: or <4 x i32>
+// CHECK: xor <4 x i32>
+// CHECK-LE: or <4 x i32>
+// CHECK-LE: xor <4 x i32>
/* vec_or */
- res_vsc = vec_or(vsc, vsc); // CHECK: or <16 x i8>
- res_vsc = vec_or(vbc, vsc); // CHECK: or <16 x i8>
- res_vsc = vec_or(vsc, vbc); // CHECK: or <16 x i8>
- res_vuc = vec_or(vuc, vuc); // CHECK: or <16 x i8>
- res_vuc = vec_or(vbc, vuc); // CHECK: or <16 x i8>
- res_vuc = vec_or(vuc, vbc); // CHECK: or <16 x i8>
- res_vbc = vec_or(vbc, vbc); // CHECK: or <16 x i8>
- res_vs = vec_or(vs, vs); // CHECK: or <8 x i16>
- res_vs = vec_or(vbs, vs); // CHECK: or <8 x i16>
- res_vs = vec_or(vs, vbs); // CHECK: or <8 x i16>
- res_vus = vec_or(vus, vus); // CHECK: or <8 x i16>
- res_vus = vec_or(vbs, vus); // CHECK: or <8 x i16>
- res_vus = vec_or(vus, vbs); // CHECK: or <8 x i16>
- res_vbs = vec_or(vbs, vbs); // CHECK: or <8 x i16>
- res_vi = vec_or(vi, vi); // CHECK: or <4 x i32>
- res_vi = vec_or(vbi, vi); // CHECK: or <4 x i32>
- res_vi = vec_or(vi, vbi); // CHECK: or <4 x i32>
- res_vui = vec_or(vui, vui); // CHECK: or <4 x i32>
- res_vui = vec_or(vbi, vui); // CHECK: or <4 x i32>
- res_vui = vec_or(vui, vbi); // CHECK: or <4 x i32>
- res_vbi = vec_or(vbi, vbi); // CHECK: or <4 x i32>
- res_vf = vec_or(vf, vf); // CHECK: or <4 x i32>
- res_vf = vec_or(vbi, vf); // CHECK: or <4 x i32>
- res_vf = vec_or(vf, vbi); // CHECK: or <4 x i32>
- res_vsc = vec_vor(vsc, vsc); // CHECK: or <16 x i8>
- res_vsc = vec_vor(vbc, vsc); // CHECK: or <16 x i8>
- res_vsc = vec_vor(vsc, vbc); // CHECK: or <16 x i8>
- res_vuc = vec_vor(vuc, vuc); // CHECK: or <16 x i8>
- res_vuc = vec_vor(vbc, vuc); // CHECK: or <16 x i8>
- res_vuc = vec_vor(vuc, vbc); // CHECK: or <16 x i8>
- res_vbc = vec_vor(vbc, vbc); // CHECK: or <16 x i8>
- res_vs = vec_vor(vs, vs); // CHECK: or <8 x i16>
- res_vs = vec_vor(vbs, vs); // CHECK: or <8 x i16>
- res_vs = vec_vor(vs, vbs); // CHECK: or <8 x i16>
- res_vus = vec_vor(vus, vus); // CHECK: or <8 x i16>
- res_vus = vec_vor(vbs, vus); // CHECK: or <8 x i16>
- res_vus = vec_vor(vus, vbs); // CHECK: or <8 x i16>
- res_vbs = vec_vor(vbs, vbs); // CHECK: or <8 x i16>
- res_vi = vec_vor(vi, vi); // CHECK: or <4 x i32>
- res_vi = vec_vor(vbi, vi); // CHECK: or <4 x i32>
- res_vi = vec_vor(vi, vbi); // CHECK: or <4 x i32>
- res_vui = vec_vor(vui, vui); // CHECK: or <4 x i32>
- res_vui = vec_vor(vbi, vui); // CHECK: or <4 x i32>
- res_vui = vec_vor(vui, vbi); // CHECK: or <4 x i32>
- res_vbi = vec_vor(vbi, vbi); // CHECK: or <4 x i32>
- res_vf = vec_vor(vf, vf); // CHECK: or <4 x i32>
- res_vf = vec_vor(vbi, vf); // CHECK: or <4 x i32>
- res_vf = vec_vor(vf, vbi); // CHECK: or <4 x i32>
+ res_vsc = vec_or(vsc, vsc);
+// CHECK: or <16 x i8>
+// CHECK-LE: or <16 x i8>
+
+ res_vsc = vec_or(vbc, vsc);
+// CHECK: or <16 x i8>
+// CHECK-LE: or <16 x i8>
+
+ res_vsc = vec_or(vsc, vbc);
+// CHECK: or <16 x i8>
+// CHECK-LE: or <16 x i8>
+
+ res_vuc = vec_or(vuc, vuc);
+// CHECK: or <16 x i8>
+// CHECK-LE: or <16 x i8>
+
+ res_vuc = vec_or(vbc, vuc);
+// CHECK: or <16 x i8>
+// CHECK-LE: or <16 x i8>
+
+ res_vuc = vec_or(vuc, vbc);
+// CHECK: or <16 x i8>
+// CHECK-LE: or <16 x i8>
+
+ res_vbc = vec_or(vbc, vbc);
+// CHECK: or <16 x i8>
+// CHECK-LE: or <16 x i8>
+
+ res_vs = vec_or(vs, vs);
+// CHECK: or <8 x i16>
+// CHECK-LE: or <8 x i16>
+
+ res_vs = vec_or(vbs, vs);
+// CHECK: or <8 x i16>
+// CHECK-LE: or <8 x i16>
+
+ res_vs = vec_or(vs, vbs);
+// CHECK: or <8 x i16>
+// CHECK-LE: or <8 x i16>
+
+ res_vus = vec_or(vus, vus);
+// CHECK: or <8 x i16>
+// CHECK-LE: or <8 x i16>
+
+ res_vus = vec_or(vbs, vus);
+// CHECK: or <8 x i16>
+// CHECK-LE: or <8 x i16>
+
+ res_vus = vec_or(vus, vbs);
+// CHECK: or <8 x i16>
+// CHECK-LE: or <8 x i16>
+
+ res_vbs = vec_or(vbs, vbs);
+// CHECK: or <8 x i16>
+// CHECK-LE: or <8 x i16>
+
+ res_vi = vec_or(vi, vi);
+// CHECK: or <4 x i32>
+// CHECK-LE: or <4 x i32>
+
+ res_vi = vec_or(vbi, vi);
+// CHECK: or <4 x i32>
+// CHECK-LE: or <4 x i32>
+
+ res_vi = vec_or(vi, vbi);
+// CHECK: or <4 x i32>
+// CHECK-LE: or <4 x i32>
+
+ res_vui = vec_or(vui, vui);
+// CHECK: or <4 x i32>
+// CHECK-LE: or <4 x i32>
+
+ res_vui = vec_or(vbi, vui);
+// CHECK: or <4 x i32>
+// CHECK-LE: or <4 x i32>
+
+ res_vui = vec_or(vui, vbi);
+// CHECK: or <4 x i32>
+// CHECK-LE: or <4 x i32>
+
+ res_vbi = vec_or(vbi, vbi);
+// CHECK: or <4 x i32>
+// CHECK-LE: or <4 x i32>
+
+ res_vf = vec_or(vf, vf);
+// CHECK: or <4 x i32>
+// CHECK-LE: or <4 x i32>
+
+ res_vf = vec_or(vbi, vf);
+// CHECK: or <4 x i32>
+// CHECK-LE: or <4 x i32>
+
+ res_vf = vec_or(vf, vbi);
+// CHECK: or <4 x i32>
+// CHECK-LE: or <4 x i32>
+
+ res_vsc = vec_vor(vsc, vsc);
+// CHECK: or <16 x i8>
+// CHECK-LE: or <16 x i8>
+
+ res_vsc = vec_vor(vbc, vsc);
+// CHECK: or <16 x i8>
+// CHECK-LE: or <16 x i8>
+
+ res_vsc = vec_vor(vsc, vbc);
+// CHECK: or <16 x i8>
+// CHECK-LE: or <16 x i8>
+
+ res_vuc = vec_vor(vuc, vuc);
+// CHECK: or <16 x i8>
+// CHECK-LE: or <16 x i8>
+
+ res_vuc = vec_vor(vbc, vuc);
+// CHECK: or <16 x i8>
+// CHECK-LE: or <16 x i8>
+
+ res_vuc = vec_vor(vuc, vbc);
+// CHECK: or <16 x i8>
+// CHECK-LE: or <16 x i8>
+
+ res_vbc = vec_vor(vbc, vbc);
+// CHECK: or <16 x i8>
+// CHECK-LE: or <16 x i8>
+
+ res_vs = vec_vor(vs, vs);
+// CHECK: or <8 x i16>
+// CHECK-LE: or <8 x i16>
+
+ res_vs = vec_vor(vbs, vs);
+// CHECK: or <8 x i16>
+// CHECK-LE: or <8 x i16>
+
+ res_vs = vec_vor(vs, vbs);
+// CHECK: or <8 x i16>
+// CHECK-LE: or <8 x i16>
+
+ res_vus = vec_vor(vus, vus);
+// CHECK: or <8 x i16>
+// CHECK-LE: or <8 x i16>
+
+ res_vus = vec_vor(vbs, vus);
+// CHECK: or <8 x i16>
+// CHECK-LE: or <8 x i16>
+
+ res_vus = vec_vor(vus, vbs);
+// CHECK: or <8 x i16>
+// CHECK-LE: or <8 x i16>
+
+ res_vbs = vec_vor(vbs, vbs);
+// CHECK: or <8 x i16>
+// CHECK-LE: or <8 x i16>
+
+ res_vi = vec_vor(vi, vi);
+// CHECK: or <4 x i32>
+// CHECK-LE: or <4 x i32>
+
+ res_vi = vec_vor(vbi, vi);
+// CHECK: or <4 x i32>
+// CHECK-LE: or <4 x i32>
+
+ res_vi = vec_vor(vi, vbi);
+// CHECK: or <4 x i32>
+// CHECK-LE: or <4 x i32>
+
+ res_vui = vec_vor(vui, vui);
+// CHECK: or <4 x i32>
+// CHECK-LE: or <4 x i32>
+
+ res_vui = vec_vor(vbi, vui);
+// CHECK: or <4 x i32>
+// CHECK-LE: or <4 x i32>
+
+ res_vui = vec_vor(vui, vbi);
+// CHECK: or <4 x i32>
+// CHECK-LE: or <4 x i32>
+
+ res_vbi = vec_vor(vbi, vbi);
+// CHECK: or <4 x i32>
+// CHECK-LE: or <4 x i32>
+
+ res_vf = vec_vor(vf, vf);
+// CHECK: or <4 x i32>
+// CHECK-LE: or <4 x i32>
+
+ res_vf = vec_vor(vbi, vf);
+// CHECK: or <4 x i32>
+// CHECK-LE: or <4 x i32>
+
+ res_vf = vec_vor(vf, vbi);
+// CHECK: or <4 x i32>
+// CHECK-LE: or <4 x i32>
/* vec_pack */
- res_vsc = vec_pack(vs, vs); // CHECK: @llvm.ppc.altivec.vperm
- res_vuc = vec_pack(vus, vus); // CHECK: @llvm.ppc.altivec.vperm
- res_vbc = vec_pack(vbs, vbs); // CHECK: @llvm.ppc.altivec.vperm
- res_vs = vec_pack(vi, vi); // CHECK: @llvm.ppc.altivec.vperm
- res_vus = vec_pack(vui, vui); // CHECK: @llvm.ppc.altivec.vperm
- res_vbs = vec_pack(vbi, vbi); // CHECK: @llvm.ppc.altivec.vperm
- res_vsc = vec_vpkuhum(vs, vs); // CHECK: @llvm.ppc.altivec.vperm
- res_vuc = vec_vpkuhum(vus, vus); // CHECK: @llvm.ppc.altivec.vperm
- res_vbc = vec_vpkuhum(vbs, vbs); // CHECK: @llvm.ppc.altivec.vperm
- res_vs = vec_vpkuwum(vi, vi); // CHECK: @llvm.ppc.altivec.vperm
- res_vus = vec_vpkuwum(vui, vui); // CHECK: @llvm.ppc.altivec.vperm
- res_vbs = vec_vpkuwum(vbi, vbi); // CHECK: @llvm.ppc.altivec.vperm
+ res_vsc = vec_pack(vs, vs);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vuc = vec_pack(vus, vus);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vbc = vec_pack(vbs, vbs);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vs = vec_pack(vi, vi);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vus = vec_pack(vui, vui);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vbs = vec_pack(vbi, vbi);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vsc = vec_vpkuhum(vs, vs);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vuc = vec_vpkuhum(vus, vus);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vbc = vec_vpkuhum(vbs, vbs);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vs = vec_vpkuwum(vi, vi);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vus = vec_vpkuwum(vui, vui);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vbs = vec_vpkuwum(vbi, vbi);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
/* vec_packpx */
- res_vp = vec_packpx(vui, vui); // CHECK: @llvm.ppc.altivec.vpkpx
- res_vp = vec_vpkpx(vui, vui); // CHECK: @llvm.ppc.altivec.vpkpx
+ res_vp = vec_packpx(vui, vui);
+// CHECK: @llvm.ppc.altivec.vpkpx
+// CHECK-LE: @llvm.ppc.altivec.vpkpx
+
+ res_vp = vec_vpkpx(vui, vui);
+// CHECK: @llvm.ppc.altivec.vpkpx
+// CHECK-LE: @llvm.ppc.altivec.vpkpx
/* vec_packs */
- res_vsc = vec_packs(vs, vs); // CHECK: @llvm.ppc.altivec.vpkshss
- res_vuc = vec_packs(vus, vus); // CHECK: @llvm.ppc.altivec.vpkuhus
- res_vs = vec_packs(vi, vi); // CHECK: @llvm.ppc.altivec.vpkswss
- res_vus = vec_packs(vui, vui); // CHECK: @llvm.ppc.altivec.vpkuwus
- res_vsc = vec_vpkshss(vs, vs); // CHECK: @llvm.ppc.altivec.vpkshss
- res_vuc = vec_vpkuhus(vus, vus); // CHECK: @llvm.ppc.altivec.vpkuhus
- res_vs = vec_vpkswss(vi, vi); // CHECK: @llvm.ppc.altivec.vpkswss
- res_vus = vec_vpkuwus(vui, vui); // CHECK: @llvm.ppc.altivec.vpkuwus
+ res_vsc = vec_packs(vs, vs);
+// CHECK: @llvm.ppc.altivec.vpkshss
+// CHECK-LE: @llvm.ppc.altivec.vpkshss
+
+ res_vuc = vec_packs(vus, vus);
+// CHECK: @llvm.ppc.altivec.vpkuhus
+// CHECK-LE: @llvm.ppc.altivec.vpkuhus
+
+ res_vs = vec_packs(vi, vi);
+// CHECK: @llvm.ppc.altivec.vpkswss
+// CHECK-LE: @llvm.ppc.altivec.vpkswss
+
+ res_vus = vec_packs(vui, vui);
+// CHECK: @llvm.ppc.altivec.vpkuwus
+// CHECK-LE: @llvm.ppc.altivec.vpkuwus
+
+ res_vsc = vec_vpkshss(vs, vs);
+// CHECK: @llvm.ppc.altivec.vpkshss
+// CHECK-LE: @llvm.ppc.altivec.vpkshss
+
+ res_vuc = vec_vpkuhus(vus, vus);
+// CHECK: @llvm.ppc.altivec.vpkuhus
+// CHECK-LE: @llvm.ppc.altivec.vpkuhus
+
+ res_vs = vec_vpkswss(vi, vi);
+// CHECK: @llvm.ppc.altivec.vpkswss
+// CHECK-LE: @llvm.ppc.altivec.vpkswss
+
+ res_vus = vec_vpkuwus(vui, vui);
+// CHECK: @llvm.ppc.altivec.vpkuwus
+// CHECK-LE: @llvm.ppc.altivec.vpkuwus
/* vec_packsu */
- res_vuc = vec_packsu(vs, vs); // CHECK: @llvm.ppc.altivec.vpkshus
- res_vuc = vec_packsu(vus, vus); // CHECK: @llvm.ppc.altivec.vpkuhus
- res_vus = vec_packsu(vi, vi); // CHECK: @llvm.ppc.altivec.vpkswus
- res_vus = vec_packsu(vui, vui); // CHECK: @llvm.ppc.altivec.vpkuwus
- res_vuc = vec_vpkshus(vs, vs); // CHECK: @llvm.ppc.altivec.vpkshus
- res_vuc = vec_vpkshus(vus, vus); // CHECK: @llvm.ppc.altivec.vpkuhus
- res_vus = vec_vpkswus(vi, vi); // CHECK: @llvm.ppc.altivec.vpkswus
- res_vus = vec_vpkswus(vui, vui); // CHECK: @llvm.ppc.altivec.vpkuwus
+ res_vuc = vec_packsu(vs, vs);
+// CHECK: @llvm.ppc.altivec.vpkshus
+// CHECK-LE: @llvm.ppc.altivec.vpkshus
+
+ res_vuc = vec_packsu(vus, vus);
+// CHECK: @llvm.ppc.altivec.vpkuhus
+// CHECK-LE: @llvm.ppc.altivec.vpkuhus
+
+ res_vus = vec_packsu(vi, vi);
+// CHECK: @llvm.ppc.altivec.vpkswus
+// CHECK-LE: @llvm.ppc.altivec.vpkswus
+
+ res_vus = vec_packsu(vui, vui);
+// CHECK: @llvm.ppc.altivec.vpkuwus
+// CHECK-LE: @llvm.ppc.altivec.vpkuwus
+
+ res_vuc = vec_vpkshus(vs, vs);
+// CHECK: @llvm.ppc.altivec.vpkshus
+// CHECK-LE: @llvm.ppc.altivec.vpkshus
+
+ res_vuc = vec_vpkshus(vus, vus);
+// CHECK: @llvm.ppc.altivec.vpkuhus
+// CHECK-LE: @llvm.ppc.altivec.vpkuhus
+
+ res_vus = vec_vpkswus(vi, vi);
+// CHECK: @llvm.ppc.altivec.vpkswus
+// CHECK-LE: @llvm.ppc.altivec.vpkswus
+
+ res_vus = vec_vpkswus(vui, vui);
+// CHECK: @llvm.ppc.altivec.vpkuwus
+// CHECK-LE: @llvm.ppc.altivec.vpkuwus
/* vec_perm */
- res_vsc = vec_perm(vsc, vsc, vuc); // CHECK: @llvm.ppc.altivec.vperm
- res_vuc = vec_perm(vuc, vuc, vuc); // CHECK: @llvm.ppc.altivec.vperm
- res_vbc = vec_perm(vbc, vbc, vuc); // CHECK: @llvm.ppc.altivec.vperm
- res_vs = vec_perm(vs, vs, vuc); // CHECK: @llvm.ppc.altivec.vperm
- res_vus = vec_perm(vus, vus, vuc); // CHECK: @llvm.ppc.altivec.vperm
- res_vbs = vec_perm(vbs, vbs, vuc); // CHECK: @llvm.ppc.altivec.vperm
- res_vp = vec_perm(vp, vp, vuc); // CHECK: @llvm.ppc.altivec.vperm
- res_vi = vec_perm(vi, vi, vuc); // CHECK: @llvm.ppc.altivec.vperm
- res_vui = vec_perm(vui, vui, vuc); // CHECK: @llvm.ppc.altivec.vperm
- res_vbi = vec_perm(vbi, vbi, vuc); // CHECK: @llvm.ppc.altivec.vperm
- res_vf = vec_perm(vf, vf, vuc); // CHECK: @llvm.ppc.altivec.vperm
- res_vsc = vec_vperm(vsc, vsc, vuc); // CHECK: @llvm.ppc.altivec.vperm
- res_vuc = vec_vperm(vuc, vuc, vuc); // CHECK: @llvm.ppc.altivec.vperm
- res_vbc = vec_vperm(vbc, vbc, vuc); // CHECK: @llvm.ppc.altivec.vperm
- res_vs = vec_vperm(vs, vs, vuc); // CHECK: @llvm.ppc.altivec.vperm
- res_vus = vec_vperm(vus, vus, vuc); // CHECK: @llvm.ppc.altivec.vperm
- res_vbs = vec_vperm(vbs, vbs, vuc); // CHECK: @llvm.ppc.altivec.vperm
- res_vp = vec_vperm(vp, vp, vuc); // CHECK: @llvm.ppc.altivec.vperm
- res_vi = vec_vperm(vi, vi, vuc); // CHECK: @llvm.ppc.altivec.vperm
- res_vui = vec_vperm(vui, vui, vuc); // CHECK: @llvm.ppc.altivec.vperm
- res_vbi = vec_vperm(vbi, vbi, vuc); // CHECK: @llvm.ppc.altivec.vperm
- res_vf = vec_vperm(vf, vf, vuc); // CHECK: @llvm.ppc.altivec.vperm
+ res_vsc = vec_perm(vsc, vsc, vuc);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vuc = vec_perm(vuc, vuc, vuc);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vbc = vec_perm(vbc, vbc, vuc);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vs = vec_perm(vs, vs, vuc);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vus = vec_perm(vus, vus, vuc);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vbs = vec_perm(vbs, vbs, vuc);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vp = vec_perm(vp, vp, vuc);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vi = vec_perm(vi, vi, vuc);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vui = vec_perm(vui, vui, vuc);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vbi = vec_perm(vbi, vbi, vuc);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vf = vec_perm(vf, vf, vuc);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vsc = vec_vperm(vsc, vsc, vuc);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vuc = vec_vperm(vuc, vuc, vuc);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vbc = vec_vperm(vbc, vbc, vuc);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vs = vec_vperm(vs, vs, vuc);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vus = vec_vperm(vus, vus, vuc);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vbs = vec_vperm(vbs, vbs, vuc);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vp = vec_vperm(vp, vp, vuc);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vi = vec_vperm(vi, vi, vuc);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vui = vec_vperm(vui, vui, vuc);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vbi = vec_vperm(vbi, vbi, vuc);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vf = vec_vperm(vf, vf, vuc);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
/* vec_re */
- res_vf = vec_re(vf); // CHECK: @llvm.ppc.altivec.vrefp
- res_vf = vec_vrefp(vf); // CHECK: @llvm.ppc.altivec.vrefp
+ res_vf = vec_re(vf);
+// CHECK: @llvm.ppc.altivec.vrefp
+// CHECK-LE: @llvm.ppc.altivec.vrefp
+
+ res_vf = vec_vrefp(vf);
+// CHECK: @llvm.ppc.altivec.vrefp
+// CHECK-LE: @llvm.ppc.altivec.vrefp
/* vec_rl */
- res_vsc = vec_rl(vsc, vuc); // CHECK: @llvm.ppc.altivec.vrlb
- res_vuc = vec_rl(vuc, vuc); // CHECK: @llvm.ppc.altivec.vrlb
- res_vs = vec_rl(vs, vus); // CHECK: @llvm.ppc.altivec.vrlh
- res_vus = vec_rl(vus, vus); // CHECK: @llvm.ppc.altivec.vrlh
- res_vi = vec_rl(vi, vui); // CHECK: @llvm.ppc.altivec.vrlw
- res_vui = vec_rl(vui, vui); // CHECK: @llvm.ppc.altivec.vrlw
- res_vsc = vec_vrlb(vsc, vuc); // CHECK: @llvm.ppc.altivec.vrlb
- res_vuc = vec_vrlb(vuc, vuc); // CHECK: @llvm.ppc.altivec.vrlb
- res_vs = vec_vrlh(vs, vus); // CHECK: @llvm.ppc.altivec.vrlh
- res_vus = vec_vrlh(vus, vus); // CHECK: @llvm.ppc.altivec.vrlh
- res_vi = vec_vrlw(vi, vui); // CHECK: @llvm.ppc.altivec.vrlw
- res_vui = vec_vrlw(vui, vui); // CHECK: @llvm.ppc.altivec.vrlw
+ res_vsc = vec_rl(vsc, vuc);
+// CHECK: @llvm.ppc.altivec.vrlb
+// CHECK-LE: @llvm.ppc.altivec.vrlb
+
+ res_vuc = vec_rl(vuc, vuc);
+// CHECK: @llvm.ppc.altivec.vrlb
+// CHECK-LE: @llvm.ppc.altivec.vrlb
+
+ res_vs = vec_rl(vs, vus);
+// CHECK: @llvm.ppc.altivec.vrlh
+// CHECK-LE: @llvm.ppc.altivec.vrlh
+
+ res_vus = vec_rl(vus, vus);
+// CHECK: @llvm.ppc.altivec.vrlh
+// CHECK-LE: @llvm.ppc.altivec.vrlh
+
+ res_vi = vec_rl(vi, vui);
+// CHECK: @llvm.ppc.altivec.vrlw
+// CHECK-LE: @llvm.ppc.altivec.vrlw
+
+ res_vui = vec_rl(vui, vui);
+// CHECK: @llvm.ppc.altivec.vrlw
+// CHECK-LE: @llvm.ppc.altivec.vrlw
+
+ res_vsc = vec_vrlb(vsc, vuc);
+// CHECK: @llvm.ppc.altivec.vrlb
+// CHECK-LE: @llvm.ppc.altivec.vrlb
+
+ res_vuc = vec_vrlb(vuc, vuc);
+// CHECK: @llvm.ppc.altivec.vrlb
+// CHECK-LE: @llvm.ppc.altivec.vrlb
+
+ res_vs = vec_vrlh(vs, vus);
+// CHECK: @llvm.ppc.altivec.vrlh
+// CHECK-LE: @llvm.ppc.altivec.vrlh
+
+ res_vus = vec_vrlh(vus, vus);
+// CHECK: @llvm.ppc.altivec.vrlh
+// CHECK-LE: @llvm.ppc.altivec.vrlh
+
+ res_vi = vec_vrlw(vi, vui);
+// CHECK: @llvm.ppc.altivec.vrlw
+// CHECK-LE: @llvm.ppc.altivec.vrlw
+
+ res_vui = vec_vrlw(vui, vui);
+// CHECK: @llvm.ppc.altivec.vrlw
+// CHECK-LE: @llvm.ppc.altivec.vrlw
/* vec_round */
- res_vf = vec_round(vf); // CHECK: @llvm.ppc.altivec.vrfin
- res_vf = vec_vrfin(vf); // CHECK: @llvm.ppc.altivec.vrfin
+ res_vf = vec_round(vf);
+// CHECK: @llvm.ppc.altivec.vrfin
+// CHECK-LE: @llvm.ppc.altivec.vrfin
+
+ res_vf = vec_vrfin(vf);
+// CHECK: @llvm.ppc.altivec.vrfin
+// CHECK-LE: @llvm.ppc.altivec.vrfin
/* vec_rsqrte */
- res_vf = vec_rsqrte(vf); // CHECK: @llvm.ppc.altivec.vrsqrtefp
- res_vf = vec_vrsqrtefp(vf); // CHECK: @llvm.ppc.altivec.vrsqrtefp
+ res_vf = vec_rsqrte(vf);
+// CHECK: @llvm.ppc.altivec.vrsqrtefp
+// CHECK-LE: @llvm.ppc.altivec.vrsqrtefp
+
+ res_vf = vec_vrsqrtefp(vf);
+// CHECK: @llvm.ppc.altivec.vrsqrtefp
+// CHECK-LE: @llvm.ppc.altivec.vrsqrtefp
/* vec_sel */
- res_vsc = vec_sel(vsc, vsc, vuc); // CHECK: xor <16 x i8>
- // CHECK: and <16 x i8>
- // CHECK: and <16 x i8>
- // CHECK: or <16 x i8>
-
- res_vsc = vec_sel(vsc, vsc, vbc); // CHECK: xor <16 x i8>
- // CHECK: and <16 x i8>
- // CHECK: and <16 x i8>
- // CHECK: or <16 x i8>
-
- res_vuc = vec_sel(vuc, vuc, vuc); // CHECK: xor <16 x i8>
- // CHECK: and <16 x i8>
- // CHECK: and <16 x i8>
- // CHECK: or <16 x i8>
-
- res_vuc = vec_sel(vuc, vuc, vbc); // CHECK: xor <16 x i8>
- // CHECK: and <16 x i8>
- // CHECK: and <16 x i8>
- // CHECK: or <16 x i8>
-
- res_vbc = vec_sel(vbc, vbc, vuc); // CHECK: xor <16 x i8>
- // CHECK: and <16 x i8>
- // CHECK: and <16 x i8>
- // CHECK: or <16 x i8>
-
- res_vbc = vec_sel(vbc, vbc, vbc); // CHECK: xor <16 x i8>
- // CHECK: and <16 x i8>
- // CHECK: and <16 x i8>
- // CHECK: or <16 x i8>
-
- res_vs = vec_sel(vs, vs, vus); // CHECK: xor <8 x i16>
- // CHECK: and <8 x i16>
- // CHECK: and <8 x i16>
- // CHECK: or <8 x i16>
-
- res_vs = vec_sel(vs, vs, vbs); // CHECK: xor <8 x i16>
- // CHECK: and <8 x i16>
- // CHECK: and <8 x i16>
- // CHECK: or <8 x i16>
-
- res_vus = vec_sel(vus, vus, vus); // CHECK: xor <8 x i16>
- // CHECK: and <8 x i16>
- // CHECK: and <8 x i16>
- // CHECK: or <8 x i16>
-
- res_vus = vec_sel(vus, vus, vbs); // CHECK: xor <8 x i16>
- // CHECK: and <8 x i16>
- // CHECK: and <8 x i16>
- // CHECK: or <8 x i16>
-
- res_vbs = vec_sel(vbs, vbs, vus); // CHECK: xor <8 x i16>
- // CHECK: and <8 x i16>
- // CHECK: and <8 x i16>
- // CHECK: or <8 x i16>
-
- res_vbs = vec_sel(vbs, vbs, vbs); // CHECK: xor <8 x i16>
- // CHECK: and <8 x i16>
- // CHECK: and <8 x i16>
- // CHECK: or <8 x i16>
-
- res_vi = vec_sel(vi, vi, vui); // CHECK: xor <4 x i32>
- // CHECK: and <4 x i32>
- // CHECK: and <4 x i32>
- // CHECK: or <4 x i32>
-
- res_vi = vec_sel(vi, vi, vbi); // CHECK: xor <4 x i32>
- // CHECK: and <4 x i32>
- // CHECK: and <4 x i32>
- // CHECK: or <4 x i32>
-
- res_vui = vec_sel(vui, vui, vui); // CHECK: xor <4 x i32>
- // CHECK: and <4 x i32>
- // CHECK: and <4 x i32>
- // CHECK: or <4 x i32>
-
- res_vui = vec_sel(vui, vui, vbi); // CHECK: xor <4 x i32>
- // CHECK: and <4 x i32>
- // CHECK: and <4 x i32>
- // CHECK: or <4 x i32>
-
- res_vbi = vec_sel(vbi, vbi, vui); // CHECK: xor <4 x i32>
- // CHECK: and <4 x i32>
- // CHECK: and <4 x i32>
- // CHECK: or <4 x i32>
-
- res_vbi = vec_sel(vbi, vbi, vbi); // CHECK: xor <4 x i32>
- // CHECK: and <4 x i32>
- // CHECK: and <4 x i32>
- // CHECK: or <4 x i32>
-
- res_vf = vec_sel(vf, vf, vui); // CHECK: xor <4 x i32>
- // CHECK: and <4 x i32>
- // CHECK: and <4 x i32>
- // CHECK: or <4 x i32>
-
- res_vf = vec_sel(vf, vf, vbi); // CHECK: xor <4 x i32>
- // CHECK: and <4 x i32>
- // CHECK: and <4 x i32>
- // CHECK: or <4 x i32>
-
- res_vsc = vec_vsel(vsc, vsc, vuc); // CHECK: xor <16 x i8>
- // CHECK: and <16 x i8>
- // CHECK: and <16 x i8>
- // CHECK: or <16 x i8>
-
- res_vsc = vec_vsel(vsc, vsc, vbc); // CHECK: xor <16 x i8>
- // CHECK: and <16 x i8>
- // CHECK: and <16 x i8>
- // CHECK: or <16 x i8>
-
- res_vuc = vec_vsel(vuc, vuc, vuc); // CHECK: xor <16 x i8>
- // CHECK: and <16 x i8>
- // CHECK: and <16 x i8>
- // CHECK: or <16 x i8>
-
- res_vuc = vec_vsel(vuc, vuc, vbc); // CHECK: xor <16 x i8>
- // CHECK: and <16 x i8>
- // CHECK: and <16 x i8>
- // CHECK: or <16 x i8>
-
- res_vbc = vec_vsel(vbc, vbc, vuc); // CHECK: xor <16 x i8>
- // CHECK: and <16 x i8>
- // CHECK: and <16 x i8>
- // CHECK: or <16 x i8>
-
- res_vbc = vec_vsel(vbc, vbc, vbc); // CHECK: xor <16 x i8>
- // CHECK: and <16 x i8>
- // CHECK: and <16 x i8>
- // CHECK: or <16 x i8>
-
- res_vs = vec_vsel(vs, vs, vus); // CHECK: xor <8 x i16>
- // CHECK: and <8 x i16>
- // CHECK: and <8 x i16>
- // CHECK: or <8 x i16>
-
- res_vs = vec_vsel(vs, vs, vbs); // CHECK: xor <8 x i16>
- // CHECK: and <8 x i16>
- // CHECK: and <8 x i16>
- // CHECK: or <8 x i16>
-
- res_vus = vec_vsel(vus, vus, vus); // CHECK: xor <8 x i16>
- // CHECK: and <8 x i16>
- // CHECK: and <8 x i16>
- // CHECK: or <8 x i16>
-
- res_vus = vec_vsel(vus, vus, vbs); // CHECK: xor <8 x i16>
- // CHECK: and <8 x i16>
- // CHECK: and <8 x i16>
- // CHECK: or <8 x i16>
-
- res_vbs = vec_vsel(vbs, vbs, vus); // CHECK: xor <8 x i16>
- // CHECK: and <8 x i16>
- // CHECK: and <8 x i16>
- // CHECK: or <8 x i16>
-
- res_vbs = vec_vsel(vbs, vbs, vbs); // CHECK: xor <8 x i16>
- // CHECK: and <8 x i16>
- // CHECK: and <8 x i16>
- // CHECK: or <8 x i16>
-
- res_vi = vec_vsel(vi, vi, vui); // CHECK: xor <4 x i32>
- // CHECK: and <4 x i32>
- // CHECK: and <4 x i32>
- // CHECK: or <4 x i32>
-
- res_vi = vec_vsel(vi, vi, vbi); // CHECK: xor <4 x i32>
- // CHECK: and <4 x i32>
- // CHECK: and <4 x i32>
- // CHECK: or <4 x i32>
-
- res_vui = vec_vsel(vui, vui, vui); // CHECK: xor <4 x i32>
- // CHECK: and <4 x i32>
- // CHECK: and <4 x i32>
- // CHECK: or <4 x i32>
-
- res_vui = vec_vsel(vui, vui, vbi); // CHECK: xor <4 x i32>
- // CHECK: and <4 x i32>
- // CHECK: and <4 x i32>
- // CHECK: or <4 x i32>
-
- res_vbi = vec_vsel(vbi, vbi, vui); // CHECK: xor <4 x i32>
- // CHECK: and <4 x i32>
- // CHECK: and <4 x i32>
- // CHECK: or <4 x i32>
-
- res_vbi = vec_vsel(vbi, vbi, vbi); // CHECK: xor <4 x i32>
- // CHECK: and <4 x i32>
- // CHECK: and <4 x i32>
- // CHECK: or <4 x i32>
-
- res_vf = vec_vsel(vf, vf, vui); // CHECK: xor <4 x i32>
- // CHECK: and <4 x i32>
- // CHECK: and <4 x i32>
- // CHECK: or <4 x i32>
-
- res_vf = vec_vsel(vf, vf, vbi); // CHECK: xor <4 x i32>
- // CHECK: and <4 x i32>
- // CHECK: and <4 x i32>
- // CHECK: or <4 x i32>
+ res_vsc = vec_sel(vsc, vsc, vuc);
+// CHECK: xor <16 x i8>
+// CHECK: and <16 x i8>
+// CHECK: and <16 x i8>
+// CHECK: or <16 x i8>
+// CHECK-LE: xor <16 x i8>
+// CHECK-LE: and <16 x i8>
+// CHECK-LE: and <16 x i8>
+// CHECK-LE: or <16 x i8>
+
+ res_vsc = vec_sel(vsc, vsc, vbc);
+// CHECK: xor <16 x i8>
+// CHECK: and <16 x i8>
+// CHECK: and <16 x i8>
+// CHECK: or <16 x i8>
+// CHECK-LE: xor <16 x i8>
+// CHECK-LE: and <16 x i8>
+// CHECK-LE: and <16 x i8>
+// CHECK-LE: or <16 x i8>
+
+ res_vuc = vec_sel(vuc, vuc, vuc);
+// CHECK: xor <16 x i8>
+// CHECK: and <16 x i8>
+// CHECK: and <16 x i8>
+// CHECK: or <16 x i8>
+// CHECK-LE: xor <16 x i8>
+// CHECK-LE: and <16 x i8>
+// CHECK-LE: and <16 x i8>
+// CHECK-LE: or <16 x i8>
+
+ res_vuc = vec_sel(vuc, vuc, vbc);
+// CHECK: xor <16 x i8>
+// CHECK: and <16 x i8>
+// CHECK: and <16 x i8>
+// CHECK: or <16 x i8>
+// CHECK-LE: xor <16 x i8>
+// CHECK-LE: and <16 x i8>
+// CHECK-LE: and <16 x i8>
+// CHECK-LE: or <16 x i8>
+
+ res_vbc = vec_sel(vbc, vbc, vuc);
+// CHECK: xor <16 x i8>
+// CHECK: and <16 x i8>
+// CHECK: and <16 x i8>
+// CHECK: or <16 x i8>
+// CHECK-LE: xor <16 x i8>
+// CHECK-LE: and <16 x i8>
+// CHECK-LE: and <16 x i8>
+// CHECK-LE: or <16 x i8>
+
+ res_vbc = vec_sel(vbc, vbc, vbc);
+// CHECK: xor <16 x i8>
+// CHECK: and <16 x i8>
+// CHECK: and <16 x i8>
+// CHECK: or <16 x i8>
+// CHECK-LE: xor <16 x i8>
+// CHECK-LE: and <16 x i8>
+// CHECK-LE: and <16 x i8>
+// CHECK-LE: or <16 x i8>
+
+ res_vs = vec_sel(vs, vs, vus);
+// CHECK: xor <8 x i16>
+// CHECK: and <8 x i16>
+// CHECK: and <8 x i16>
+// CHECK: or <8 x i16>
+// CHECK-LE: xor <8 x i16>
+// CHECK-LE: and <8 x i16>
+// CHECK-LE: and <8 x i16>
+// CHECK-LE: or <8 x i16>
+
+ res_vs = vec_sel(vs, vs, vbs);
+// CHECK: xor <8 x i16>
+// CHECK: and <8 x i16>
+// CHECK: and <8 x i16>
+// CHECK: or <8 x i16>
+// CHECK-LE: xor <8 x i16>
+// CHECK-LE: and <8 x i16>
+// CHECK-LE: and <8 x i16>
+// CHECK-LE: or <8 x i16>
+
+ res_vus = vec_sel(vus, vus, vus);
+// CHECK: xor <8 x i16>
+// CHECK: and <8 x i16>
+// CHECK: and <8 x i16>
+// CHECK: or <8 x i16>
+// CHECK-LE: xor <8 x i16>
+// CHECK-LE: and <8 x i16>
+// CHECK-LE: and <8 x i16>
+// CHECK-LE: or <8 x i16>
+
+ res_vus = vec_sel(vus, vus, vbs);
+// CHECK: xor <8 x i16>
+// CHECK: and <8 x i16>
+// CHECK: and <8 x i16>
+// CHECK: or <8 x i16>
+// CHECK-LE: xor <8 x i16>
+// CHECK-LE: and <8 x i16>
+// CHECK-LE: and <8 x i16>
+// CHECK-LE: or <8 x i16>
+
+ res_vbs = vec_sel(vbs, vbs, vus);
+// CHECK: xor <8 x i16>
+// CHECK: and <8 x i16>
+// CHECK: and <8 x i16>
+// CHECK: or <8 x i16>
+// CHECK-LE: xor <8 x i16>
+// CHECK-LE: and <8 x i16>
+// CHECK-LE: and <8 x i16>
+// CHECK-LE: or <8 x i16>
+
+ res_vbs = vec_sel(vbs, vbs, vbs);
+// CHECK: xor <8 x i16>
+// CHECK: and <8 x i16>
+// CHECK: and <8 x i16>
+// CHECK: or <8 x i16>
+// CHECK-LE: xor <8 x i16>
+// CHECK-LE: and <8 x i16>
+// CHECK-LE: and <8 x i16>
+// CHECK-LE: or <8 x i16>
+
+ res_vi = vec_sel(vi, vi, vui);
+// CHECK: xor <4 x i32>
+// CHECK: and <4 x i32>
+// CHECK: and <4 x i32>
+// CHECK: or <4 x i32>
+// CHECK-LE: xor <4 x i32>
+// CHECK-LE: and <4 x i32>
+// CHECK-LE: and <4 x i32>
+// CHECK-LE: or <4 x i32>
+
+ res_vi = vec_sel(vi, vi, vbi);
+// CHECK: xor <4 x i32>
+// CHECK: and <4 x i32>
+// CHECK: and <4 x i32>
+// CHECK: or <4 x i32>
+// CHECK-LE: xor <4 x i32>
+// CHECK-LE: and <4 x i32>
+// CHECK-LE: and <4 x i32>
+// CHECK-LE: or <4 x i32>
+
+ res_vui = vec_sel(vui, vui, vui);
+// CHECK: xor <4 x i32>
+// CHECK: and <4 x i32>
+// CHECK: and <4 x i32>
+// CHECK: or <4 x i32>
+// CHECK-LE: xor <4 x i32>
+// CHECK-LE: and <4 x i32>
+// CHECK-LE: and <4 x i32>
+// CHECK-LE: or <4 x i32>
+
+ res_vui = vec_sel(vui, vui, vbi);
+// CHECK: xor <4 x i32>
+// CHECK: and <4 x i32>
+// CHECK: and <4 x i32>
+// CHECK: or <4 x i32>
+// CHECK-LE: xor <4 x i32>
+// CHECK-LE: and <4 x i32>
+// CHECK-LE: and <4 x i32>
+// CHECK-LE: or <4 x i32>
+
+ res_vbi = vec_sel(vbi, vbi, vui);
+// CHECK: xor <4 x i32>
+// CHECK: and <4 x i32>
+// CHECK: and <4 x i32>
+// CHECK: or <4 x i32>
+// CHECK-LE: xor <4 x i32>
+// CHECK-LE: and <4 x i32>
+// CHECK-LE: and <4 x i32>
+// CHECK-LE: or <4 x i32>
+
+ res_vbi = vec_sel(vbi, vbi, vbi);
+// CHECK: xor <4 x i32>
+// CHECK: and <4 x i32>
+// CHECK: and <4 x i32>
+// CHECK: or <4 x i32>
+// CHECK-LE: xor <4 x i32>
+// CHECK-LE: and <4 x i32>
+// CHECK-LE: and <4 x i32>
+// CHECK-LE: or <4 x i32>
+
+ res_vf = vec_sel(vf, vf, vui);
+// CHECK: xor <4 x i32>
+// CHECK: and <4 x i32>
+// CHECK: and <4 x i32>
+// CHECK: or <4 x i32>
+// CHECK-LE: xor <4 x i32>
+// CHECK-LE: and <4 x i32>
+// CHECK-LE: and <4 x i32>
+// CHECK-LE: or <4 x i32>
+
+ res_vf = vec_sel(vf, vf, vbi);
+// CHECK: xor <4 x i32>
+// CHECK: and <4 x i32>
+// CHECK: and <4 x i32>
+// CHECK: or <4 x i32>
+// CHECK-LE: xor <4 x i32>
+// CHECK-LE: and <4 x i32>
+// CHECK-LE: and <4 x i32>
+// CHECK-LE: or <4 x i32>
+
+ res_vsc = vec_vsel(vsc, vsc, vuc);
+// CHECK: xor <16 x i8>
+// CHECK: and <16 x i8>
+// CHECK: and <16 x i8>
+// CHECK: or <16 x i8>
+// CHECK-LE: xor <16 x i8>
+// CHECK-LE: and <16 x i8>
+// CHECK-LE: and <16 x i8>
+// CHECK-LE: or <16 x i8>
+
+ res_vsc = vec_vsel(vsc, vsc, vbc);
+// CHECK: xor <16 x i8>
+// CHECK: and <16 x i8>
+// CHECK: and <16 x i8>
+// CHECK: or <16 x i8>
+// CHECK-LE: xor <16 x i8>
+// CHECK-LE: and <16 x i8>
+// CHECK-LE: and <16 x i8>
+// CHECK-LE: or <16 x i8>
+
+ res_vuc = vec_vsel(vuc, vuc, vuc);
+// CHECK: xor <16 x i8>
+// CHECK: and <16 x i8>
+// CHECK: and <16 x i8>
+// CHECK: or <16 x i8>
+// CHECK-LE: xor <16 x i8>
+// CHECK-LE: and <16 x i8>
+// CHECK-LE: and <16 x i8>
+// CHECK-LE: or <16 x i8>
+
+ res_vuc = vec_vsel(vuc, vuc, vbc);
+// CHECK: xor <16 x i8>
+// CHECK: and <16 x i8>
+// CHECK: and <16 x i8>
+// CHECK: or <16 x i8>
+// CHECK-LE: xor <16 x i8>
+// CHECK-LE: and <16 x i8>
+// CHECK-LE: and <16 x i8>
+// CHECK-LE: or <16 x i8>
+
+ res_vbc = vec_vsel(vbc, vbc, vuc);
+// CHECK: xor <16 x i8>
+// CHECK: and <16 x i8>
+// CHECK: and <16 x i8>
+// CHECK: or <16 x i8>
+// CHECK-LE: xor <16 x i8>
+// CHECK-LE: and <16 x i8>
+// CHECK-LE: and <16 x i8>
+// CHECK-LE: or <16 x i8>
+
+ res_vbc = vec_vsel(vbc, vbc, vbc);
+// CHECK: xor <16 x i8>
+// CHECK: and <16 x i8>
+// CHECK: and <16 x i8>
+// CHECK: or <16 x i8>
+// CHECK-LE: xor <16 x i8>
+// CHECK-LE: and <16 x i8>
+// CHECK-LE: and <16 x i8>
+// CHECK-LE: or <16 x i8>
+
+ res_vs = vec_vsel(vs, vs, vus);
+// CHECK: xor <8 x i16>
+// CHECK: and <8 x i16>
+// CHECK: and <8 x i16>
+// CHECK: or <8 x i16>
+// CHECK-LE: xor <8 x i16>
+// CHECK-LE: and <8 x i16>
+// CHECK-LE: and <8 x i16>
+// CHECK-LE: or <8 x i16>
+
+ res_vs = vec_vsel(vs, vs, vbs);
+// CHECK: xor <8 x i16>
+// CHECK: and <8 x i16>
+// CHECK: and <8 x i16>
+// CHECK: or <8 x i16>
+// CHECK-LE: xor <8 x i16>
+// CHECK-LE: and <8 x i16>
+// CHECK-LE: and <8 x i16>
+// CHECK-LE: or <8 x i16>
+
+ res_vus = vec_vsel(vus, vus, vus);
+// CHECK: xor <8 x i16>
+// CHECK: and <8 x i16>
+// CHECK: and <8 x i16>
+// CHECK: or <8 x i16>
+// CHECK-LE: xor <8 x i16>
+// CHECK-LE: and <8 x i16>
+// CHECK-LE: and <8 x i16>
+// CHECK-LE: or <8 x i16>
+
+ res_vus = vec_vsel(vus, vus, vbs);
+// CHECK: xor <8 x i16>
+// CHECK: and <8 x i16>
+// CHECK: and <8 x i16>
+// CHECK: or <8 x i16>
+// CHECK-LE: xor <8 x i16>
+// CHECK-LE: and <8 x i16>
+// CHECK-LE: and <8 x i16>
+// CHECK-LE: or <8 x i16>
+
+ res_vbs = vec_vsel(vbs, vbs, vus);
+// CHECK: xor <8 x i16>
+// CHECK: and <8 x i16>
+// CHECK: and <8 x i16>
+// CHECK: or <8 x i16>
+// CHECK-LE: xor <8 x i16>
+// CHECK-LE: and <8 x i16>
+// CHECK-LE: and <8 x i16>
+// CHECK-LE: or <8 x i16>
+
+ res_vbs = vec_vsel(vbs, vbs, vbs);
+// CHECK: xor <8 x i16>
+// CHECK: and <8 x i16>
+// CHECK: and <8 x i16>
+// CHECK: or <8 x i16>
+// CHECK-LE: xor <8 x i16>
+// CHECK-LE: and <8 x i16>
+// CHECK-LE: and <8 x i16>
+// CHECK-LE: or <8 x i16>
+
+ res_vi = vec_vsel(vi, vi, vui);
+// CHECK: xor <4 x i32>
+// CHECK: and <4 x i32>
+// CHECK: and <4 x i32>
+// CHECK: or <4 x i32>
+// CHECK-LE: xor <4 x i32>
+// CHECK-LE: and <4 x i32>
+// CHECK-LE: and <4 x i32>
+// CHECK-LE: or <4 x i32>
+
+ res_vi = vec_vsel(vi, vi, vbi);
+// CHECK: xor <4 x i32>
+// CHECK: and <4 x i32>
+// CHECK: and <4 x i32>
+// CHECK: or <4 x i32>
+// CHECK-LE: xor <4 x i32>
+// CHECK-LE: and <4 x i32>
+// CHECK-LE: and <4 x i32>
+// CHECK-LE: or <4 x i32>
+
+ res_vui = vec_vsel(vui, vui, vui);
+// CHECK: xor <4 x i32>
+// CHECK: and <4 x i32>
+// CHECK: and <4 x i32>
+// CHECK: or <4 x i32>
+// CHECK-LE: xor <4 x i32>
+// CHECK-LE: and <4 x i32>
+// CHECK-LE: and <4 x i32>
+// CHECK-LE: or <4 x i32>
+
+ res_vui = vec_vsel(vui, vui, vbi);
+// CHECK: xor <4 x i32>
+// CHECK: and <4 x i32>
+// CHECK: and <4 x i32>
+// CHECK: or <4 x i32>
+// CHECK-LE: xor <4 x i32>
+// CHECK-LE: and <4 x i32>
+// CHECK-LE: and <4 x i32>
+// CHECK-LE: or <4 x i32>
+
+ res_vbi = vec_vsel(vbi, vbi, vui);
+// CHECK: xor <4 x i32>
+// CHECK: and <4 x i32>
+// CHECK: and <4 x i32>
+// CHECK: or <4 x i32>
+// CHECK-LE: xor <4 x i32>
+// CHECK-LE: and <4 x i32>
+// CHECK-LE: and <4 x i32>
+// CHECK-LE: or <4 x i32>
+
+ res_vbi = vec_vsel(vbi, vbi, vbi);
+// CHECK: xor <4 x i32>
+// CHECK: and <4 x i32>
+// CHECK: and <4 x i32>
+// CHECK: or <4 x i32>
+// CHECK-LE: xor <4 x i32>
+// CHECK-LE: and <4 x i32>
+// CHECK-LE: and <4 x i32>
+// CHECK-LE: or <4 x i32>
+
+ res_vf = vec_vsel(vf, vf, vui);
+// CHECK: xor <4 x i32>
+// CHECK: and <4 x i32>
+// CHECK: and <4 x i32>
+// CHECK: or <4 x i32>
+// CHECK-LE: xor <4 x i32>
+// CHECK-LE: and <4 x i32>
+// CHECK-LE: and <4 x i32>
+// CHECK-LE: or <4 x i32>
+
+ res_vf = vec_vsel(vf, vf, vbi);
+// CHECK: xor <4 x i32>
+// CHECK: and <4 x i32>
+// CHECK: and <4 x i32>
+// CHECK: or <4 x i32>
+// CHECK-LE: xor <4 x i32>
+// CHECK-LE: and <4 x i32>
+// CHECK-LE: and <4 x i32>
+// CHECK-LE: or <4 x i32>
/* vec_sl */
- res_vsc = vec_sl(vsc, vuc); // CHECK: shl <16 x i8>
- res_vuc = vec_sl(vuc, vuc); // CHECK: shl <16 x i8>
- res_vs = vec_sl(vs, vus); // CHECK: shl <8 x i16>
- res_vus = vec_sl(vus, vus); // CHECK: shl <8 x i16>
- res_vi = vec_sl(vi, vui); // CHECK: shl <4 x i32>
- res_vui = vec_sl(vui, vui); // CHECK: shl <4 x i32>
- res_vsc = vec_vslb(vsc, vuc); // CHECK: shl <16 x i8>
- res_vuc = vec_vslb(vuc, vuc); // CHECK: shl <16 x i8>
- res_vs = vec_vslh(vs, vus); // CHECK: shl <8 x i16>
- res_vus = vec_vslh(vus, vus); // CHECK: shl <8 x i16>
- res_vi = vec_vslw(vi, vui); // CHECK: shl <4 x i32>
- res_vui = vec_vslw(vui, vui); // CHECK: shl <4 x i32>
+ res_vsc = vec_sl(vsc, vuc);
+// CHECK: shl <16 x i8>
+// CHECK-LE: shl <16 x i8>
+
+ res_vuc = vec_sl(vuc, vuc);
+// CHECK: shl <16 x i8>
+// CHECK-LE: shl <16 x i8>
+
+ res_vs = vec_sl(vs, vus);
+// CHECK: shl <8 x i16>
+// CHECK-LE: shl <8 x i16>
+
+ res_vus = vec_sl(vus, vus);
+// CHECK: shl <8 x i16>
+// CHECK-LE: shl <8 x i16>
+
+ res_vi = vec_sl(vi, vui);
+// CHECK: shl <4 x i32>
+// CHECK-LE: shl <4 x i32>
+
+ res_vui = vec_sl(vui, vui);
+// CHECK: shl <4 x i32>
+// CHECK-LE: shl <4 x i32>
+
+ res_vsc = vec_vslb(vsc, vuc);
+// CHECK: shl <16 x i8>
+// CHECK-LE: shl <16 x i8>
+
+ res_vuc = vec_vslb(vuc, vuc);
+// CHECK: shl <16 x i8>
+// CHECK-LE: shl <16 x i8>
+
+ res_vs = vec_vslh(vs, vus);
+// CHECK: shl <8 x i16>
+// CHECK-LE: shl <8 x i16>
+
+ res_vus = vec_vslh(vus, vus);
+// CHECK: shl <8 x i16>
+// CHECK-LE: shl <8 x i16>
+
+ res_vi = vec_vslw(vi, vui);
+// CHECK: shl <4 x i32>
+// CHECK-LE: shl <4 x i32>
+
+ res_vui = vec_vslw(vui, vui);
+// CHECK: shl <4 x i32>
+// CHECK-LE: shl <4 x i32>
/* vec_sld */
- res_vsc = vec_sld(vsc, vsc, 0); // CHECK: @llvm.ppc.altivec.vperm
- res_vuc = vec_sld(vuc, vuc, 0); // CHECK: @llvm.ppc.altivec.vperm
- res_vs = vec_sld(vs, vs, 0); // CHECK: @llvm.ppc.altivec.vperm
- res_vus = vec_sld(vus, vus, 0); // CHECK: @llvm.ppc.altivec.vperm
- res_vp = vec_sld(vp, vp, 0); // CHECK: @llvm.ppc.altivec.vperm
- res_vi = vec_sld(vi, vi, 0); // CHECK: @llvm.ppc.altivec.vperm
- res_vui = vec_sld(vui, vui, 0); // CHECK: @llvm.ppc.altivec.vperm
- res_vf = vec_sld(vf, vf, 0); // CHECK: @llvm.ppc.altivec.vperm
- res_vsc = vec_vsldoi(vsc, vsc, 0); // CHECK: @llvm.ppc.altivec.vperm
- res_vuc = vec_vsldoi(vuc, vuc, 0); // CHECK: @llvm.ppc.altivec.vperm
- res_vs = vec_vsldoi(vs, vs, 0); // CHECK: @llvm.ppc.altivec.vperm
- res_vus = vec_vsldoi(vus, vus, 0); // CHECK: @llvm.ppc.altivec.vperm
- res_vp = vec_vsldoi(vp, vp, 0); // CHECK: @llvm.ppc.altivec.vperm
- res_vi = vec_vsldoi(vi, vi, 0); // CHECK: @llvm.ppc.altivec.vperm
- res_vui = vec_vsldoi(vui, vui, 0); // CHECK: @llvm.ppc.altivec.vperm
- res_vf = vec_vsldoi(vf, vf, 0); // CHECK: @llvm.ppc.altivec.vperm
+ res_vsc = vec_sld(vsc, vsc, 0);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vuc = vec_sld(vuc, vuc, 0);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vs = vec_sld(vs, vs, 0);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vus = vec_sld(vus, vus, 0);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vp = vec_sld(vp, vp, 0);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vi = vec_sld(vi, vi, 0);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vui = vec_sld(vui, vui, 0);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vf = vec_sld(vf, vf, 0);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vsc = vec_vsldoi(vsc, vsc, 0);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vuc = vec_vsldoi(vuc, vuc, 0);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vs = vec_vsldoi(vs, vs, 0);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vus = vec_vsldoi(vus, vus, 0);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vp = vec_vsldoi(vp, vp, 0);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vi = vec_vsldoi(vi, vi, 0);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vui = vec_vsldoi(vui, vui, 0);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vf = vec_vsldoi(vf, vf, 0);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
/* vec_sll */
- res_vsc = vec_sll(vsc, vuc); // CHECK: @llvm.ppc.altivec.vsl
- res_vsc = vec_sll(vsc, vus); // CHECK: @llvm.ppc.altivec.vsl
- res_vsc = vec_sll(vsc, vui); // CHECK: @llvm.ppc.altivec.vsl
- res_vuc = vec_sll(vuc, vuc); // CHECK: @llvm.ppc.altivec.vsl
- res_vuc = vec_sll(vuc, vus); // CHECK: @llvm.ppc.altivec.vsl
- res_vuc = vec_sll(vuc, vui); // CHECK: @llvm.ppc.altivec.vsl
- res_vbc = vec_sll(vbc, vuc); // CHECK: @llvm.ppc.altivec.vsl
- res_vbc = vec_sll(vbc, vus); // CHECK: @llvm.ppc.altivec.vsl
- res_vbc = vec_sll(vbc, vui); // CHECK: @llvm.ppc.altivec.vsl
- res_vs = vec_sll(vs, vuc); // CHECK: @llvm.ppc.altivec.vsl
- res_vs = vec_sll(vs, vus); // CHECK: @llvm.ppc.altivec.vsl
- res_vs = vec_sll(vs, vui); // CHECK: @llvm.ppc.altivec.vsl
- res_vus = vec_sll(vus, vuc); // CHECK: @llvm.ppc.altivec.vsl
- res_vus = vec_sll(vus, vus); // CHECK: @llvm.ppc.altivec.vsl
- res_vus = vec_sll(vus, vui); // CHECK: @llvm.ppc.altivec.vsl
- res_vbs = vec_sll(vbs, vuc); // CHECK: @llvm.ppc.altivec.vsl
- res_vbs = vec_sll(vbs, vus); // CHECK: @llvm.ppc.altivec.vsl
- res_vbs = vec_sll(vbs, vui); // CHECK: @llvm.ppc.altivec.vsl
- res_vp = vec_sll(vp, vuc); // CHECK: @llvm.ppc.altivec.vsl
- res_vp = vec_sll(vp, vus); // CHECK: @llvm.ppc.altivec.vsl
- res_vp = vec_sll(vp, vui); // CHECK: @llvm.ppc.altivec.vsl
- res_vi = vec_sll(vi, vuc); // CHECK: @llvm.ppc.altivec.vsl
- res_vi = vec_sll(vi, vus); // CHECK: @llvm.ppc.altivec.vsl
- res_vi = vec_sll(vi, vui); // CHECK: @llvm.ppc.altivec.vsl
- res_vui = vec_sll(vui, vuc); // CHECK: @llvm.ppc.altivec.vsl
- res_vui = vec_sll(vui, vus); // CHECK: @llvm.ppc.altivec.vsl
- res_vui = vec_sll(vui, vui); // CHECK: @llvm.ppc.altivec.vsl
- res_vbi = vec_sll(vbi, vuc); // CHECK: @llvm.ppc.altivec.vsl
- res_vbi = vec_sll(vbi, vus); // CHECK: @llvm.ppc.altivec.vsl
- res_vbi = vec_sll(vbi, vui); // CHECK: @llvm.ppc.altivec.vsl
- res_vsc = vec_vsl(vsc, vuc); // CHECK: @llvm.ppc.altivec.vsl
- res_vsc = vec_vsl(vsc, vus); // CHECK: @llvm.ppc.altivec.vsl
- res_vsc = vec_vsl(vsc, vui); // CHECK: @llvm.ppc.altivec.vsl
- res_vuc = vec_vsl(vuc, vuc); // CHECK: @llvm.ppc.altivec.vsl
- res_vuc = vec_vsl(vuc, vus); // CHECK: @llvm.ppc.altivec.vsl
- res_vuc = vec_vsl(vuc, vui); // CHECK: @llvm.ppc.altivec.vsl
- res_vbc = vec_vsl(vbc, vuc); // CHECK: @llvm.ppc.altivec.vsl
- res_vbc = vec_vsl(vbc, vus); // CHECK: @llvm.ppc.altivec.vsl
- res_vbc = vec_vsl(vbc, vui); // CHECK: @llvm.ppc.altivec.vsl
- res_vs = vec_vsl(vs, vuc); // CHECK: @llvm.ppc.altivec.vsl
- res_vs = vec_vsl(vs, vus); // CHECK: @llvm.ppc.altivec.vsl
- res_vs = vec_vsl(vs, vui); // CHECK: @llvm.ppc.altivec.vsl
- res_vus = vec_vsl(vus, vuc); // CHECK: @llvm.ppc.altivec.vsl
- res_vus = vec_vsl(vus, vus); // CHECK: @llvm.ppc.altivec.vsl
- res_vus = vec_vsl(vus, vui); // CHECK: @llvm.ppc.altivec.vsl
- res_vbs = vec_vsl(vbs, vuc); // CHECK: @llvm.ppc.altivec.vsl
- res_vbs = vec_vsl(vbs, vus); // CHECK: @llvm.ppc.altivec.vsl
- res_vbs = vec_vsl(vbs, vui); // CHECK: @llvm.ppc.altivec.vsl
- res_vp = vec_vsl(vp, vuc); // CHECK: @llvm.ppc.altivec.vsl
- res_vp = vec_vsl(vp, vus); // CHECK: @llvm.ppc.altivec.vsl
- res_vp = vec_vsl(vp, vui); // CHECK: @llvm.ppc.altivec.vsl
- res_vi = vec_vsl(vi, vuc); // CHECK: @llvm.ppc.altivec.vsl
- res_vi = vec_vsl(vi, vus); // CHECK: @llvm.ppc.altivec.vsl
- res_vi = vec_vsl(vi, vui); // CHECK: @llvm.ppc.altivec.vsl
- res_vui = vec_vsl(vui, vuc); // CHECK: @llvm.ppc.altivec.vsl
- res_vui = vec_vsl(vui, vus); // CHECK: @llvm.ppc.altivec.vsl
- res_vui = vec_vsl(vui, vui); // CHECK: @llvm.ppc.altivec.vsl
- res_vbi = vec_vsl(vbi, vuc); // CHECK: @llvm.ppc.altivec.vsl
- res_vbi = vec_vsl(vbi, vus); // CHECK: @llvm.ppc.altivec.vsl
- res_vbi = vec_vsl(vbi, vui); // CHECK: @llvm.ppc.altivec.vsl
+ res_vsc = vec_sll(vsc, vuc);
+// CHECK: @llvm.ppc.altivec.vsl
+// CHECK-LE: @llvm.ppc.altivec.vsl
+
+ res_vsc = vec_sll(vsc, vus);
+// CHECK: @llvm.ppc.altivec.vsl
+// CHECK-LE: @llvm.ppc.altivec.vsl
+
+ res_vsc = vec_sll(vsc, vui);
+// CHECK: @llvm.ppc.altivec.vsl
+// CHECK-LE: @llvm.ppc.altivec.vsl
+
+ res_vuc = vec_sll(vuc, vuc);
+// CHECK: @llvm.ppc.altivec.vsl
+// CHECK-LE: @llvm.ppc.altivec.vsl
+
+ res_vuc = vec_sll(vuc, vus);
+// CHECK: @llvm.ppc.altivec.vsl
+// CHECK-LE: @llvm.ppc.altivec.vsl
+
+ res_vuc = vec_sll(vuc, vui);
+// CHECK: @llvm.ppc.altivec.vsl
+// CHECK-LE: @llvm.ppc.altivec.vsl
+
+ res_vbc = vec_sll(vbc, vuc);
+// CHECK: @llvm.ppc.altivec.vsl
+// CHECK-LE: @llvm.ppc.altivec.vsl
+
+ res_vbc = vec_sll(vbc, vus);
+// CHECK: @llvm.ppc.altivec.vsl
+// CHECK-LE: @llvm.ppc.altivec.vsl
+
+ res_vbc = vec_sll(vbc, vui);
+// CHECK: @llvm.ppc.altivec.vsl
+// CHECK-LE: @llvm.ppc.altivec.vsl
+
+ res_vs = vec_sll(vs, vuc);
+// CHECK: @llvm.ppc.altivec.vsl
+// CHECK-LE: @llvm.ppc.altivec.vsl
+
+ res_vs = vec_sll(vs, vus);
+// CHECK: @llvm.ppc.altivec.vsl
+// CHECK-LE: @llvm.ppc.altivec.vsl
+
+ res_vs = vec_sll(vs, vui);
+// CHECK: @llvm.ppc.altivec.vsl
+// CHECK-LE: @llvm.ppc.altivec.vsl
+
+ res_vus = vec_sll(vus, vuc);
+// CHECK: @llvm.ppc.altivec.vsl
+// CHECK-LE: @llvm.ppc.altivec.vsl
+
+ res_vus = vec_sll(vus, vus);
+// CHECK: @llvm.ppc.altivec.vsl
+// CHECK-LE: @llvm.ppc.altivec.vsl
+
+ res_vus = vec_sll(vus, vui);
+// CHECK: @llvm.ppc.altivec.vsl
+// CHECK-LE: @llvm.ppc.altivec.vsl
+
+ res_vbs = vec_sll(vbs, vuc);
+// CHECK: @llvm.ppc.altivec.vsl
+// CHECK-LE: @llvm.ppc.altivec.vsl
+
+ res_vbs = vec_sll(vbs, vus);
+// CHECK: @llvm.ppc.altivec.vsl
+// CHECK-LE: @llvm.ppc.altivec.vsl
+
+ res_vbs = vec_sll(vbs, vui);
+// CHECK: @llvm.ppc.altivec.vsl
+// CHECK-LE: @llvm.ppc.altivec.vsl
+
+ res_vp = vec_sll(vp, vuc);
+// CHECK: @llvm.ppc.altivec.vsl
+// CHECK-LE: @llvm.ppc.altivec.vsl
+
+ res_vp = vec_sll(vp, vus);
+// CHECK: @llvm.ppc.altivec.vsl
+// CHECK-LE: @llvm.ppc.altivec.vsl
+
+ res_vp = vec_sll(vp, vui);
+// CHECK: @llvm.ppc.altivec.vsl
+// CHECK-LE: @llvm.ppc.altivec.vsl
+
+ res_vi = vec_sll(vi, vuc);
+// CHECK: @llvm.ppc.altivec.vsl
+// CHECK-LE: @llvm.ppc.altivec.vsl
+
+ res_vi = vec_sll(vi, vus);
+// CHECK: @llvm.ppc.altivec.vsl
+// CHECK-LE: @llvm.ppc.altivec.vsl
+
+ res_vi = vec_sll(vi, vui);
+// CHECK: @llvm.ppc.altivec.vsl
+// CHECK-LE: @llvm.ppc.altivec.vsl
+
+ res_vui = vec_sll(vui, vuc);
+// CHECK: @llvm.ppc.altivec.vsl
+// CHECK-LE: @llvm.ppc.altivec.vsl
+
+ res_vui = vec_sll(vui, vus);
+// CHECK: @llvm.ppc.altivec.vsl
+// CHECK-LE: @llvm.ppc.altivec.vsl
+
+ res_vui = vec_sll(vui, vui);
+// CHECK: @llvm.ppc.altivec.vsl
+// CHECK-LE: @llvm.ppc.altivec.vsl
+
+ res_vbi = vec_sll(vbi, vuc);
+// CHECK: @llvm.ppc.altivec.vsl
+// CHECK-LE: @llvm.ppc.altivec.vsl
+
+ res_vbi = vec_sll(vbi, vus);
+// CHECK: @llvm.ppc.altivec.vsl
+// CHECK-LE: @llvm.ppc.altivec.vsl
+
+ res_vbi = vec_sll(vbi, vui);
+// CHECK: @llvm.ppc.altivec.vsl
+// CHECK-LE: @llvm.ppc.altivec.vsl
+
+ res_vsc = vec_vsl(vsc, vuc);
+// CHECK: @llvm.ppc.altivec.vsl
+// CHECK-LE: @llvm.ppc.altivec.vsl
+
+ res_vsc = vec_vsl(vsc, vus);
+// CHECK: @llvm.ppc.altivec.vsl
+// CHECK-LE: @llvm.ppc.altivec.vsl
+
+ res_vsc = vec_vsl(vsc, vui);
+// CHECK: @llvm.ppc.altivec.vsl
+// CHECK-LE: @llvm.ppc.altivec.vsl
+
+ res_vuc = vec_vsl(vuc, vuc);
+// CHECK: @llvm.ppc.altivec.vsl
+// CHECK-LE: @llvm.ppc.altivec.vsl
+
+ res_vuc = vec_vsl(vuc, vus);
+// CHECK: @llvm.ppc.altivec.vsl
+// CHECK-LE: @llvm.ppc.altivec.vsl
+
+ res_vuc = vec_vsl(vuc, vui);
+// CHECK: @llvm.ppc.altivec.vsl
+// CHECK-LE: @llvm.ppc.altivec.vsl
+
+ res_vbc = vec_vsl(vbc, vuc);
+// CHECK: @llvm.ppc.altivec.vsl
+// CHECK-LE: @llvm.ppc.altivec.vsl
+
+ res_vbc = vec_vsl(vbc, vus);
+// CHECK: @llvm.ppc.altivec.vsl
+// CHECK-LE: @llvm.ppc.altivec.vsl
+
+ res_vbc = vec_vsl(vbc, vui);
+// CHECK: @llvm.ppc.altivec.vsl
+// CHECK-LE: @llvm.ppc.altivec.vsl
+
+ res_vs = vec_vsl(vs, vuc);
+// CHECK: @llvm.ppc.altivec.vsl
+// CHECK-LE: @llvm.ppc.altivec.vsl
+
+ res_vs = vec_vsl(vs, vus);
+// CHECK: @llvm.ppc.altivec.vsl
+// CHECK-LE: @llvm.ppc.altivec.vsl
+
+ res_vs = vec_vsl(vs, vui);
+// CHECK: @llvm.ppc.altivec.vsl
+// CHECK-LE: @llvm.ppc.altivec.vsl
+
+ res_vus = vec_vsl(vus, vuc);
+// CHECK: @llvm.ppc.altivec.vsl
+// CHECK-LE: @llvm.ppc.altivec.vsl
+
+ res_vus = vec_vsl(vus, vus);
+// CHECK: @llvm.ppc.altivec.vsl
+// CHECK-LE: @llvm.ppc.altivec.vsl
+
+ res_vus = vec_vsl(vus, vui);
+// CHECK: @llvm.ppc.altivec.vsl
+// CHECK-LE: @llvm.ppc.altivec.vsl
+
+ res_vbs = vec_vsl(vbs, vuc);
+// CHECK: @llvm.ppc.altivec.vsl
+// CHECK-LE: @llvm.ppc.altivec.vsl
+
+ res_vbs = vec_vsl(vbs, vus);
+// CHECK: @llvm.ppc.altivec.vsl
+// CHECK-LE: @llvm.ppc.altivec.vsl
+
+ res_vbs = vec_vsl(vbs, vui);
+// CHECK: @llvm.ppc.altivec.vsl
+// CHECK-LE: @llvm.ppc.altivec.vsl
+
+ res_vp = vec_vsl(vp, vuc);
+// CHECK: @llvm.ppc.altivec.vsl
+// CHECK-LE: @llvm.ppc.altivec.vsl
+
+ res_vp = vec_vsl(vp, vus);
+// CHECK: @llvm.ppc.altivec.vsl
+// CHECK-LE: @llvm.ppc.altivec.vsl
+
+ res_vp = vec_vsl(vp, vui);
+// CHECK: @llvm.ppc.altivec.vsl
+// CHECK-LE: @llvm.ppc.altivec.vsl
+
+ res_vi = vec_vsl(vi, vuc);
+// CHECK: @llvm.ppc.altivec.vsl
+// CHECK-LE: @llvm.ppc.altivec.vsl
+
+ res_vi = vec_vsl(vi, vus);
+// CHECK: @llvm.ppc.altivec.vsl
+// CHECK-LE: @llvm.ppc.altivec.vsl
+
+ res_vi = vec_vsl(vi, vui);
+// CHECK: @llvm.ppc.altivec.vsl
+// CHECK-LE: @llvm.ppc.altivec.vsl
+
+ res_vui = vec_vsl(vui, vuc);
+// CHECK: @llvm.ppc.altivec.vsl
+// CHECK-LE: @llvm.ppc.altivec.vsl
+
+ res_vui = vec_vsl(vui, vus);
+// CHECK: @llvm.ppc.altivec.vsl
+// CHECK-LE: @llvm.ppc.altivec.vsl
+
+ res_vui = vec_vsl(vui, vui);
+// CHECK: @llvm.ppc.altivec.vsl
+// CHECK-LE: @llvm.ppc.altivec.vsl
+
+ res_vbi = vec_vsl(vbi, vuc);
+// CHECK: @llvm.ppc.altivec.vsl
+// CHECK-LE: @llvm.ppc.altivec.vsl
+
+ res_vbi = vec_vsl(vbi, vus);
+// CHECK: @llvm.ppc.altivec.vsl
+// CHECK-LE: @llvm.ppc.altivec.vsl
+
+ res_vbi = vec_vsl(vbi, vui);
+// CHECK: @llvm.ppc.altivec.vsl
+// CHECK-LE: @llvm.ppc.altivec.vsl
/* vec_slo */
- res_vsc = vec_slo(vsc, vsc); // CHECK: @llvm.ppc.altivec.vslo
- res_vsc = vec_slo(vsc, vuc); // CHECK: @llvm.ppc.altivec.vslo
- res_vuc = vec_slo(vuc, vsc); // CHECK: @llvm.ppc.altivec.vslo
- res_vuc = vec_slo(vuc, vuc); // CHECK: @llvm.ppc.altivec.vslo
- res_vs = vec_slo(vs, vsc); // CHECK: @llvm.ppc.altivec.vslo
- res_vs = vec_slo(vs, vuc); // CHECK: @llvm.ppc.altivec.vslo
- res_vus = vec_slo(vus, vsc); // CHECK: @llvm.ppc.altivec.vslo
- res_vus = vec_slo(vus, vuc); // CHECK: @llvm.ppc.altivec.vslo
- res_vp = vec_slo(vp, vsc); // CHECK: @llvm.ppc.altivec.vslo
- res_vp = vec_slo(vp, vuc); // CHECK: @llvm.ppc.altivec.vslo
- res_vi = vec_slo(vi, vsc); // CHECK: @llvm.ppc.altivec.vslo
- res_vi = vec_slo(vi, vuc); // CHECK: @llvm.ppc.altivec.vslo
- res_vui = vec_slo(vui, vsc); // CHECK: @llvm.ppc.altivec.vslo
- res_vui = vec_slo(vui, vuc); // CHECK: @llvm.ppc.altivec.vslo
- res_vf = vec_slo(vf, vsc); // CHECK: @llvm.ppc.altivec.vslo
- res_vf = vec_slo(vf, vuc); // CHECK: @llvm.ppc.altivec.vslo
- res_vsc = vec_vslo(vsc, vsc); // CHECK: @llvm.ppc.altivec.vslo
- res_vsc = vec_vslo(vsc, vuc); // CHECK: @llvm.ppc.altivec.vslo
- res_vuc = vec_vslo(vuc, vsc); // CHECK: @llvm.ppc.altivec.vslo
- res_vuc = vec_vslo(vuc, vuc); // CHECK: @llvm.ppc.altivec.vslo
- res_vs = vec_vslo(vs, vsc); // CHECK: @llvm.ppc.altivec.vslo
- res_vs = vec_vslo(vs, vuc); // CHECK: @llvm.ppc.altivec.vslo
- res_vus = vec_vslo(vus, vsc); // CHECK: @llvm.ppc.altivec.vslo
- res_vus = vec_vslo(vus, vuc); // CHECK: @llvm.ppc.altivec.vslo
- res_vp = vec_vslo(vp, vsc); // CHECK: @llvm.ppc.altivec.vslo
- res_vp = vec_vslo(vp, vuc); // CHECK: @llvm.ppc.altivec.vslo
- res_vi = vec_vslo(vi, vsc); // CHECK: @llvm.ppc.altivec.vslo
- res_vi = vec_vslo(vi, vuc); // CHECK: @llvm.ppc.altivec.vslo
- res_vui = vec_vslo(vui, vsc); // CHECK: @llvm.ppc.altivec.vslo
- res_vui = vec_vslo(vui, vuc); // CHECK: @llvm.ppc.altivec.vslo
- res_vf = vec_vslo(vf, vsc); // CHECK: @llvm.ppc.altivec.vslo
- res_vf = vec_vslo(vf, vuc); // CHECK: @llvm.ppc.altivec.vslo
+ res_vsc = vec_slo(vsc, vsc);
+// CHECK: @llvm.ppc.altivec.vslo
+// CHECK-LE: @llvm.ppc.altivec.vslo
+
+ res_vsc = vec_slo(vsc, vuc);
+// CHECK: @llvm.ppc.altivec.vslo
+// CHECK-LE: @llvm.ppc.altivec.vslo
+
+ res_vuc = vec_slo(vuc, vsc);
+// CHECK: @llvm.ppc.altivec.vslo
+// CHECK-LE: @llvm.ppc.altivec.vslo
+
+ res_vuc = vec_slo(vuc, vuc);
+// CHECK: @llvm.ppc.altivec.vslo
+// CHECK-LE: @llvm.ppc.altivec.vslo
+
+ res_vs = vec_slo(vs, vsc);
+// CHECK: @llvm.ppc.altivec.vslo
+// CHECK-LE: @llvm.ppc.altivec.vslo
+
+ res_vs = vec_slo(vs, vuc);
+// CHECK: @llvm.ppc.altivec.vslo
+// CHECK-LE: @llvm.ppc.altivec.vslo
+
+ res_vus = vec_slo(vus, vsc);
+// CHECK: @llvm.ppc.altivec.vslo
+// CHECK-LE: @llvm.ppc.altivec.vslo
+
+ res_vus = vec_slo(vus, vuc);
+// CHECK: @llvm.ppc.altivec.vslo
+// CHECK-LE: @llvm.ppc.altivec.vslo
+
+ res_vp = vec_slo(vp, vsc);
+// CHECK: @llvm.ppc.altivec.vslo
+// CHECK-LE: @llvm.ppc.altivec.vslo
+
+ res_vp = vec_slo(vp, vuc);
+// CHECK: @llvm.ppc.altivec.vslo
+// CHECK-LE: @llvm.ppc.altivec.vslo
+
+ res_vi = vec_slo(vi, vsc);
+// CHECK: @llvm.ppc.altivec.vslo
+// CHECK-LE: @llvm.ppc.altivec.vslo
+
+ res_vi = vec_slo(vi, vuc);
+// CHECK: @llvm.ppc.altivec.vslo
+// CHECK-LE: @llvm.ppc.altivec.vslo
+
+ res_vui = vec_slo(vui, vsc);
+// CHECK: @llvm.ppc.altivec.vslo
+// CHECK-LE: @llvm.ppc.altivec.vslo
+
+ res_vui = vec_slo(vui, vuc);
+// CHECK: @llvm.ppc.altivec.vslo
+// CHECK-LE: @llvm.ppc.altivec.vslo
+
+ res_vf = vec_slo(vf, vsc);
+// CHECK: @llvm.ppc.altivec.vslo
+// CHECK-LE: @llvm.ppc.altivec.vslo
+
+ res_vf = vec_slo(vf, vuc);
+// CHECK: @llvm.ppc.altivec.vslo
+// CHECK-LE: @llvm.ppc.altivec.vslo
+
+ res_vsc = vec_vslo(vsc, vsc);
+// CHECK: @llvm.ppc.altivec.vslo
+// CHECK-LE: @llvm.ppc.altivec.vslo
+
+ res_vsc = vec_vslo(vsc, vuc);
+// CHECK: @llvm.ppc.altivec.vslo
+// CHECK-LE: @llvm.ppc.altivec.vslo
+
+ res_vuc = vec_vslo(vuc, vsc);
+// CHECK: @llvm.ppc.altivec.vslo
+// CHECK-LE: @llvm.ppc.altivec.vslo
+
+ res_vuc = vec_vslo(vuc, vuc);
+// CHECK: @llvm.ppc.altivec.vslo
+// CHECK-LE: @llvm.ppc.altivec.vslo
+
+ res_vs = vec_vslo(vs, vsc);
+// CHECK: @llvm.ppc.altivec.vslo
+// CHECK-LE: @llvm.ppc.altivec.vslo
+
+ res_vs = vec_vslo(vs, vuc);
+// CHECK: @llvm.ppc.altivec.vslo
+// CHECK-LE: @llvm.ppc.altivec.vslo
+
+ res_vus = vec_vslo(vus, vsc);
+// CHECK: @llvm.ppc.altivec.vslo
+// CHECK-LE: @llvm.ppc.altivec.vslo
+
+ res_vus = vec_vslo(vus, vuc);
+// CHECK: @llvm.ppc.altivec.vslo
+// CHECK-LE: @llvm.ppc.altivec.vslo
+
+ res_vp = vec_vslo(vp, vsc);
+// CHECK: @llvm.ppc.altivec.vslo
+// CHECK-LE: @llvm.ppc.altivec.vslo
+
+ res_vp = vec_vslo(vp, vuc);
+// CHECK: @llvm.ppc.altivec.vslo
+// CHECK-LE: @llvm.ppc.altivec.vslo
+
+ res_vi = vec_vslo(vi, vsc);
+// CHECK: @llvm.ppc.altivec.vslo
+// CHECK-LE: @llvm.ppc.altivec.vslo
+
+ res_vi = vec_vslo(vi, vuc);
+// CHECK: @llvm.ppc.altivec.vslo
+// CHECK-LE: @llvm.ppc.altivec.vslo
+
+ res_vui = vec_vslo(vui, vsc);
+// CHECK: @llvm.ppc.altivec.vslo
+// CHECK-LE: @llvm.ppc.altivec.vslo
+
+ res_vui = vec_vslo(vui, vuc);
+// CHECK: @llvm.ppc.altivec.vslo
+// CHECK-LE: @llvm.ppc.altivec.vslo
+
+ res_vf = vec_vslo(vf, vsc);
+// CHECK: @llvm.ppc.altivec.vslo
+// CHECK-LE: @llvm.ppc.altivec.vslo
+
+ res_vf = vec_vslo(vf, vuc);
+// CHECK: @llvm.ppc.altivec.vslo
+// CHECK-LE: @llvm.ppc.altivec.vslo
/* vec_splat */
- res_vsc = vec_splat(vsc, 0); // CHECK: @llvm.ppc.altivec.vperm
- res_vuc = vec_splat(vuc, 0); // CHECK: @llvm.ppc.altivec.vperm
- res_vbc = vec_splat(vbc, 0); // CHECK: @llvm.ppc.altivec.vperm
- res_vs = vec_splat(vs, 0); // CHECK: @llvm.ppc.altivec.vperm
- res_vus = vec_splat(vus, 0); // CHECK: @llvm.ppc.altivec.vperm
- res_vbs = vec_splat(vbs, 0); // CHECK: @llvm.ppc.altivec.vperm
- res_vp = vec_splat(vp, 0); // CHECK: @llvm.ppc.altivec.vperm
- res_vi = vec_splat(vi, 0); // CHECK: @llvm.ppc.altivec.vperm
- res_vui = vec_splat(vui, 0); // CHECK: @llvm.ppc.altivec.vperm
- res_vbi = vec_splat(vbi, 0); // CHECK: @llvm.ppc.altivec.vperm
- res_vf = vec_splat(vf, 0); // CHECK: @llvm.ppc.altivec.vperm
- res_vsc = vec_vspltb(vsc, 0); // CHECK: @llvm.ppc.altivec.vperm
- res_vuc = vec_vspltb(vuc, 0); // CHECK: @llvm.ppc.altivec.vperm
- res_vbc = vec_vspltb(vbc, 0); // CHECK: @llvm.ppc.altivec.vperm
- res_vs = vec_vsplth(vs, 0); // CHECK: @llvm.ppc.altivec.vperm
- res_vus = vec_vsplth(vus, 0); // CHECK: @llvm.ppc.altivec.vperm
- res_vbs = vec_vsplth(vbs, 0); // CHECK: @llvm.ppc.altivec.vperm
- res_vp = vec_vsplth(vp, 0); // CHECK: @llvm.ppc.altivec.vperm
- res_vi = vec_vspltw(vi, 0); // CHECK: @llvm.ppc.altivec.vperm
- res_vui = vec_vspltw(vui, 0); // CHECK: @llvm.ppc.altivec.vperm
- res_vbi = vec_vspltw(vbi, 0); // CHECK: @llvm.ppc.altivec.vperm
- res_vf = vec_vspltw(vf, 0); // CHECK: @llvm.ppc.altivec.vperm
+ res_vsc = vec_splat(vsc, 0);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vuc = vec_splat(vuc, 0);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vbc = vec_splat(vbc, 0);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vs = vec_splat(vs, 0);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vus = vec_splat(vus, 0);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vbs = vec_splat(vbs, 0);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vp = vec_splat(vp, 0);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vi = vec_splat(vi, 0);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vui = vec_splat(vui, 0);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vbi = vec_splat(vbi, 0);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vf = vec_splat(vf, 0);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vsc = vec_vspltb(vsc, 0);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vuc = vec_vspltb(vuc, 0);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vbc = vec_vspltb(vbc, 0);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vs = vec_vsplth(vs, 0);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vus = vec_vsplth(vus, 0);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vbs = vec_vsplth(vbs, 0);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vp = vec_vsplth(vp, 0);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vi = vec_vspltw(vi, 0);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vui = vec_vspltw(vui, 0);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vbi = vec_vspltw(vbi, 0);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vf = vec_vspltw(vf, 0);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
/* vec_splat_s8 */
res_vsc = vec_splat_s8(0x09); // TODO: add check
@@ -1329,1729 +3801,4715 @@ void test6() {
res_vui = vec_splat_u32(0x09); // TODO: add check
/* vec_sr */
- res_vsc = vec_sr(vsc, vuc); // CHECK: shr <16 x i8>
- res_vuc = vec_sr(vuc, vuc); // CHECK: shr <16 x i8>
- res_vs = vec_sr(vs, vus); // CHECK: shr <8 x i16>
- res_vus = vec_sr(vus, vus); // CHECK: shr <8 x i16>
- res_vi = vec_sr(vi, vui); // CHECK: shr <4 x i32>
- res_vui = vec_sr(vui, vui); // CHECK: shr <4 x i32>
- res_vsc = vec_vsrb(vsc, vuc); // CHECK: shr <16 x i8>
- res_vuc = vec_vsrb(vuc, vuc); // CHECK: shr <16 x i8>
- res_vs = vec_vsrh(vs, vus); // CHECK: shr <8 x i16>
- res_vus = vec_vsrh(vus, vus); // CHECK: shr <8 x i16>
- res_vi = vec_vsrw(vi, vui); // CHECK: shr <4 x i32>
- res_vui = vec_vsrw(vui, vui); // CHECK: shr <4 x i32>
+ res_vsc = vec_sr(vsc, vuc);
+// CHECK: shr <16 x i8>
+// CHECK-LE: shr <16 x i8>
+
+ res_vuc = vec_sr(vuc, vuc);
+// CHECK: shr <16 x i8>
+// CHECK-LE: shr <16 x i8>
+
+ res_vs = vec_sr(vs, vus);
+// CHECK: shr <8 x i16>
+// CHECK-LE: shr <8 x i16>
+
+ res_vus = vec_sr(vus, vus);
+// CHECK: shr <8 x i16>
+// CHECK-LE: shr <8 x i16>
+
+ res_vi = vec_sr(vi, vui);
+// CHECK: shr <4 x i32>
+// CHECK-LE: shr <4 x i32>
+
+ res_vui = vec_sr(vui, vui);
+// CHECK: shr <4 x i32>
+// CHECK-LE: shr <4 x i32>
+
+ res_vsc = vec_vsrb(vsc, vuc);
+// CHECK: shr <16 x i8>
+// CHECK-LE: shr <16 x i8>
+
+ res_vuc = vec_vsrb(vuc, vuc);
+// CHECK: shr <16 x i8>
+// CHECK-LE: shr <16 x i8>
+
+ res_vs = vec_vsrh(vs, vus);
+// CHECK: shr <8 x i16>
+// CHECK-LE: shr <8 x i16>
+
+ res_vus = vec_vsrh(vus, vus);
+// CHECK: shr <8 x i16>
+// CHECK-LE: shr <8 x i16>
+
+ res_vi = vec_vsrw(vi, vui);
+// CHECK: shr <4 x i32>
+// CHECK-LE: shr <4 x i32>
+
+ res_vui = vec_vsrw(vui, vui);
+// CHECK: shr <4 x i32>
+// CHECK-LE: shr <4 x i32>
/* vec_sra */
- res_vsc = vec_sra(vsc, vuc); // CHECK: @llvm.ppc.altivec.vsrab
- res_vuc = vec_sra(vuc, vuc); // CHECK: @llvm.ppc.altivec.vsrab
- res_vs = vec_sra(vs, vus); // CHECK: @llvm.ppc.altivec.vsrah
- res_vus = vec_sra(vus, vus); // CHECK: @llvm.ppc.altivec.vsrah
- res_vi = vec_sra(vi, vui); // CHECK: @llvm.ppc.altivec.vsraw
- res_vui = vec_sra(vui, vui); // CHECK: @llvm.ppc.altivec.vsraw
- res_vsc = vec_vsrab(vsc, vuc); // CHECK: @llvm.ppc.altivec.vsrab
- res_vuc = vec_vsrab(vuc, vuc); // CHECK: @llvm.ppc.altivec.vsrab
- res_vs = vec_vsrah(vs, vus); // CHECK: @llvm.ppc.altivec.vsrah
- res_vus = vec_vsrah(vus, vus); // CHECK: @llvm.ppc.altivec.vsrah
- res_vi = vec_vsraw(vi, vui); // CHECK: @llvm.ppc.altivec.vsraw
- res_vui = vec_vsraw(vui, vui); // CHECK: @llvm.ppc.altivec.vsraw
+ res_vsc = vec_sra(vsc, vuc);
+// CHECK: @llvm.ppc.altivec.vsrab
+// CHECK-LE: @llvm.ppc.altivec.vsrab
+
+ res_vuc = vec_sra(vuc, vuc);
+// CHECK: @llvm.ppc.altivec.vsrab
+// CHECK-LE: @llvm.ppc.altivec.vsrab
+
+ res_vs = vec_sra(vs, vus);
+// CHECK: @llvm.ppc.altivec.vsrah
+// CHECK-LE: @llvm.ppc.altivec.vsrah
+
+ res_vus = vec_sra(vus, vus);
+// CHECK: @llvm.ppc.altivec.vsrah
+// CHECK-LE: @llvm.ppc.altivec.vsrah
+
+ res_vi = vec_sra(vi, vui);
+// CHECK: @llvm.ppc.altivec.vsraw
+// CHECK-LE: @llvm.ppc.altivec.vsraw
+
+ res_vui = vec_sra(vui, vui);
+// CHECK: @llvm.ppc.altivec.vsraw
+// CHECK-LE: @llvm.ppc.altivec.vsraw
+
+ res_vsc = vec_vsrab(vsc, vuc);
+// CHECK: @llvm.ppc.altivec.vsrab
+// CHECK-LE: @llvm.ppc.altivec.vsrab
+
+ res_vuc = vec_vsrab(vuc, vuc);
+// CHECK: @llvm.ppc.altivec.vsrab
+// CHECK-LE: @llvm.ppc.altivec.vsrab
+
+ res_vs = vec_vsrah(vs, vus);
+// CHECK: @llvm.ppc.altivec.vsrah
+// CHECK-LE: @llvm.ppc.altivec.vsrah
+
+ res_vus = vec_vsrah(vus, vus);
+// CHECK: @llvm.ppc.altivec.vsrah
+// CHECK-LE: @llvm.ppc.altivec.vsrah
+
+ res_vi = vec_vsraw(vi, vui);
+// CHECK: @llvm.ppc.altivec.vsraw
+// CHECK-LE: @llvm.ppc.altivec.vsraw
+
+ res_vui = vec_vsraw(vui, vui);
+// CHECK: @llvm.ppc.altivec.vsraw
+// CHECK-LE: @llvm.ppc.altivec.vsraw
/* vec_srl */
- res_vsc = vec_srl(vsc, vuc); // CHECK: @llvm.ppc.altivec.vsr
- res_vsc = vec_srl(vsc, vus); // CHECK: @llvm.ppc.altivec.vsr
- res_vsc = vec_srl(vsc, vui); // CHECK: @llvm.ppc.altivec.vsr
- res_vuc = vec_srl(vuc, vuc); // CHECK: @llvm.ppc.altivec.vsr
- res_vuc = vec_srl(vuc, vus); // CHECK: @llvm.ppc.altivec.vsr
- res_vuc = vec_srl(vuc, vui); // CHECK: @llvm.ppc.altivec.vsr
- res_vbc = vec_srl(vbc, vuc); // CHECK: @llvm.ppc.altivec.vsr
- res_vbc = vec_srl(vbc, vus); // CHECK: @llvm.ppc.altivec.vsr
- res_vbc = vec_srl(vbc, vui); // CHECK: @llvm.ppc.altivec.vsr
- res_vs = vec_srl(vs, vuc); // CHECK: @llvm.ppc.altivec.vsr
- res_vs = vec_srl(vs, vus); // CHECK: @llvm.ppc.altivec.vsr
- res_vs = vec_srl(vs, vui); // CHECK: @llvm.ppc.altivec.vsr
- res_vus = vec_srl(vus, vuc); // CHECK: @llvm.ppc.altivec.vsr
- res_vus = vec_srl(vus, vus); // CHECK: @llvm.ppc.altivec.vsr
- res_vus = vec_srl(vus, vui); // CHECK: @llvm.ppc.altivec.vsr
- res_vbs = vec_srl(vbs, vuc); // CHECK: @llvm.ppc.altivec.vsr
- res_vbs = vec_srl(vbs, vus); // CHECK: @llvm.ppc.altivec.vsr
- res_vbs = vec_srl(vbs, vui); // CHECK: @llvm.ppc.altivec.vsr
- res_vp = vec_srl(vp, vuc); // CHECK: @llvm.ppc.altivec.vsr
- res_vp = vec_srl(vp, vus); // CHECK: @llvm.ppc.altivec.vsr
- res_vp = vec_srl(vp, vui); // CHECK: @llvm.ppc.altivec.vsr
- res_vi = vec_srl(vi, vuc); // CHECK: @llvm.ppc.altivec.vsr
- res_vi = vec_srl(vi, vus); // CHECK: @llvm.ppc.altivec.vsr
- res_vi = vec_srl(vi, vui); // CHECK: @llvm.ppc.altivec.vsr
- res_vui = vec_srl(vui, vuc); // CHECK: @llvm.ppc.altivec.vsr
- res_vui = vec_srl(vui, vus); // CHECK: @llvm.ppc.altivec.vsr
- res_vui = vec_srl(vui, vui); // CHECK: @llvm.ppc.altivec.vsr
- res_vbi = vec_srl(vbi, vuc); // CHECK: @llvm.ppc.altivec.vsr
- res_vbi = vec_srl(vbi, vus); // CHECK: @llvm.ppc.altivec.vsr
- res_vbi = vec_srl(vbi, vui); // CHECK: @llvm.ppc.altivec.vsr
- res_vsc = vec_vsr(vsc, vuc); // CHECK: @llvm.ppc.altivec.vsr
- res_vsc = vec_vsr(vsc, vus); // CHECK: @llvm.ppc.altivec.vsr
- res_vsc = vec_vsr(vsc, vui); // CHECK: @llvm.ppc.altivec.vsr
- res_vuc = vec_vsr(vuc, vuc); // CHECK: @llvm.ppc.altivec.vsr
- res_vuc = vec_vsr(vuc, vus); // CHECK: @llvm.ppc.altivec.vsr
- res_vuc = vec_vsr(vuc, vui); // CHECK: @llvm.ppc.altivec.vsr
- res_vbc = vec_vsr(vbc, vuc); // CHECK: @llvm.ppc.altivec.vsr
- res_vbc = vec_vsr(vbc, vus); // CHECK: @llvm.ppc.altivec.vsr
- res_vbc = vec_vsr(vbc, vui); // CHECK: @llvm.ppc.altivec.vsr
- res_vs = vec_vsr(vs, vuc); // CHECK: @llvm.ppc.altivec.vsr
- res_vs = vec_vsr(vs, vus); // CHECK: @llvm.ppc.altivec.vsr
- res_vs = vec_vsr(vs, vui); // CHECK: @llvm.ppc.altivec.vsr
- res_vus = vec_vsr(vus, vuc); // CHECK: @llvm.ppc.altivec.vsr
- res_vus = vec_vsr(vus, vus); // CHECK: @llvm.ppc.altivec.vsr
- res_vus = vec_vsr(vus, vui); // CHECK: @llvm.ppc.altivec.vsr
- res_vbs = vec_vsr(vbs, vuc); // CHECK: @llvm.ppc.altivec.vsr
- res_vbs = vec_vsr(vbs, vus); // CHECK: @llvm.ppc.altivec.vsr
- res_vbs = vec_vsr(vbs, vui); // CHECK: @llvm.ppc.altivec.vsr
- res_vp = vec_vsr(vp, vuc); // CHECK: @llvm.ppc.altivec.vsr
- res_vp = vec_vsr(vp, vus); // CHECK: @llvm.ppc.altivec.vsr
- res_vp = vec_vsr(vp, vui); // CHECK: @llvm.ppc.altivec.vsr
- res_vi = vec_vsr(vi, vuc); // CHECK: @llvm.ppc.altivec.vsr
- res_vi = vec_vsr(vi, vus); // CHECK: @llvm.ppc.altivec.vsr
- res_vi = vec_vsr(vi, vui); // CHECK: @llvm.ppc.altivec.vsr
- res_vui = vec_vsr(vui, vuc); // CHECK: @llvm.ppc.altivec.vsr
- res_vui = vec_vsr(vui, vus); // CHECK: @llvm.ppc.altivec.vsr
- res_vui = vec_vsr(vui, vui); // CHECK: @llvm.ppc.altivec.vsr
- res_vbi = vec_vsr(vbi, vuc); // CHECK: @llvm.ppc.altivec.vsr
- res_vbi = vec_vsr(vbi, vus); // CHECK: @llvm.ppc.altivec.vsr
- res_vbi = vec_vsr(vbi, vui); // CHECK: @llvm.ppc.altivec.vsr
+ res_vsc = vec_srl(vsc, vuc);
+// CHECK: @llvm.ppc.altivec.vsr
+// CHECK-LE: @llvm.ppc.altivec.vsr
+
+ res_vsc = vec_srl(vsc, vus);
+// CHECK: @llvm.ppc.altivec.vsr
+// CHECK-LE: @llvm.ppc.altivec.vsr
+
+ res_vsc = vec_srl(vsc, vui);
+// CHECK: @llvm.ppc.altivec.vsr
+// CHECK-LE: @llvm.ppc.altivec.vsr
+
+ res_vuc = vec_srl(vuc, vuc);
+// CHECK: @llvm.ppc.altivec.vsr
+// CHECK-LE: @llvm.ppc.altivec.vsr
+
+ res_vuc = vec_srl(vuc, vus);
+// CHECK: @llvm.ppc.altivec.vsr
+// CHECK-LE: @llvm.ppc.altivec.vsr
+
+ res_vuc = vec_srl(vuc, vui);
+// CHECK: @llvm.ppc.altivec.vsr
+// CHECK-LE: @llvm.ppc.altivec.vsr
+
+ res_vbc = vec_srl(vbc, vuc);
+// CHECK: @llvm.ppc.altivec.vsr
+// CHECK-LE: @llvm.ppc.altivec.vsr
+
+ res_vbc = vec_srl(vbc, vus);
+// CHECK: @llvm.ppc.altivec.vsr
+// CHECK-LE: @llvm.ppc.altivec.vsr
+
+ res_vbc = vec_srl(vbc, vui);
+// CHECK: @llvm.ppc.altivec.vsr
+// CHECK-LE: @llvm.ppc.altivec.vsr
+
+ res_vs = vec_srl(vs, vuc);
+// CHECK: @llvm.ppc.altivec.vsr
+// CHECK-LE: @llvm.ppc.altivec.vsr
+
+ res_vs = vec_srl(vs, vus);
+// CHECK: @llvm.ppc.altivec.vsr
+// CHECK-LE: @llvm.ppc.altivec.vsr
+
+ res_vs = vec_srl(vs, vui);
+// CHECK: @llvm.ppc.altivec.vsr
+// CHECK-LE: @llvm.ppc.altivec.vsr
+
+ res_vus = vec_srl(vus, vuc);
+// CHECK: @llvm.ppc.altivec.vsr
+// CHECK-LE: @llvm.ppc.altivec.vsr
+
+ res_vus = vec_srl(vus, vus);
+// CHECK: @llvm.ppc.altivec.vsr
+// CHECK-LE: @llvm.ppc.altivec.vsr
+
+ res_vus = vec_srl(vus, vui);
+// CHECK: @llvm.ppc.altivec.vsr
+// CHECK-LE: @llvm.ppc.altivec.vsr
+
+ res_vbs = vec_srl(vbs, vuc);
+// CHECK: @llvm.ppc.altivec.vsr
+// CHECK-LE: @llvm.ppc.altivec.vsr
+
+ res_vbs = vec_srl(vbs, vus);
+// CHECK: @llvm.ppc.altivec.vsr
+// CHECK-LE: @llvm.ppc.altivec.vsr
+
+ res_vbs = vec_srl(vbs, vui);
+// CHECK: @llvm.ppc.altivec.vsr
+// CHECK-LE: @llvm.ppc.altivec.vsr
+
+ res_vp = vec_srl(vp, vuc);
+// CHECK: @llvm.ppc.altivec.vsr
+// CHECK-LE: @llvm.ppc.altivec.vsr
+
+ res_vp = vec_srl(vp, vus);
+// CHECK: @llvm.ppc.altivec.vsr
+// CHECK-LE: @llvm.ppc.altivec.vsr
+
+ res_vp = vec_srl(vp, vui);
+// CHECK: @llvm.ppc.altivec.vsr
+// CHECK-LE: @llvm.ppc.altivec.vsr
+
+ res_vi = vec_srl(vi, vuc);
+// CHECK: @llvm.ppc.altivec.vsr
+// CHECK-LE: @llvm.ppc.altivec.vsr
+
+ res_vi = vec_srl(vi, vus);
+// CHECK: @llvm.ppc.altivec.vsr
+// CHECK-LE: @llvm.ppc.altivec.vsr
+
+ res_vi = vec_srl(vi, vui);
+// CHECK: @llvm.ppc.altivec.vsr
+// CHECK-LE: @llvm.ppc.altivec.vsr
+
+ res_vui = vec_srl(vui, vuc);
+// CHECK: @llvm.ppc.altivec.vsr
+// CHECK-LE: @llvm.ppc.altivec.vsr
+
+ res_vui = vec_srl(vui, vus);
+// CHECK: @llvm.ppc.altivec.vsr
+// CHECK-LE: @llvm.ppc.altivec.vsr
+
+ res_vui = vec_srl(vui, vui);
+// CHECK: @llvm.ppc.altivec.vsr
+// CHECK-LE: @llvm.ppc.altivec.vsr
+
+ res_vbi = vec_srl(vbi, vuc);
+// CHECK: @llvm.ppc.altivec.vsr
+// CHECK-LE: @llvm.ppc.altivec.vsr
+
+ res_vbi = vec_srl(vbi, vus);
+// CHECK: @llvm.ppc.altivec.vsr
+// CHECK-LE: @llvm.ppc.altivec.vsr
+
+ res_vbi = vec_srl(vbi, vui);
+// CHECK: @llvm.ppc.altivec.vsr
+// CHECK-LE: @llvm.ppc.altivec.vsr
+
+ res_vsc = vec_vsr(vsc, vuc);
+// CHECK: @llvm.ppc.altivec.vsr
+// CHECK-LE: @llvm.ppc.altivec.vsr
+
+ res_vsc = vec_vsr(vsc, vus);
+// CHECK: @llvm.ppc.altivec.vsr
+// CHECK-LE: @llvm.ppc.altivec.vsr
+
+ res_vsc = vec_vsr(vsc, vui);
+// CHECK: @llvm.ppc.altivec.vsr
+// CHECK-LE: @llvm.ppc.altivec.vsr
+
+ res_vuc = vec_vsr(vuc, vuc);
+// CHECK: @llvm.ppc.altivec.vsr
+// CHECK-LE: @llvm.ppc.altivec.vsr
+
+ res_vuc = vec_vsr(vuc, vus);
+// CHECK: @llvm.ppc.altivec.vsr
+// CHECK-LE: @llvm.ppc.altivec.vsr
+
+ res_vuc = vec_vsr(vuc, vui);
+// CHECK: @llvm.ppc.altivec.vsr
+// CHECK-LE: @llvm.ppc.altivec.vsr
+
+ res_vbc = vec_vsr(vbc, vuc);
+// CHECK: @llvm.ppc.altivec.vsr
+// CHECK-LE: @llvm.ppc.altivec.vsr
+
+ res_vbc = vec_vsr(vbc, vus);
+// CHECK: @llvm.ppc.altivec.vsr
+// CHECK-LE: @llvm.ppc.altivec.vsr
+
+ res_vbc = vec_vsr(vbc, vui);
+// CHECK: @llvm.ppc.altivec.vsr
+// CHECK-LE: @llvm.ppc.altivec.vsr
+
+ res_vs = vec_vsr(vs, vuc);
+// CHECK: @llvm.ppc.altivec.vsr
+// CHECK-LE: @llvm.ppc.altivec.vsr
+
+ res_vs = vec_vsr(vs, vus);
+// CHECK: @llvm.ppc.altivec.vsr
+// CHECK-LE: @llvm.ppc.altivec.vsr
+
+ res_vs = vec_vsr(vs, vui);
+// CHECK: @llvm.ppc.altivec.vsr
+// CHECK-LE: @llvm.ppc.altivec.vsr
+
+ res_vus = vec_vsr(vus, vuc);
+// CHECK: @llvm.ppc.altivec.vsr
+// CHECK-LE: @llvm.ppc.altivec.vsr
+
+ res_vus = vec_vsr(vus, vus);
+// CHECK: @llvm.ppc.altivec.vsr
+// CHECK-LE: @llvm.ppc.altivec.vsr
+
+ res_vus = vec_vsr(vus, vui);
+// CHECK: @llvm.ppc.altivec.vsr
+// CHECK-LE: @llvm.ppc.altivec.vsr
+
+ res_vbs = vec_vsr(vbs, vuc);
+// CHECK: @llvm.ppc.altivec.vsr
+// CHECK-LE: @llvm.ppc.altivec.vsr
+
+ res_vbs = vec_vsr(vbs, vus);
+// CHECK: @llvm.ppc.altivec.vsr
+// CHECK-LE: @llvm.ppc.altivec.vsr
+
+ res_vbs = vec_vsr(vbs, vui);
+// CHECK: @llvm.ppc.altivec.vsr
+// CHECK-LE: @llvm.ppc.altivec.vsr
+
+ res_vp = vec_vsr(vp, vuc);
+// CHECK: @llvm.ppc.altivec.vsr
+// CHECK-LE: @llvm.ppc.altivec.vsr
+
+ res_vp = vec_vsr(vp, vus);
+// CHECK: @llvm.ppc.altivec.vsr
+// CHECK-LE: @llvm.ppc.altivec.vsr
+
+ res_vp = vec_vsr(vp, vui);
+// CHECK: @llvm.ppc.altivec.vsr
+// CHECK-LE: @llvm.ppc.altivec.vsr
+
+ res_vi = vec_vsr(vi, vuc);
+// CHECK: @llvm.ppc.altivec.vsr
+// CHECK-LE: @llvm.ppc.altivec.vsr
+
+ res_vi = vec_vsr(vi, vus);
+// CHECK: @llvm.ppc.altivec.vsr
+// CHECK-LE: @llvm.ppc.altivec.vsr
+
+ res_vi = vec_vsr(vi, vui);
+// CHECK: @llvm.ppc.altivec.vsr
+// CHECK-LE: @llvm.ppc.altivec.vsr
+
+ res_vui = vec_vsr(vui, vuc);
+// CHECK: @llvm.ppc.altivec.vsr
+// CHECK-LE: @llvm.ppc.altivec.vsr
+
+ res_vui = vec_vsr(vui, vus);
+// CHECK: @llvm.ppc.altivec.vsr
+// CHECK-LE: @llvm.ppc.altivec.vsr
+
+ res_vui = vec_vsr(vui, vui);
+// CHECK: @llvm.ppc.altivec.vsr
+// CHECK-LE: @llvm.ppc.altivec.vsr
+
+ res_vbi = vec_vsr(vbi, vuc);
+// CHECK: @llvm.ppc.altivec.vsr
+// CHECK-LE: @llvm.ppc.altivec.vsr
+
+ res_vbi = vec_vsr(vbi, vus);
+// CHECK: @llvm.ppc.altivec.vsr
+// CHECK-LE: @llvm.ppc.altivec.vsr
+
+ res_vbi = vec_vsr(vbi, vui);
+// CHECK: @llvm.ppc.altivec.vsr
+// CHECK-LE: @llvm.ppc.altivec.vsr
/* vec_sro */
- res_vsc = vec_sro(vsc, vsc); // CHECK: @llvm.ppc.altivec.vsro
- res_vsc = vec_sro(vsc, vuc); // CHECK: @llvm.ppc.altivec.vsro
- res_vuc = vec_sro(vuc, vsc); // CHECK: @llvm.ppc.altivec.vsro
- res_vuc = vec_sro(vuc, vuc); // CHECK: @llvm.ppc.altivec.vsro
- res_vs = vec_sro(vs, vsc); // CHECK: @llvm.ppc.altivec.vsro
- res_vs = vec_sro(vs, vuc); // CHECK: @llvm.ppc.altivec.vsro
- res_vus = vec_sro(vus, vsc); // CHECK: @llvm.ppc.altivec.vsro
- res_vus = vec_sro(vus, vuc); // CHECK: @llvm.ppc.altivec.vsro
- res_vp = vec_sro(vp, vsc); // CHECK: @llvm.ppc.altivec.vsro
- res_vp = vec_sro(vp, vuc); // CHECK: @llvm.ppc.altivec.vsro
- res_vi = vec_sro(vi, vsc); // CHECK: @llvm.ppc.altivec.vsro
- res_vi = vec_sro(vi, vuc); // CHECK: @llvm.ppc.altivec.vsro
- res_vui = vec_sro(vui, vsc); // CHECK: @llvm.ppc.altivec.vsro
- res_vui = vec_sro(vui, vuc); // CHECK: @llvm.ppc.altivec.vsro
- res_vf = vec_sro(vf, vsc); // CHECK: @llvm.ppc.altivec.vsro
- res_vf = vec_sro(vf, vuc); // CHECK: @llvm.ppc.altivec.vsro
- res_vsc = vec_vsro(vsc, vsc); // CHECK: @llvm.ppc.altivec.vsro
- res_vsc = vec_vsro(vsc, vuc); // CHECK: @llvm.ppc.altivec.vsro
- res_vuc = vec_vsro(vuc, vsc); // CHECK: @llvm.ppc.altivec.vsro
- res_vuc = vec_vsro(vuc, vuc); // CHECK: @llvm.ppc.altivec.vsro
- res_vs = vec_vsro(vs, vsc); // CHECK: @llvm.ppc.altivec.vsro
- res_vs = vec_vsro(vs, vuc); // CHECK: @llvm.ppc.altivec.vsro
- res_vus = vec_vsro(vus, vsc); // CHECK: @llvm.ppc.altivec.vsro
- res_vus = vec_vsro(vus, vuc); // CHECK: @llvm.ppc.altivec.vsro
- res_vp = vec_vsro(vp, vsc); // CHECK: @llvm.ppc.altivec.vsro
- res_vp = vec_vsro(vp, vuc); // CHECK: @llvm.ppc.altivec.vsro
- res_vi = vec_vsro(vi, vsc); // CHECK: @llvm.ppc.altivec.vsro
- res_vi = vec_vsro(vi, vuc); // CHECK: @llvm.ppc.altivec.vsro
- res_vui = vec_vsro(vui, vsc); // CHECK: @llvm.ppc.altivec.vsro
- res_vui = vec_vsro(vui, vuc); // CHECK: @llvm.ppc.altivec.vsro
- res_vf = vec_vsro(vf, vsc); // CHECK: @llvm.ppc.altivec.vsro
- res_vf = vec_vsro(vf, vuc); // CHECK: @llvm.ppc.altivec.vsro
+ res_vsc = vec_sro(vsc, vsc);
+// CHECK: @llvm.ppc.altivec.vsro
+// CHECK-LE: @llvm.ppc.altivec.vsro
+
+ res_vsc = vec_sro(vsc, vuc);
+// CHECK: @llvm.ppc.altivec.vsro
+// CHECK-LE: @llvm.ppc.altivec.vsro
+
+ res_vuc = vec_sro(vuc, vsc);
+// CHECK: @llvm.ppc.altivec.vsro
+// CHECK-LE: @llvm.ppc.altivec.vsro
+
+ res_vuc = vec_sro(vuc, vuc);
+// CHECK: @llvm.ppc.altivec.vsro
+// CHECK-LE: @llvm.ppc.altivec.vsro
+
+ res_vs = vec_sro(vs, vsc);
+// CHECK: @llvm.ppc.altivec.vsro
+// CHECK-LE: @llvm.ppc.altivec.vsro
+
+ res_vs = vec_sro(vs, vuc);
+// CHECK: @llvm.ppc.altivec.vsro
+// CHECK-LE: @llvm.ppc.altivec.vsro
+
+ res_vus = vec_sro(vus, vsc);
+// CHECK: @llvm.ppc.altivec.vsro
+// CHECK-LE: @llvm.ppc.altivec.vsro
+
+ res_vus = vec_sro(vus, vuc);
+// CHECK: @llvm.ppc.altivec.vsro
+// CHECK-LE: @llvm.ppc.altivec.vsro
+
+ res_vp = vec_sro(vp, vsc);
+// CHECK: @llvm.ppc.altivec.vsro
+// CHECK-LE: @llvm.ppc.altivec.vsro
+
+ res_vp = vec_sro(vp, vuc);
+// CHECK: @llvm.ppc.altivec.vsro
+// CHECK-LE: @llvm.ppc.altivec.vsro
+
+ res_vi = vec_sro(vi, vsc);
+// CHECK: @llvm.ppc.altivec.vsro
+// CHECK-LE: @llvm.ppc.altivec.vsro
+
+ res_vi = vec_sro(vi, vuc);
+// CHECK: @llvm.ppc.altivec.vsro
+// CHECK-LE: @llvm.ppc.altivec.vsro
+
+ res_vui = vec_sro(vui, vsc);
+// CHECK: @llvm.ppc.altivec.vsro
+// CHECK-LE: @llvm.ppc.altivec.vsro
+
+ res_vui = vec_sro(vui, vuc);
+// CHECK: @llvm.ppc.altivec.vsro
+// CHECK-LE: @llvm.ppc.altivec.vsro
+
+ res_vf = vec_sro(vf, vsc);
+// CHECK: @llvm.ppc.altivec.vsro
+// CHECK-LE: @llvm.ppc.altivec.vsro
+
+ res_vf = vec_sro(vf, vuc);
+// CHECK: @llvm.ppc.altivec.vsro
+// CHECK-LE: @llvm.ppc.altivec.vsro
+
+ res_vsc = vec_vsro(vsc, vsc);
+// CHECK: @llvm.ppc.altivec.vsro
+// CHECK-LE: @llvm.ppc.altivec.vsro
+
+ res_vsc = vec_vsro(vsc, vuc);
+// CHECK: @llvm.ppc.altivec.vsro
+// CHECK-LE: @llvm.ppc.altivec.vsro
+
+ res_vuc = vec_vsro(vuc, vsc);
+// CHECK: @llvm.ppc.altivec.vsro
+// CHECK-LE: @llvm.ppc.altivec.vsro
+
+ res_vuc = vec_vsro(vuc, vuc);
+// CHECK: @llvm.ppc.altivec.vsro
+// CHECK-LE: @llvm.ppc.altivec.vsro
+
+ res_vs = vec_vsro(vs, vsc);
+// CHECK: @llvm.ppc.altivec.vsro
+// CHECK-LE: @llvm.ppc.altivec.vsro
+
+ res_vs = vec_vsro(vs, vuc);
+// CHECK: @llvm.ppc.altivec.vsro
+// CHECK-LE: @llvm.ppc.altivec.vsro
+
+ res_vus = vec_vsro(vus, vsc);
+// CHECK: @llvm.ppc.altivec.vsro
+// CHECK-LE: @llvm.ppc.altivec.vsro
+
+ res_vus = vec_vsro(vus, vuc);
+// CHECK: @llvm.ppc.altivec.vsro
+// CHECK-LE: @llvm.ppc.altivec.vsro
+
+ res_vp = vec_vsro(vp, vsc);
+// CHECK: @llvm.ppc.altivec.vsro
+// CHECK-LE: @llvm.ppc.altivec.vsro
+
+ res_vp = vec_vsro(vp, vuc);
+// CHECK: @llvm.ppc.altivec.vsro
+// CHECK-LE: @llvm.ppc.altivec.vsro
+
+ res_vi = vec_vsro(vi, vsc);
+// CHECK: @llvm.ppc.altivec.vsro
+// CHECK-LE: @llvm.ppc.altivec.vsro
+
+ res_vi = vec_vsro(vi, vuc);
+// CHECK: @llvm.ppc.altivec.vsro
+// CHECK-LE: @llvm.ppc.altivec.vsro
+
+ res_vui = vec_vsro(vui, vsc);
+// CHECK: @llvm.ppc.altivec.vsro
+// CHECK-LE: @llvm.ppc.altivec.vsro
+
+ res_vui = vec_vsro(vui, vuc);
+// CHECK: @llvm.ppc.altivec.vsro
+// CHECK-LE: @llvm.ppc.altivec.vsro
+
+ res_vf = vec_vsro(vf, vsc);
+// CHECK: @llvm.ppc.altivec.vsro
+// CHECK-LE: @llvm.ppc.altivec.vsro
+
+ res_vf = vec_vsro(vf, vuc);
+// CHECK: @llvm.ppc.altivec.vsro
+// CHECK-LE: @llvm.ppc.altivec.vsro
/* vec_st */
- vec_st(vsc, 0, &vsc); // CHECK: @llvm.ppc.altivec.stvx
- vec_st(vsc, 0, &param_sc); // CHECK: @llvm.ppc.altivec.stvx
- vec_st(vuc, 0, &vuc); // CHECK: @llvm.ppc.altivec.stvx
- vec_st(vuc, 0, &param_uc); // CHECK: @llvm.ppc.altivec.stvx
- vec_st(vbc, 0, &param_uc); // CHECK: @llvm.ppc.altivec.stvx
- vec_st(vbc, 0, &param_uc); // CHECK: @llvm.ppc.altivec.stvx
- vec_st(vbc, 0, &vbc); // CHECK: @llvm.ppc.altivec.stvx
- vec_st(vs, 0, &vs); // CHECK: @llvm.ppc.altivec.stvx
- vec_st(vs, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvx
- vec_st(vus, 0, &vus); // CHECK: @llvm.ppc.altivec.stvx
- vec_st(vus, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvx
- vec_st(vbs, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvx
- vec_st(vbs, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvx
- vec_st(vbs, 0, &vbs); // CHECK: @llvm.ppc.altivec.stvx
- vec_st(vp, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvx
- vec_st(vp, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvx
- vec_st(vp, 0, &vp); // CHECK: @llvm.ppc.altivec.stvx
- vec_st(vi, 0, &vi); // CHECK: @llvm.ppc.altivec.stvx
- vec_st(vi, 0, &param_i); // CHECK: @llvm.ppc.altivec.stvx
- vec_st(vui, 0, &vui); // CHECK: @llvm.ppc.altivec.stvx
- vec_st(vui, 0, &param_ui); // CHECK: @llvm.ppc.altivec.stvx
- vec_st(vbi, 0, &param_i); // CHECK: @llvm.ppc.altivec.stvx
- vec_st(vbi, 0, &param_ui); // CHECK: @llvm.ppc.altivec.stvx
- vec_st(vbi, 0, &vbi); // CHECK: @llvm.ppc.altivec.stvx
- vec_st(vf, 0, &vf); // CHECK: @llvm.ppc.altivec.stvx
- vec_st(vf, 0, &param_f); // CHECK: @llvm.ppc.altivec.stvx
- vec_stvx(vsc, 0, &vsc); // CHECK: @llvm.ppc.altivec.stvx
- vec_stvx(vsc, 0, &param_sc); // CHECK: @llvm.ppc.altivec.stvx
- vec_stvx(vuc, 0, &vuc); // CHECK: @llvm.ppc.altivec.stvx
- vec_stvx(vuc, 0, &param_uc); // CHECK: @llvm.ppc.altivec.stvx
- vec_stvx(vbc, 0, &param_uc); // CHECK: @llvm.ppc.altivec.stvx
- vec_stvx(vbc, 0, &param_uc); // CHECK: @llvm.ppc.altivec.stvx
- vec_stvx(vbc, 0, &vbc); // CHECK: @llvm.ppc.altivec.stvx
- vec_stvx(vs, 0, &vs); // CHECK: @llvm.ppc.altivec.stvx
- vec_stvx(vs, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvx
- vec_stvx(vus, 0, &vus); // CHECK: @llvm.ppc.altivec.stvx
- vec_stvx(vus, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvx
- vec_stvx(vbs, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvx
- vec_stvx(vbs, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvx
- vec_stvx(vbs, 0, &vbs); // CHECK: @llvm.ppc.altivec.stvx
- vec_stvx(vp, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvx
- vec_stvx(vp, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvx
- vec_stvx(vp, 0, &vp); // CHECK: @llvm.ppc.altivec.stvx
- vec_stvx(vi, 0, &vi); // CHECK: @llvm.ppc.altivec.stvx
- vec_stvx(vi, 0, &param_i); // CHECK: @llvm.ppc.altivec.stvx
- vec_stvx(vui, 0, &vui); // CHECK: @llvm.ppc.altivec.stvx
- vec_stvx(vui, 0, &param_ui); // CHECK: @llvm.ppc.altivec.stvx
- vec_stvx(vbi, 0, &param_i); // CHECK: @llvm.ppc.altivec.stvx
- vec_stvx(vbi, 0, &param_ui); // CHECK: @llvm.ppc.altivec.stvx
- vec_stvx(vbi, 0, &vbi); // CHECK: @llvm.ppc.altivec.stvx
- vec_stvx(vf, 0, &vf); // CHECK: @llvm.ppc.altivec.stvx
- vec_stvx(vf, 0, &param_f); // CHECK: @llvm.ppc.altivec.stvx
+ vec_st(vsc, 0, &vsc);
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_st(vsc, 0, &param_sc);
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_st(vuc, 0, &vuc);
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_st(vuc, 0, &param_uc);
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_st(vbc, 0, &param_uc);
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_st(vbc, 0, &param_uc);
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_st(vbc, 0, &vbc);
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_st(vs, 0, &vs);
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_st(vs, 0, &param_s);
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_st(vus, 0, &vus);
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_st(vus, 0, &param_us);
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_st(vbs, 0, &param_s);
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_st(vbs, 0, &param_us);
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_st(vbs, 0, &vbs);
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_st(vp, 0, &param_s);
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_st(vp, 0, &param_us);
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_st(vp, 0, &vp);
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_st(vi, 0, &vi);
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_st(vi, 0, &param_i);
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_st(vui, 0, &vui);
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_st(vui, 0, &param_ui);
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_st(vbi, 0, &param_i);
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_st(vbi, 0, &param_ui);
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_st(vbi, 0, &vbi);
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_st(vf, 0, &vf);
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_st(vf, 0, &param_f);
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_stvx(vsc, 0, &vsc);
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_stvx(vsc, 0, &param_sc);
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_stvx(vuc, 0, &vuc);
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_stvx(vuc, 0, &param_uc);
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_stvx(vbc, 0, &param_uc);
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_stvx(vbc, 0, &param_uc);
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_stvx(vbc, 0, &vbc);
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_stvx(vs, 0, &vs);
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_stvx(vs, 0, &param_s);
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_stvx(vus, 0, &vus);
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_stvx(vus, 0, &param_us);
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_stvx(vbs, 0, &param_s);
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_stvx(vbs, 0, &param_us);
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_stvx(vbs, 0, &vbs);
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_stvx(vp, 0, &param_s);
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_stvx(vp, 0, &param_us);
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_stvx(vp, 0, &vp);
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_stvx(vi, 0, &vi);
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_stvx(vi, 0, &param_i);
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_stvx(vui, 0, &vui);
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_stvx(vui, 0, &param_ui);
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_stvx(vbi, 0, &param_i);
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_stvx(vbi, 0, &param_ui);
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_stvx(vbi, 0, &vbi);
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_stvx(vf, 0, &vf);
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_stvx(vf, 0, &param_f);
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.stvx
/* vec_ste */
- vec_ste(vsc, 0, &param_sc); // CHECK: @llvm.ppc.altivec.stvebx
- vec_ste(vuc, 0, &param_uc); // CHECK: @llvm.ppc.altivec.stvebx
- vec_ste(vbc, 0, &param_sc); // CHECK: @llvm.ppc.altivec.stvebx
- vec_ste(vbc, 0, &param_uc); // CHECK: @llvm.ppc.altivec.stvebx
- vec_ste(vs, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvehx
- vec_ste(vus, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvehx
- vec_ste(vbs, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvehx
- vec_ste(vbs, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvehx
- vec_ste(vp, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvehx
- vec_ste(vp, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvehx
- vec_ste(vi, 0, &param_i); // CHECK: @llvm.ppc.altivec.stvewx
- vec_ste(vui, 0, &param_ui); // CHECK: @llvm.ppc.altivec.stvewx
- vec_ste(vbi, 0, &param_i); // CHECK: @llvm.ppc.altivec.stvewx
- vec_ste(vbi, 0, &param_ui); // CHECK: @llvm.ppc.altivec.stvewx
- vec_ste(vf, 0, &param_f); // CHECK: @llvm.ppc.altivec.stvewx
- vec_stvebx(vsc, 0, &param_sc); // CHECK: @llvm.ppc.altivec.stvebx
- vec_stvebx(vuc, 0, &param_uc); // CHECK: @llvm.ppc.altivec.stvebx
- vec_stvebx(vbc, 0, &param_sc); // CHECK: @llvm.ppc.altivec.stvebx
- vec_stvebx(vbc, 0, &param_uc); // CHECK: @llvm.ppc.altivec.stvebx
- vec_stvehx(vs, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvehx
- vec_stvehx(vus, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvehx
- vec_stvehx(vbs, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvehx
- vec_stvehx(vbs, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvehx
- vec_stvehx(vp, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvehx
- vec_stvehx(vp, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvehx
- vec_stvewx(vi, 0, &param_i); // CHECK: @llvm.ppc.altivec.stvewx
- vec_stvewx(vui, 0, &param_ui); // CHECK: @llvm.ppc.altivec.stvewx
- vec_stvewx(vbi, 0, &param_i); // CHECK: @llvm.ppc.altivec.stvewx
- vec_stvewx(vbi, 0, &param_ui); // CHECK: @llvm.ppc.altivec.stvewx
- vec_stvewx(vf, 0, &param_f); // CHECK: @llvm.ppc.altivec.stvewx
+ vec_ste(vsc, 0, &param_sc);
+// CHECK: @llvm.ppc.altivec.stvebx
+// CHECK-LE: @llvm.ppc.altivec.stvebx
+
+ vec_ste(vuc, 0, &param_uc);
+// CHECK: @llvm.ppc.altivec.stvebx
+// CHECK-LE: @llvm.ppc.altivec.stvebx
+
+ vec_ste(vbc, 0, &param_sc);
+// CHECK: @llvm.ppc.altivec.stvebx
+// CHECK-LE: @llvm.ppc.altivec.stvebx
+
+ vec_ste(vbc, 0, &param_uc);
+// CHECK: @llvm.ppc.altivec.stvebx
+// CHECK-LE: @llvm.ppc.altivec.stvebx
+
+ vec_ste(vs, 0, &param_s);
+// CHECK: @llvm.ppc.altivec.stvehx
+// CHECK-LE: @llvm.ppc.altivec.stvehx
+
+ vec_ste(vus, 0, &param_us);
+// CHECK: @llvm.ppc.altivec.stvehx
+// CHECK-LE: @llvm.ppc.altivec.stvehx
+
+ vec_ste(vbs, 0, &param_s);
+// CHECK: @llvm.ppc.altivec.stvehx
+// CHECK-LE: @llvm.ppc.altivec.stvehx
+
+ vec_ste(vbs, 0, &param_us);
+// CHECK: @llvm.ppc.altivec.stvehx
+// CHECK-LE: @llvm.ppc.altivec.stvehx
+
+ vec_ste(vp, 0, &param_s);
+// CHECK: @llvm.ppc.altivec.stvehx
+// CHECK-LE: @llvm.ppc.altivec.stvehx
+
+ vec_ste(vp, 0, &param_us);
+// CHECK: @llvm.ppc.altivec.stvehx
+// CHECK-LE: @llvm.ppc.altivec.stvehx
+
+ vec_ste(vi, 0, &param_i);
+// CHECK: @llvm.ppc.altivec.stvewx
+// CHECK-LE: @llvm.ppc.altivec.stvewx
+
+ vec_ste(vui, 0, &param_ui);
+// CHECK: @llvm.ppc.altivec.stvewx
+// CHECK-LE: @llvm.ppc.altivec.stvewx
+
+ vec_ste(vbi, 0, &param_i);
+// CHECK: @llvm.ppc.altivec.stvewx
+// CHECK-LE: @llvm.ppc.altivec.stvewx
+
+ vec_ste(vbi, 0, &param_ui);
+// CHECK: @llvm.ppc.altivec.stvewx
+// CHECK-LE: @llvm.ppc.altivec.stvewx
+
+ vec_ste(vf, 0, &param_f);
+// CHECK: @llvm.ppc.altivec.stvewx
+// CHECK-LE: @llvm.ppc.altivec.stvewx
+
+ vec_stvebx(vsc, 0, &param_sc);
+// CHECK: @llvm.ppc.altivec.stvebx
+// CHECK-LE: @llvm.ppc.altivec.stvebx
+
+ vec_stvebx(vuc, 0, &param_uc);
+// CHECK: @llvm.ppc.altivec.stvebx
+// CHECK-LE: @llvm.ppc.altivec.stvebx
+
+ vec_stvebx(vbc, 0, &param_sc);
+// CHECK: @llvm.ppc.altivec.stvebx
+// CHECK-LE: @llvm.ppc.altivec.stvebx
+
+ vec_stvebx(vbc, 0, &param_uc);
+// CHECK: @llvm.ppc.altivec.stvebx
+// CHECK-LE: @llvm.ppc.altivec.stvebx
+
+ vec_stvehx(vs, 0, &param_s);
+// CHECK: @llvm.ppc.altivec.stvehx
+// CHECK-LE: @llvm.ppc.altivec.stvehx
+
+ vec_stvehx(vus, 0, &param_us);
+// CHECK: @llvm.ppc.altivec.stvehx
+// CHECK-LE: @llvm.ppc.altivec.stvehx
+
+ vec_stvehx(vbs, 0, &param_s);
+// CHECK: @llvm.ppc.altivec.stvehx
+// CHECK-LE: @llvm.ppc.altivec.stvehx
+
+ vec_stvehx(vbs, 0, &param_us);
+// CHECK: @llvm.ppc.altivec.stvehx
+// CHECK-LE: @llvm.ppc.altivec.stvehx
+
+ vec_stvehx(vp, 0, &param_s);
+// CHECK: @llvm.ppc.altivec.stvehx
+// CHECK-LE: @llvm.ppc.altivec.stvehx
+
+ vec_stvehx(vp, 0, &param_us);
+// CHECK: @llvm.ppc.altivec.stvehx
+// CHECK-LE: @llvm.ppc.altivec.stvehx
+
+ vec_stvewx(vi, 0, &param_i);
+// CHECK: @llvm.ppc.altivec.stvewx
+// CHECK-LE: @llvm.ppc.altivec.stvewx
+
+ vec_stvewx(vui, 0, &param_ui);
+// CHECK: @llvm.ppc.altivec.stvewx
+// CHECK-LE: @llvm.ppc.altivec.stvewx
+
+ vec_stvewx(vbi, 0, &param_i);
+// CHECK: @llvm.ppc.altivec.stvewx
+// CHECK-LE: @llvm.ppc.altivec.stvewx
+
+ vec_stvewx(vbi, 0, &param_ui);
+// CHECK: @llvm.ppc.altivec.stvewx
+// CHECK-LE: @llvm.ppc.altivec.stvewx
+
+ vec_stvewx(vf, 0, &param_f);
+// CHECK: @llvm.ppc.altivec.stvewx
+// CHECK-LE: @llvm.ppc.altivec.stvewx
/* vec_stl */
- vec_stl(vsc, 0, &vsc); // CHECK: @llvm.ppc.altivec.stvxl
- vec_stl(vsc, 0, &param_sc); // CHECK: @llvm.ppc.altivec.stvxl
- vec_stl(vuc, 0, &vuc); // CHECK: @llvm.ppc.altivec.stvxl
- vec_stl(vuc, 0, &param_uc); // CHECK: @llvm.ppc.altivec.stvxl
- vec_stl(vbc, 0, &param_sc); // CHECK: @llvm.ppc.altivec.stvxl
- vec_stl(vbc, 0, &param_uc); // CHECK: @llvm.ppc.altivec.stvxl
- vec_stl(vbc, 0, &vbc); // CHECK: @llvm.ppc.altivec.stvxl
- vec_stl(vs, 0, &vs); // CHECK: @llvm.ppc.altivec.stvxl
- vec_stl(vs, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvxl
- vec_stl(vus, 0, &vus); // CHECK: @llvm.ppc.altivec.stvxl
- vec_stl(vus, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvxl
- vec_stl(vbs, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvxl
- vec_stl(vbs, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvxl
- vec_stl(vbs, 0, &vbs); // CHECK: @llvm.ppc.altivec.stvxl
- vec_stl(vp, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvxl
- vec_stl(vp, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvxl
- vec_stl(vp, 0, &vp); // CHECK: @llvm.ppc.altivec.stvxl
- vec_stl(vi, 0, &vi); // CHECK: @llvm.ppc.altivec.stvxl
- vec_stl(vi, 0, &param_i); // CHECK: @llvm.ppc.altivec.stvxl
- vec_stl(vui, 0, &vui); // CHECK: @llvm.ppc.altivec.stvxl
- vec_stl(vui, 0, &param_ui); // CHECK: @llvm.ppc.altivec.stvxl
- vec_stl(vbi, 0, &param_i); // CHECK: @llvm.ppc.altivec.stvxl
- vec_stl(vbi, 0, &param_ui); // CHECK: @llvm.ppc.altivec.stvxl
- vec_stl(vbi, 0, &vbi); // CHECK: @llvm.ppc.altivec.stvxl
- vec_stl(vf, 0, &vf); // CHECK: @llvm.ppc.altivec.stvxl
- vec_stl(vf, 0, &param_f); // CHECK: @llvm.ppc.altivec.stvxl
- vec_stvxl(vsc, 0, &vsc); // CHECK: @llvm.ppc.altivec.stvxl
- vec_stvxl(vsc, 0, &param_sc); // CHECK: @llvm.ppc.altivec.stvxl
- vec_stvxl(vuc, 0, &vuc); // CHECK: @llvm.ppc.altivec.stvxl
- vec_stvxl(vuc, 0, &param_uc); // CHECK: @llvm.ppc.altivec.stvxl
- vec_stvxl(vbc, 0, &param_sc); // CHECK: @llvm.ppc.altivec.stvxl
- vec_stvxl(vbc, 0, &param_uc); // CHECK: @llvm.ppc.altivec.stvxl
- vec_stvxl(vbc, 0, &vbc); // CHECK: @llvm.ppc.altivec.stvxl
- vec_stvxl(vs, 0, &vs); // CHECK: @llvm.ppc.altivec.stvxl
- vec_stvxl(vs, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvxl
- vec_stvxl(vus, 0, &vus); // CHECK: @llvm.ppc.altivec.stvxl
- vec_stvxl(vus, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvxl
- vec_stvxl(vbs, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvxl
- vec_stvxl(vbs, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvxl
- vec_stvxl(vbs, 0, &vbs); // CHECK: @llvm.ppc.altivec.stvxl
- vec_stvxl(vp, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvxl
- vec_stvxl(vp, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvxl
- vec_stvxl(vp, 0, &vp); // CHECK: @llvm.ppc.altivec.stvxl
- vec_stvxl(vi, 0, &vi); // CHECK: @llvm.ppc.altivec.stvxl
- vec_stvxl(vi, 0, &param_i); // CHECK: @llvm.ppc.altivec.stvxl
- vec_stvxl(vui, 0, &vui); // CHECK: @llvm.ppc.altivec.stvxl
- vec_stvxl(vui, 0, &param_ui); // CHECK: @llvm.ppc.altivec.stvxl
- vec_stvxl(vbi, 0, &param_i); // CHECK: @llvm.ppc.altivec.stvxl
- vec_stvxl(vbi, 0, &param_ui); // CHECK: @llvm.ppc.altivec.stvxl
- vec_stvxl(vbi, 0, &vbi); // CHECK: @llvm.ppc.altivec.stvxl
- vec_stvxl(vf, 0, &vf); // CHECK: @llvm.ppc.altivec.stvxl
- vec_stvxl(vf, 0, &param_f); // CHECK: @llvm.ppc.altivec.stvxl
+ vec_stl(vsc, 0, &vsc);
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stl(vsc, 0, &param_sc);
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stl(vuc, 0, &vuc);
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stl(vuc, 0, &param_uc);
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stl(vbc, 0, &param_sc);
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stl(vbc, 0, &param_uc);
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stl(vbc, 0, &vbc);
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stl(vs, 0, &vs);
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stl(vs, 0, &param_s);
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stl(vus, 0, &vus);
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stl(vus, 0, &param_us);
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stl(vbs, 0, &param_s);
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stl(vbs, 0, &param_us);
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stl(vbs, 0, &vbs);
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stl(vp, 0, &param_s);
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stl(vp, 0, &param_us);
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stl(vp, 0, &vp);
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stl(vi, 0, &vi);
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stl(vi, 0, &param_i);
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stl(vui, 0, &vui);
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stl(vui, 0, &param_ui);
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stl(vbi, 0, &param_i);
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stl(vbi, 0, &param_ui);
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stl(vbi, 0, &vbi);
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stl(vf, 0, &vf);
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stl(vf, 0, &param_f);
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stvxl(vsc, 0, &vsc);
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stvxl(vsc, 0, &param_sc);
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stvxl(vuc, 0, &vuc);
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stvxl(vuc, 0, &param_uc);
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stvxl(vbc, 0, &param_sc);
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stvxl(vbc, 0, &param_uc);
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stvxl(vbc, 0, &vbc);
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stvxl(vs, 0, &vs);
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stvxl(vs, 0, &param_s);
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stvxl(vus, 0, &vus);
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stvxl(vus, 0, &param_us);
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stvxl(vbs, 0, &param_s);
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stvxl(vbs, 0, &param_us);
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stvxl(vbs, 0, &vbs);
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stvxl(vp, 0, &param_s);
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stvxl(vp, 0, &param_us);
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stvxl(vp, 0, &vp);
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stvxl(vi, 0, &vi);
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stvxl(vi, 0, &param_i);
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stvxl(vui, 0, &vui);
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stvxl(vui, 0, &param_ui);
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stvxl(vbi, 0, &param_i);
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stvxl(vbi, 0, &param_ui);
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stvxl(vbi, 0, &vbi);
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stvxl(vf, 0, &vf);
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stvxl(vf, 0, &param_f);
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.stvxl
/* vec_sub */
- res_vsc = vec_sub(vsc, vsc); // CHECK: sub <16 x i8>
- res_vsc = vec_sub(vbc, vsc); // CHECK: sub <16 x i8>
- res_vsc = vec_sub(vsc, vbc); // CHECK: sub <16 x i8>
- res_vuc = vec_sub(vuc, vuc); // CHECK: sub <16 x i8>
- res_vuc = vec_sub(vbc, vuc); // CHECK: sub <16 x i8>
- res_vuc = vec_sub(vuc, vbc); // CHECK: sub <16 x i8>
- res_vs = vec_sub(vs, vs); // CHECK: sub <8 x i16>
- res_vs = vec_sub(vbs, vs); // CHECK: sub <8 x i16>
- res_vs = vec_sub(vs, vbs); // CHECK: sub <8 x i16>
- res_vus = vec_sub(vus, vus); // CHECK: sub <8 x i16>
- res_vus = vec_sub(vbs, vus); // CHECK: sub <8 x i16>
- res_vus = vec_sub(vus, vbs); // CHECK: sub <8 x i16>
- res_vi = vec_sub(vi, vi); // CHECK: sub <4 x i32>
- res_vi = vec_sub(vbi, vi); // CHECK: sub <4 x i32>
- res_vi = vec_sub(vi, vbi); // CHECK: sub <4 x i32>
- res_vui = vec_sub(vui, vui); // CHECK: sub <4 x i32>
- res_vui = vec_sub(vbi, vui); // CHECK: sub <4 x i32>
- res_vui = vec_sub(vui, vbi); // CHECK: sub <4 x i32>
- res_vf = vec_sub(vf, vf); // CHECK: fsub <4 x float>
- res_vsc = vec_vsububm(vsc, vsc); // CHECK: sub <16 x i8>
- res_vsc = vec_vsububm(vbc, vsc); // CHECK: sub <16 x i8>
- res_vsc = vec_vsububm(vsc, vbc); // CHECK: sub <16 x i8>
- res_vuc = vec_vsububm(vuc, vuc); // CHECK: sub <16 x i8>
- res_vuc = vec_vsububm(vbc, vuc); // CHECK: sub <16 x i8>
- res_vuc = vec_vsububm(vuc, vbc); // CHECK: sub <16 x i8>
- res_vs = vec_vsubuhm(vs, vs); // CHECK: sub <8 x i16>
- res_vs = vec_vsubuhm(vbs, vus); // CHECK: sub <8 x i16>
- res_vs = vec_vsubuhm(vus, vbs); // CHECK: sub <8 x i16>
- res_vus = vec_vsubuhm(vus, vus); // CHECK: sub <8 x i16>
- res_vus = vec_vsubuhm(vbs, vus); // CHECK: sub <8 x i16>
- res_vus = vec_vsubuhm(vus, vbs); // CHECK: sub <8 x i16>
- res_vi = vec_vsubuwm(vi, vi); // CHECK: sub <4 x i32>
- res_vi = vec_vsubuwm(vbi, vi); // CHECK: sub <4 x i32>
- res_vi = vec_vsubuwm(vi, vbi); // CHECK: sub <4 x i32>
- res_vui = vec_vsubuwm(vui, vui); // CHECK: sub <4 x i32>
- res_vui = vec_vsubuwm(vbi, vui); // CHECK: sub <4 x i32>
- res_vui = vec_vsubuwm(vui, vbi); // CHECK: sub <4 x i32>
- res_vf = vec_vsubfp(vf, vf); // CHECK: fsub <4 x float>
+ res_vsc = vec_sub(vsc, vsc);
+// CHECK: sub <16 x i8>
+// CHECK-LE: sub <16 x i8>
+
+ res_vsc = vec_sub(vbc, vsc);
+// CHECK: sub <16 x i8>
+// CHECK-LE: sub <16 x i8>
+
+ res_vsc = vec_sub(vsc, vbc);
+// CHECK: sub <16 x i8>
+// CHECK-LE: sub <16 x i8>
+
+ res_vuc = vec_sub(vuc, vuc);
+// CHECK: sub <16 x i8>
+// CHECK-LE: sub <16 x i8>
+
+ res_vuc = vec_sub(vbc, vuc);
+// CHECK: sub <16 x i8>
+// CHECK-LE: sub <16 x i8>
+
+ res_vuc = vec_sub(vuc, vbc);
+// CHECK: sub <16 x i8>
+// CHECK-LE: sub <16 x i8>
+
+ res_vs = vec_sub(vs, vs);
+// CHECK: sub <8 x i16>
+// CHECK-LE: sub <8 x i16>
+
+ res_vs = vec_sub(vbs, vs);
+// CHECK: sub <8 x i16>
+// CHECK-LE: sub <8 x i16>
+
+ res_vs = vec_sub(vs, vbs);
+// CHECK: sub <8 x i16>
+// CHECK-LE: sub <8 x i16>
+
+ res_vus = vec_sub(vus, vus);
+// CHECK: sub <8 x i16>
+// CHECK-LE: sub <8 x i16>
+
+ res_vus = vec_sub(vbs, vus);
+// CHECK: sub <8 x i16>
+// CHECK-LE: sub <8 x i16>
+
+ res_vus = vec_sub(vus, vbs);
+// CHECK: sub <8 x i16>
+// CHECK-LE: sub <8 x i16>
+
+ res_vi = vec_sub(vi, vi);
+// CHECK: sub <4 x i32>
+// CHECK-LE: sub <4 x i32>
+
+ res_vi = vec_sub(vbi, vi);
+// CHECK: sub <4 x i32>
+// CHECK-LE: sub <4 x i32>
+
+ res_vi = vec_sub(vi, vbi);
+// CHECK: sub <4 x i32>
+// CHECK-LE: sub <4 x i32>
+
+ res_vui = vec_sub(vui, vui);
+// CHECK: sub <4 x i32>
+// CHECK-LE: sub <4 x i32>
+
+ res_vui = vec_sub(vbi, vui);
+// CHECK: sub <4 x i32>
+// CHECK-LE: sub <4 x i32>
+
+ res_vui = vec_sub(vui, vbi);
+// CHECK: sub <4 x i32>
+// CHECK-LE: sub <4 x i32>
+
+ res_vf = vec_sub(vf, vf);
+// CHECK: fsub <4 x float>
+// CHECK-LE: fsub <4 x float>
+
+ res_vsc = vec_vsububm(vsc, vsc);
+// CHECK: sub <16 x i8>
+// CHECK-LE: sub <16 x i8>
+
+ res_vsc = vec_vsububm(vbc, vsc);
+// CHECK: sub <16 x i8>
+// CHECK-LE: sub <16 x i8>
+
+ res_vsc = vec_vsububm(vsc, vbc);
+// CHECK: sub <16 x i8>
+// CHECK-LE: sub <16 x i8>
+
+ res_vuc = vec_vsububm(vuc, vuc);
+// CHECK: sub <16 x i8>
+// CHECK-LE: sub <16 x i8>
+
+ res_vuc = vec_vsububm(vbc, vuc);
+// CHECK: sub <16 x i8>
+// CHECK-LE: sub <16 x i8>
+
+ res_vuc = vec_vsububm(vuc, vbc);
+// CHECK: sub <16 x i8>
+// CHECK-LE: sub <16 x i8>
+
+ res_vs = vec_vsubuhm(vs, vs);
+// CHECK: sub <8 x i16>
+// CHECK-LE: sub <8 x i16>
+
+ res_vs = vec_vsubuhm(vbs, vus);
+// CHECK: sub <8 x i16>
+// CHECK-LE: sub <8 x i16>
+
+ res_vs = vec_vsubuhm(vus, vbs);
+// CHECK: sub <8 x i16>
+// CHECK-LE: sub <8 x i16>
+
+ res_vus = vec_vsubuhm(vus, vus);
+// CHECK: sub <8 x i16>
+// CHECK-LE: sub <8 x i16>
+
+ res_vus = vec_vsubuhm(vbs, vus);
+// CHECK: sub <8 x i16>
+// CHECK-LE: sub <8 x i16>
+
+ res_vus = vec_vsubuhm(vus, vbs);
+// CHECK: sub <8 x i16>
+// CHECK-LE: sub <8 x i16>
+
+ res_vi = vec_vsubuwm(vi, vi);
+// CHECK: sub <4 x i32>
+// CHECK-LE: sub <4 x i32>
+
+ res_vi = vec_vsubuwm(vbi, vi);
+// CHECK: sub <4 x i32>
+// CHECK-LE: sub <4 x i32>
+
+ res_vi = vec_vsubuwm(vi, vbi);
+// CHECK: sub <4 x i32>
+// CHECK-LE: sub <4 x i32>
+
+ res_vui = vec_vsubuwm(vui, vui);
+// CHECK: sub <4 x i32>
+// CHECK-LE: sub <4 x i32>
+
+ res_vui = vec_vsubuwm(vbi, vui);
+// CHECK: sub <4 x i32>
+// CHECK-LE: sub <4 x i32>
+
+ res_vui = vec_vsubuwm(vui, vbi);
+// CHECK: sub <4 x i32>
+// CHECK-LE: sub <4 x i32>
+
+ res_vf = vec_vsubfp(vf, vf);
+// CHECK: fsub <4 x float>
+// CHECK-LE: fsub <4 x float>
/* vec_subc */
- res_vui = vec_subc(vui, vui); // CHECK: @llvm.ppc.altivec.vsubcuw
- res_vui = vec_vsubcuw(vui, vui); // CHECK: @llvm.ppc.altivec.vsubcuw
+ res_vui = vec_subc(vui, vui);
+// CHECK: @llvm.ppc.altivec.vsubcuw
+// CHECK-LE: @llvm.ppc.altivec.vsubcuw
+
+ res_vui = vec_vsubcuw(vui, vui);
+// CHECK: @llvm.ppc.altivec.vsubcuw
+// CHECK-LE: @llvm.ppc.altivec.vsubcuw
/* vec_subs */
- res_vsc = vec_subs(vsc, vsc); // CHECK: @llvm.ppc.altivec.vsubsbs
- res_vsc = vec_subs(vbc, vsc); // CHECK: @llvm.ppc.altivec.vsubsbs
- res_vsc = vec_subs(vsc, vbc); // CHECK: @llvm.ppc.altivec.vsubsbs
- res_vuc = vec_subs(vuc, vuc); // CHECK: @llvm.ppc.altivec.vsububs
- res_vuc = vec_subs(vbc, vuc); // CHECK: @llvm.ppc.altivec.vsububs
- res_vuc = vec_subs(vuc, vbc); // CHECK: @llvm.ppc.altivec.vsububs
- res_vs = vec_subs(vs, vs); // CHECK: @llvm.ppc.altivec.vsubshs
- res_vs = vec_subs(vbs, vs); // CHECK: @llvm.ppc.altivec.vsubshs
- res_vs = vec_subs(vs, vbs); // CHECK: @llvm.ppc.altivec.vsubshs
- res_vus = vec_subs(vus, vus); // CHECK: @llvm.ppc.altivec.vsubuhs
- res_vus = vec_subs(vbs, vus); // CHECK: @llvm.ppc.altivec.vsubuhs
- res_vus = vec_subs(vus, vbs); // CHECK: @llvm.ppc.altivec.vsubuhs
- res_vi = vec_subs(vi, vi); // CHECK: @llvm.ppc.altivec.vsubsws
- res_vi = vec_subs(vbi, vi); // CHECK: @llvm.ppc.altivec.vsubsws
- res_vi = vec_subs(vi, vbi); // CHECK: @llvm.ppc.altivec.vsubsws
- res_vui = vec_subs(vui, vui); // CHECK: @llvm.ppc.altivec.vsubuws
- res_vui = vec_subs(vbi, vui); // CHECK: @llvm.ppc.altivec.vsubuws
- res_vui = vec_subs(vui, vbi); // CHECK: @llvm.ppc.altivec.vsubuws
- res_vsc = vec_vsubsbs(vsc, vsc); // CHECK: @llvm.ppc.altivec.vsubsbs
- res_vsc = vec_vsubsbs(vbc, vsc); // CHECK: @llvm.ppc.altivec.vsubsbs
- res_vsc = vec_vsubsbs(vsc, vbc); // CHECK: @llvm.ppc.altivec.vsubsbs
- res_vuc = vec_vsububs(vuc, vuc); // CHECK: @llvm.ppc.altivec.vsububs
- res_vuc = vec_vsububs(vbc, vuc); // CHECK: @llvm.ppc.altivec.vsububs
- res_vuc = vec_vsububs(vuc, vbc); // CHECK: @llvm.ppc.altivec.vsububs
- res_vs = vec_vsubshs(vs, vs); // CHECK: @llvm.ppc.altivec.vsubshs
- res_vs = vec_vsubshs(vbs, vs); // CHECK: @llvm.ppc.altivec.vsubshs
- res_vs = vec_vsubshs(vs, vbs); // CHECK: @llvm.ppc.altivec.vsubshs
- res_vus = vec_vsubuhs(vus, vus); // CHECK: @llvm.ppc.altivec.vsubuhs
- res_vus = vec_vsubuhs(vbs, vus); // CHECK: @llvm.ppc.altivec.vsubuhs
- res_vus = vec_vsubuhs(vus, vbs); // CHECK: @llvm.ppc.altivec.vsubuhs
- res_vi = vec_vsubsws(vi, vi); // CHECK: @llvm.ppc.altivec.vsubsws
- res_vi = vec_vsubsws(vbi, vi); // CHECK: @llvm.ppc.altivec.vsubsws
- res_vi = vec_vsubsws(vi, vbi); // CHECK: @llvm.ppc.altivec.vsubsws
- res_vui = vec_vsubuws(vui, vui); // CHECK: @llvm.ppc.altivec.vsubuws
- res_vui = vec_vsubuws(vbi, vui); // CHECK: @llvm.ppc.altivec.vsubuws
- res_vui = vec_vsubuws(vui, vbi); // CHECK: @llvm.ppc.altivec.vsubuws
+ res_vsc = vec_subs(vsc, vsc);
+// CHECK: @llvm.ppc.altivec.vsubsbs
+// CHECK-LE: @llvm.ppc.altivec.vsubsbs
+
+ res_vsc = vec_subs(vbc, vsc);
+// CHECK: @llvm.ppc.altivec.vsubsbs
+// CHECK-LE: @llvm.ppc.altivec.vsubsbs
+
+ res_vsc = vec_subs(vsc, vbc);
+// CHECK: @llvm.ppc.altivec.vsubsbs
+// CHECK-LE: @llvm.ppc.altivec.vsubsbs
+
+ res_vuc = vec_subs(vuc, vuc);
+// CHECK: @llvm.ppc.altivec.vsububs
+// CHECK-LE: @llvm.ppc.altivec.vsububs
+
+ res_vuc = vec_subs(vbc, vuc);
+// CHECK: @llvm.ppc.altivec.vsububs
+// CHECK-LE: @llvm.ppc.altivec.vsububs
+
+ res_vuc = vec_subs(vuc, vbc);
+// CHECK: @llvm.ppc.altivec.vsububs
+// CHECK-LE: @llvm.ppc.altivec.vsububs
+
+ res_vs = vec_subs(vs, vs);
+// CHECK: @llvm.ppc.altivec.vsubshs
+// CHECK-LE: @llvm.ppc.altivec.vsubshs
+
+ res_vs = vec_subs(vbs, vs);
+// CHECK: @llvm.ppc.altivec.vsubshs
+// CHECK-LE: @llvm.ppc.altivec.vsubshs
+
+ res_vs = vec_subs(vs, vbs);
+// CHECK: @llvm.ppc.altivec.vsubshs
+// CHECK-LE: @llvm.ppc.altivec.vsubshs
+
+ res_vus = vec_subs(vus, vus);
+// CHECK: @llvm.ppc.altivec.vsubuhs
+// CHECK-LE: @llvm.ppc.altivec.vsubuhs
+
+ res_vus = vec_subs(vbs, vus);
+// CHECK: @llvm.ppc.altivec.vsubuhs
+// CHECK-LE: @llvm.ppc.altivec.vsubuhs
+
+ res_vus = vec_subs(vus, vbs);
+// CHECK: @llvm.ppc.altivec.vsubuhs
+// CHECK-LE: @llvm.ppc.altivec.vsubuhs
+
+ res_vi = vec_subs(vi, vi);
+// CHECK: @llvm.ppc.altivec.vsubsws
+// CHECK-LE: @llvm.ppc.altivec.vsubsws
+
+ res_vi = vec_subs(vbi, vi);
+// CHECK: @llvm.ppc.altivec.vsubsws
+// CHECK-LE: @llvm.ppc.altivec.vsubsws
+
+ res_vi = vec_subs(vi, vbi);
+// CHECK: @llvm.ppc.altivec.vsubsws
+// CHECK-LE: @llvm.ppc.altivec.vsubsws
+
+ res_vui = vec_subs(vui, vui);
+// CHECK: @llvm.ppc.altivec.vsubuws
+// CHECK-LE: @llvm.ppc.altivec.vsubuws
+
+ res_vui = vec_subs(vbi, vui);
+// CHECK: @llvm.ppc.altivec.vsubuws
+// CHECK-LE: @llvm.ppc.altivec.vsubuws
+
+ res_vui = vec_subs(vui, vbi);
+// CHECK: @llvm.ppc.altivec.vsubuws
+// CHECK-LE: @llvm.ppc.altivec.vsubuws
+
+ res_vsc = vec_vsubsbs(vsc, vsc);
+// CHECK: @llvm.ppc.altivec.vsubsbs
+// CHECK-LE: @llvm.ppc.altivec.vsubsbs
+
+ res_vsc = vec_vsubsbs(vbc, vsc);
+// CHECK: @llvm.ppc.altivec.vsubsbs
+// CHECK-LE: @llvm.ppc.altivec.vsubsbs
+
+ res_vsc = vec_vsubsbs(vsc, vbc);
+// CHECK: @llvm.ppc.altivec.vsubsbs
+// CHECK-LE: @llvm.ppc.altivec.vsubsbs
+
+ res_vuc = vec_vsububs(vuc, vuc);
+// CHECK: @llvm.ppc.altivec.vsububs
+// CHECK-LE: @llvm.ppc.altivec.vsububs
+
+ res_vuc = vec_vsububs(vbc, vuc);
+// CHECK: @llvm.ppc.altivec.vsububs
+// CHECK-LE: @llvm.ppc.altivec.vsububs
+
+ res_vuc = vec_vsububs(vuc, vbc);
+// CHECK: @llvm.ppc.altivec.vsububs
+// CHECK-LE: @llvm.ppc.altivec.vsububs
+
+ res_vs = vec_vsubshs(vs, vs);
+// CHECK: @llvm.ppc.altivec.vsubshs
+// CHECK-LE: @llvm.ppc.altivec.vsubshs
+
+ res_vs = vec_vsubshs(vbs, vs);
+// CHECK: @llvm.ppc.altivec.vsubshs
+// CHECK-LE: @llvm.ppc.altivec.vsubshs
+
+ res_vs = vec_vsubshs(vs, vbs);
+// CHECK: @llvm.ppc.altivec.vsubshs
+// CHECK-LE: @llvm.ppc.altivec.vsubshs
+
+ res_vus = vec_vsubuhs(vus, vus);
+// CHECK: @llvm.ppc.altivec.vsubuhs
+// CHECK-LE: @llvm.ppc.altivec.vsubuhs
+
+ res_vus = vec_vsubuhs(vbs, vus);
+// CHECK: @llvm.ppc.altivec.vsubuhs
+// CHECK-LE: @llvm.ppc.altivec.vsubuhs
+
+ res_vus = vec_vsubuhs(vus, vbs);
+// CHECK: @llvm.ppc.altivec.vsubuhs
+// CHECK-LE: @llvm.ppc.altivec.vsubuhs
+
+ res_vi = vec_vsubsws(vi, vi);
+// CHECK: @llvm.ppc.altivec.vsubsws
+// CHECK-LE: @llvm.ppc.altivec.vsubsws
+
+ res_vi = vec_vsubsws(vbi, vi);
+// CHECK: @llvm.ppc.altivec.vsubsws
+// CHECK-LE: @llvm.ppc.altivec.vsubsws
+
+ res_vi = vec_vsubsws(vi, vbi);
+// CHECK: @llvm.ppc.altivec.vsubsws
+// CHECK-LE: @llvm.ppc.altivec.vsubsws
+
+ res_vui = vec_vsubuws(vui, vui);
+// CHECK: @llvm.ppc.altivec.vsubuws
+// CHECK-LE: @llvm.ppc.altivec.vsubuws
+
+ res_vui = vec_vsubuws(vbi, vui);
+// CHECK: @llvm.ppc.altivec.vsubuws
+// CHECK-LE: @llvm.ppc.altivec.vsubuws
+
+ res_vui = vec_vsubuws(vui, vbi);
+// CHECK: @llvm.ppc.altivec.vsubuws
+// CHECK-LE: @llvm.ppc.altivec.vsubuws
/* vec_sum4s */
- res_vi = vec_sum4s(vsc, vi); // CHECK: @llvm.ppc.altivec.vsum4sbs
- res_vui = vec_sum4s(vuc, vui); // CHECK: @llvm.ppc.altivec.vsum4ubs
- res_vi = vec_sum4s(vs, vi); // CHECK: @llvm.ppc.altivec.vsum4shs
- res_vi = vec_vsum4sbs(vsc, vi); // CHECK: @llvm.ppc.altivec.vsum4sbs
- res_vui = vec_vsum4ubs(vuc, vui); // CHECK: @llvm.ppc.altivec.vsum4ubs
- res_vi = vec_vsum4shs(vs, vi); // CHECK: @llvm.ppc.altivec.vsum4shs
+ res_vi = vec_sum4s(vsc, vi);
+// CHECK: @llvm.ppc.altivec.vsum4sbs
+// CHECK-LE: @llvm.ppc.altivec.vsum4sbs
+
+ res_vui = vec_sum4s(vuc, vui);
+// CHECK: @llvm.ppc.altivec.vsum4ubs
+// CHECK-LE: @llvm.ppc.altivec.vsum4ubs
+
+ res_vi = vec_sum4s(vs, vi);
+// CHECK: @llvm.ppc.altivec.vsum4shs
+// CHECK-LE: @llvm.ppc.altivec.vsum4shs
+
+ res_vi = vec_vsum4sbs(vsc, vi);
+// CHECK: @llvm.ppc.altivec.vsum4sbs
+// CHECK-LE: @llvm.ppc.altivec.vsum4sbs
+
+ res_vui = vec_vsum4ubs(vuc, vui);
+// CHECK: @llvm.ppc.altivec.vsum4ubs
+// CHECK-LE: @llvm.ppc.altivec.vsum4ubs
+
+ res_vi = vec_vsum4shs(vs, vi);
+// CHECK: @llvm.ppc.altivec.vsum4shs
+// CHECK-LE: @llvm.ppc.altivec.vsum4shs
/* vec_sum2s */
- res_vi = vec_sum2s(vi, vi); // CHECK: @llvm.ppc.altivec.vsum2sws
- res_vi = vec_vsum2sws(vi, vi); // CHECK: @llvm.ppc.altivec.vsum2sws
+ res_vi = vec_sum2s(vi, vi);
+// CHECK: @llvm.ppc.altivec.vsum2sws
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vsum2sws
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vi = vec_vsum2sws(vi, vi);
+// CHECK: @llvm.ppc.altivec.vsum2sws
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vsum2sws
+// CHECK-LE: @llvm.ppc.altivec.vperm
/* vec_sums */
- res_vi = vec_sums(vi, vi); // CHECK: @llvm.ppc.altivec.vsumsws
- res_vi = vec_vsumsws(vi, vi); // CHECK: @llvm.ppc.altivec.vsumsws
+ res_vi = vec_sums(vi, vi);
+// CHECK: @llvm.ppc.altivec.vsumsws
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vsumsws
+
+ res_vi = vec_vsumsws(vi, vi);
+// CHECK: @llvm.ppc.altivec.vsumsws
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vsumsws
/* vec_trunc */
- res_vf = vec_trunc(vf); // CHECK: @llvm.ppc.altivec.vrfiz
- res_vf = vec_vrfiz(vf); // CHECK: @llvm.ppc.altivec.vrfiz
+ res_vf = vec_trunc(vf);
+// CHECK: @llvm.ppc.altivec.vrfiz
+// CHECK-LE: @llvm.ppc.altivec.vrfiz
+
+ res_vf = vec_vrfiz(vf);
+// CHECK: @llvm.ppc.altivec.vrfiz
+// CHECK-LE: @llvm.ppc.altivec.vrfiz
/* vec_unpackh */
- res_vs = vec_unpackh(vsc); // CHECK: @llvm.ppc.altivec.vupkhsb
- res_vbs = vec_unpackh(vbc); // CHECK: @llvm.ppc.altivec.vupkhsb
- res_vi = vec_unpackh(vs); // CHECK: @llvm.ppc.altivec.vupkhsh
- res_vbi = vec_unpackh(vbs); // CHECK: @llvm.ppc.altivec.vupkhsh
- res_vui = vec_unpackh(vp); // CHECK: @llvm.ppc.altivec.vupkhsh
- res_vs = vec_vupkhsb(vsc); // CHECK: @llvm.ppc.altivec.vupkhsb
- res_vbs = vec_vupkhsb(vbc); // CHECK: @llvm.ppc.altivec.vupkhsb
- res_vi = vec_vupkhsh(vs); // CHECK: @llvm.ppc.altivec.vupkhsh
- res_vbi = vec_vupkhsh(vbs); // CHECK: @llvm.ppc.altivec.vupkhsh
- res_vui = vec_vupkhsh(vp); // CHECK: @llvm.ppc.altivec.vupkhsh
+ res_vs = vec_unpackh(vsc);
+// CHECK: @llvm.ppc.altivec.vupkhsb
+// CHECK-LE: @llvm.ppc.altivec.vupklsb
+
+ res_vbs = vec_unpackh(vbc);
+// CHECK: @llvm.ppc.altivec.vupkhsb
+// CHECK-LE: @llvm.ppc.altivec.vupklsb
+
+ res_vi = vec_unpackh(vs);
+// CHECK: @llvm.ppc.altivec.vupkhsh
+// CHECK-LE: @llvm.ppc.altivec.vupklsh
+
+ res_vbi = vec_unpackh(vbs);
+// CHECK: @llvm.ppc.altivec.vupkhsh
+// CHECK-LE: @llvm.ppc.altivec.vupklsh
+
+ res_vui = vec_unpackh(vp);
+// CHECK: @llvm.ppc.altivec.vupkhpx
+// CHECK-LE: @llvm.ppc.altivec.vupklpx
+
+ res_vs = vec_vupkhsb(vsc);
+// CHECK: @llvm.ppc.altivec.vupkhsb
+// CHECK-LE: @llvm.ppc.altivec.vupklsb
+
+ res_vbs = vec_vupkhsb(vbc);
+// CHECK: @llvm.ppc.altivec.vupkhsb
+// CHECK-LE: @llvm.ppc.altivec.vupklsb
+
+ res_vi = vec_vupkhsh(vs);
+// CHECK: @llvm.ppc.altivec.vupkhsh
+// CHECK-LE: @llvm.ppc.altivec.vupklsh
+
+ res_vbi = vec_vupkhsh(vbs);
+// CHECK: @llvm.ppc.altivec.vupkhsh
+// CHECK-LE: @llvm.ppc.altivec.vupklsh
+
+ res_vui = vec_vupkhsh(vp);
+// CHECK: @llvm.ppc.altivec.vupkhpx
+// CHECK-LE: @llvm.ppc.altivec.vupklpx
/* vec_unpackl */
- res_vs = vec_unpackl(vsc); // CHECK: @llvm.ppc.altivec.vupklsb
- res_vbs = vec_unpackl(vbc); // CHECK: @llvm.ppc.altivec.vupklsb
- res_vi = vec_unpackl(vs); // CHECK: @llvm.ppc.altivec.vupklsh
- res_vbi = vec_unpackl(vbs); // CHECK: @llvm.ppc.altivec.vupklsh
- res_vui = vec_unpackl(vp); // CHECK: @llvm.ppc.altivec.vupklsh
- res_vs = vec_vupklsb(vsc); // CHECK: @llvm.ppc.altivec.vupklsb
- res_vbs = vec_vupklsb(vbc); // CHECK: @llvm.ppc.altivec.vupklsb
- res_vi = vec_vupklsh(vs); // CHECK: @llvm.ppc.altivec.vupklsh
- res_vbi = vec_vupklsh(vbs); // CHECK: @llvm.ppc.altivec.vupklsh
- res_vui = vec_vupklsh(vp); // CHECK: @llvm.ppc.altivec.vupklsh
+ res_vs = vec_unpackl(vsc);
+// CHECK: @llvm.ppc.altivec.vupklsb
+// CHECK-LE: @llvm.ppc.altivec.vupkhsb
+
+ res_vbs = vec_unpackl(vbc);
+// CHECK: @llvm.ppc.altivec.vupklsb
+// CHECK-LE: @llvm.ppc.altivec.vupkhsb
+
+ res_vi = vec_unpackl(vs);
+// CHECK: @llvm.ppc.altivec.vupklsh
+// CHECK-LE: @llvm.ppc.altivec.vupkhsh
+
+ res_vbi = vec_unpackl(vbs);
+// CHECK: @llvm.ppc.altivec.vupklsh
+// CHECK-LE: @llvm.ppc.altivec.vupkhsh
+
+ res_vui = vec_unpackl(vp);
+// CHECK: @llvm.ppc.altivec.vupklpx
+// CHECK-LE: @llvm.ppc.altivec.vupkhpx
+
+ res_vs = vec_vupklsb(vsc);
+// CHECK: @llvm.ppc.altivec.vupklsb
+// CHECK-LE: @llvm.ppc.altivec.vupkhsb
+
+ res_vbs = vec_vupklsb(vbc);
+// CHECK: @llvm.ppc.altivec.vupklsb
+// CHECK-LE: @llvm.ppc.altivec.vupkhsb
+
+ res_vi = vec_vupklsh(vs);
+// CHECK: @llvm.ppc.altivec.vupklsh
+// CHECK-LE: @llvm.ppc.altivec.vupkhsh
+
+ res_vbi = vec_vupklsh(vbs);
+// CHECK: @llvm.ppc.altivec.vupklsh
+// CHECK-LE: @llvm.ppc.altivec.vupkhsh
+
+ res_vui = vec_vupklsh(vp);
+// CHECK: @llvm.ppc.altivec.vupklpx
+// CHECK-LE: @llvm.ppc.altivec.vupkhpx
/* vec_xor */
- res_vsc = vec_xor(vsc, vsc); // CHECK: xor <16 x i8>
- res_vsc = vec_xor(vbc, vsc); // CHECK: xor <16 x i8>
- res_vsc = vec_xor(vsc, vbc); // CHECK: xor <16 x i8>
- res_vuc = vec_xor(vuc, vuc); // CHECK: xor <16 x i8>
- res_vuc = vec_xor(vbc, vuc); // CHECK: xor <16 x i8>
- res_vuc = vec_xor(vuc, vbc); // CHECK: xor <16 x i8>
- res_vbc = vec_xor(vbc, vbc); // CHECK: xor <16 x i8>
- res_vs = vec_xor(vs, vs); // CHECK: xor <8 x i16>
- res_vs = vec_xor(vbs, vs); // CHECK: xor <8 x i16>
- res_vs = vec_xor(vs, vbs); // CHECK: xor <8 x i16>
- res_vus = vec_xor(vus, vus); // CHECK: xor <8 x i16>
- res_vus = vec_xor(vbs, vus); // CHECK: xor <8 x i16>
- res_vus = vec_xor(vus, vbs); // CHECK: xor <8 x i16>
- res_vbs = vec_xor(vbs, vbs); // CHECK: xor <8 x i16>
- res_vi = vec_xor(vi, vi); // CHECK: xor <4 x i32>
- res_vi = vec_xor(vbi, vi); // CHECK: xor <4 x i32>
- res_vi = vec_xor(vi, vbi); // CHECK: xor <4 x i32>
- res_vui = vec_xor(vui, vui); // CHECK: xor <4 x i32>
- res_vui = vec_xor(vbi, vui); // CHECK: xor <4 x i32>
- res_vui = vec_xor(vui, vbi); // CHECK: xor <4 x i32>
- res_vbi = vec_xor(vbi, vbi); // CHECK: xor <4 x i32>
- res_vf = vec_xor(vf, vf); // CHECK: xor <4 x i32>
- res_vf = vec_xor(vbi, vf); // CHECK: xor <4 x i32>
- res_vf = vec_xor(vf, vbi); // CHECK: xor <4 x i32>
- res_vsc = vec_vxor(vsc, vsc); // CHECK: xor <16 x i8>
- res_vsc = vec_vxor(vbc, vsc); // CHECK: xor <16 x i8>
- res_vsc = vec_vxor(vsc, vbc); // CHECK: xor <16 x i8>
- res_vuc = vec_vxor(vuc, vuc); // CHECK: xor <16 x i8>
- res_vuc = vec_vxor(vbc, vuc); // CHECK: xor <16 x i8>
- res_vuc = vec_vxor(vuc, vbc); // CHECK: xor <16 x i8>
- res_vbc = vec_vxor(vbc, vbc); // CHECK: xor <16 x i8>
- res_vs = vec_vxor(vs, vs); // CHECK: xor <8 x i16>
- res_vs = vec_vxor(vbs, vs); // CHECK: xor <8 x i16>
- res_vs = vec_vxor(vs, vbs); // CHECK: xor <8 x i16>
- res_vus = vec_vxor(vus, vus); // CHECK: xor <8 x i16>
- res_vus = vec_vxor(vbs, vus); // CHECK: xor <8 x i16>
- res_vus = vec_vxor(vus, vbs); // CHECK: xor <8 x i16>
- res_vbs = vec_vxor(vbs, vbs); // CHECK: xor <8 x i16>
- res_vi = vec_vxor(vi, vi); // CHECK: xor <4 x i32>
- res_vi = vec_vxor(vbi, vi); // CHECK: xor <4 x i32>
- res_vi = vec_vxor(vi, vbi); // CHECK: xor <4 x i32>
- res_vui = vec_vxor(vui, vui); // CHECK: xor <4 x i32>
- res_vui = vec_vxor(vbi, vui); // CHECK: xor <4 x i32>
- res_vui = vec_vxor(vui, vbi); // CHECK: xor <4 x i32>
- res_vbi = vec_vxor(vbi, vbi); // CHECK: xor <4 x i32>
- res_vf = vec_vxor(vf, vf); // CHECK: xor <4 x i32>
- res_vf = vec_vxor(vbi, vf); // CHECK: xor <4 x i32>
- res_vf = vec_vxor(vf, vbi); // CHECK: xor <4 x i32>
+ res_vsc = vec_xor(vsc, vsc);
+// CHECK: xor <16 x i8>
+// CHECK-LE: xor <16 x i8>
+
+ res_vsc = vec_xor(vbc, vsc);
+// CHECK: xor <16 x i8>
+// CHECK-LE: xor <16 x i8>
+
+ res_vsc = vec_xor(vsc, vbc);
+// CHECK: xor <16 x i8>
+// CHECK-LE: xor <16 x i8>
+
+ res_vuc = vec_xor(vuc, vuc);
+// CHECK: xor <16 x i8>
+// CHECK-LE: xor <16 x i8>
+
+ res_vuc = vec_xor(vbc, vuc);
+// CHECK: xor <16 x i8>
+// CHECK-LE: xor <16 x i8>
+
+ res_vuc = vec_xor(vuc, vbc);
+// CHECK: xor <16 x i8>
+// CHECK-LE: xor <16 x i8>
+
+ res_vbc = vec_xor(vbc, vbc);
+// CHECK: xor <16 x i8>
+// CHECK-LE: xor <16 x i8>
+
+ res_vs = vec_xor(vs, vs);
+// CHECK: xor <8 x i16>
+// CHECK-LE: xor <8 x i16>
+
+ res_vs = vec_xor(vbs, vs);
+// CHECK: xor <8 x i16>
+// CHECK-LE: xor <8 x i16>
+
+ res_vs = vec_xor(vs, vbs);
+// CHECK: xor <8 x i16>
+// CHECK-LE: xor <8 x i16>
+
+ res_vus = vec_xor(vus, vus);
+// CHECK: xor <8 x i16>
+// CHECK-LE: xor <8 x i16>
+
+ res_vus = vec_xor(vbs, vus);
+// CHECK: xor <8 x i16>
+// CHECK-LE: xor <8 x i16>
+
+ res_vus = vec_xor(vus, vbs);
+// CHECK: xor <8 x i16>
+// CHECK-LE: xor <8 x i16>
+
+ res_vbs = vec_xor(vbs, vbs);
+// CHECK: xor <8 x i16>
+// CHECK-LE: xor <8 x i16>
+
+ res_vi = vec_xor(vi, vi);
+// CHECK: xor <4 x i32>
+// CHECK-LE: xor <4 x i32>
+
+ res_vi = vec_xor(vbi, vi);
+// CHECK: xor <4 x i32>
+// CHECK-LE: xor <4 x i32>
+
+ res_vi = vec_xor(vi, vbi);
+// CHECK: xor <4 x i32>
+// CHECK-LE: xor <4 x i32>
+
+ res_vui = vec_xor(vui, vui);
+// CHECK: xor <4 x i32>
+// CHECK-LE: xor <4 x i32>
+
+ res_vui = vec_xor(vbi, vui);
+// CHECK: xor <4 x i32>
+// CHECK-LE: xor <4 x i32>
+
+ res_vui = vec_xor(vui, vbi);
+// CHECK: xor <4 x i32>
+// CHECK-LE: xor <4 x i32>
+
+ res_vbi = vec_xor(vbi, vbi);
+// CHECK: xor <4 x i32>
+// CHECK-LE: xor <4 x i32>
+
+ res_vf = vec_xor(vf, vf);
+// CHECK: xor <4 x i32>
+// CHECK-LE: xor <4 x i32>
+
+ res_vf = vec_xor(vbi, vf);
+// CHECK: xor <4 x i32>
+// CHECK-LE: xor <4 x i32>
+
+ res_vf = vec_xor(vf, vbi);
+// CHECK: xor <4 x i32>
+// CHECK-LE: xor <4 x i32>
+
+ res_vsc = vec_vxor(vsc, vsc);
+// CHECK: xor <16 x i8>
+// CHECK-LE: xor <16 x i8>
+
+ res_vsc = vec_vxor(vbc, vsc);
+// CHECK: xor <16 x i8>
+// CHECK-LE: xor <16 x i8>
+
+ res_vsc = vec_vxor(vsc, vbc);
+// CHECK: xor <16 x i8>
+// CHECK-LE: xor <16 x i8>
+
+ res_vuc = vec_vxor(vuc, vuc);
+// CHECK: xor <16 x i8>
+// CHECK-LE: xor <16 x i8>
+
+ res_vuc = vec_vxor(vbc, vuc);
+// CHECK: xor <16 x i8>
+// CHECK-LE: xor <16 x i8>
+
+ res_vuc = vec_vxor(vuc, vbc);
+// CHECK: xor <16 x i8>
+// CHECK-LE: xor <16 x i8>
+
+ res_vbc = vec_vxor(vbc, vbc);
+// CHECK: xor <16 x i8>
+// CHECK-LE: xor <16 x i8>
+
+ res_vs = vec_vxor(vs, vs);
+// CHECK: xor <8 x i16>
+// CHECK-LE: xor <8 x i16>
+
+ res_vs = vec_vxor(vbs, vs);
+// CHECK: xor <8 x i16>
+// CHECK-LE: xor <8 x i16>
+
+ res_vs = vec_vxor(vs, vbs);
+// CHECK: xor <8 x i16>
+// CHECK-LE: xor <8 x i16>
+
+ res_vus = vec_vxor(vus, vus);
+// CHECK: xor <8 x i16>
+// CHECK-LE: xor <8 x i16>
+
+ res_vus = vec_vxor(vbs, vus);
+// CHECK: xor <8 x i16>
+// CHECK-LE: xor <8 x i16>
+
+ res_vus = vec_vxor(vus, vbs);
+// CHECK: xor <8 x i16>
+// CHECK-LE: xor <8 x i16>
+
+ res_vbs = vec_vxor(vbs, vbs);
+// CHECK: xor <8 x i16>
+// CHECK-LE: xor <8 x i16>
+
+ res_vi = vec_vxor(vi, vi);
+// CHECK: xor <4 x i32>
+// CHECK-LE: xor <4 x i32>
+
+ res_vi = vec_vxor(vbi, vi);
+// CHECK: xor <4 x i32>
+// CHECK-LE: xor <4 x i32>
+
+ res_vi = vec_vxor(vi, vbi);
+// CHECK: xor <4 x i32>
+// CHECK-LE: xor <4 x i32>
+
+ res_vui = vec_vxor(vui, vui);
+// CHECK: xor <4 x i32>
+// CHECK-LE: xor <4 x i32>
+
+ res_vui = vec_vxor(vbi, vui);
+// CHECK: xor <4 x i32>
+// CHECK-LE: xor <4 x i32>
+
+ res_vui = vec_vxor(vui, vbi);
+// CHECK: xor <4 x i32>
+// CHECK-LE: xor <4 x i32>
+
+ res_vbi = vec_vxor(vbi, vbi);
+// CHECK: xor <4 x i32>
+// CHECK-LE: xor <4 x i32>
+
+ res_vf = vec_vxor(vf, vf);
+// CHECK: xor <4 x i32>
+// CHECK-LE: xor <4 x i32>
+
+ res_vf = vec_vxor(vbi, vf);
+// CHECK: xor <4 x i32>
+// CHECK-LE: xor <4 x i32>
+
+ res_vf = vec_vxor(vf, vbi);
+// CHECK: xor <4 x i32>
+// CHECK-LE: xor <4 x i32>
/* ------------------------------ extensions -------------------------------------- */
/* vec_extract */
- res_sc = vec_extract(vsc, param_i); // CHECK: extractelement <16 x i8>
- res_uc = vec_extract(vuc, param_i); // CHECK: extractelement <16 x i8>
- res_s = vec_extract(vs, param_i); // CHECK: extractelement <8 x i16>
- res_us = vec_extract(vus, param_i); // CHECK: extractelement <8 x i16>
- res_i = vec_extract(vi, param_i); // CHECK: extractelement <4 x i32>
- res_ui = vec_extract(vui, param_i); // CHECK: extractelement <4 x i32>
- res_f = vec_extract(vf, param_i); // CHECK: extractelement <4 x float>
+ res_sc = vec_extract(vsc, param_i);
+// CHECK: extractelement <16 x i8>
+// CHECK-LE: extractelement <16 x i8>
+
+ res_uc = vec_extract(vuc, param_i);
+// CHECK: extractelement <16 x i8>
+// CHECK-LE: extractelement <16 x i8>
+
+ res_s = vec_extract(vs, param_i);
+// CHECK: extractelement <8 x i16>
+// CHECK-LE: extractelement <8 x i16>
+
+ res_us = vec_extract(vus, param_i);
+// CHECK: extractelement <8 x i16>
+// CHECK-LE: extractelement <8 x i16>
+
+ res_i = vec_extract(vi, param_i);
+// CHECK: extractelement <4 x i32>
+// CHECK-LE: extractelement <4 x i32>
+
+ res_ui = vec_extract(vui, param_i);
+// CHECK: extractelement <4 x i32>
+// CHECK-LE: extractelement <4 x i32>
+
+ res_f = vec_extract(vf, param_i);
+// CHECK: extractelement <4 x float>
+// CHECK-LE: extractelement <4 x float>
/* vec_insert */
- res_vsc = vec_insert(param_sc, vsc, param_i); // CHECK: insertelement <16 x i8>
- res_vuc = vec_insert(param_uc, vuc, param_i); // CHECK: insertelement <16 x i8>
- res_vs = vec_insert(param_s, vs, param_i); // CHECK: insertelement <8 x i16>
- res_vus = vec_insert(param_us, vus, param_i); // CHECK: insertelement <8 x i16>
- res_vi = vec_insert(param_i, vi, param_i); // CHECK: insertelement <4 x i32>
- res_vui = vec_insert(param_ui, vui, param_i); // CHECK: insertelement <4 x i32>
- res_vf = vec_insert(param_f, vf, param_i); // CHECK: insertelement <4 x float>
+ res_vsc = vec_insert(param_sc, vsc, param_i);
+// CHECK: insertelement <16 x i8>
+// CHECK-LE: insertelement <16 x i8>
+
+ res_vuc = vec_insert(param_uc, vuc, param_i);
+// CHECK: insertelement <16 x i8>
+// CHECK-LE: insertelement <16 x i8>
+
+ res_vs = vec_insert(param_s, vs, param_i);
+// CHECK: insertelement <8 x i16>
+// CHECK-LE: insertelement <8 x i16>
+
+ res_vus = vec_insert(param_us, vus, param_i);
+// CHECK: insertelement <8 x i16>
+// CHECK-LE: insertelement <8 x i16>
+
+ res_vi = vec_insert(param_i, vi, param_i);
+// CHECK: insertelement <4 x i32>
+// CHECK-LE: insertelement <4 x i32>
+
+ res_vui = vec_insert(param_ui, vui, param_i);
+// CHECK: insertelement <4 x i32>
+// CHECK-LE: insertelement <4 x i32>
+
+ res_vf = vec_insert(param_f, vf, param_i);
+// CHECK: insertelement <4 x float>
+// CHECK-LE: insertelement <4 x float>
/* vec_lvlx */
- res_vsc = vec_lvlx(0, &param_sc); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <16 x i8> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
-
- res_vsc = vec_lvlx(0, &vsc); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <16 x i8> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
-
- res_vuc = vec_lvlx(0, &param_uc); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <16 x i8> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
-
- res_vuc = vec_lvlx(0, &vuc); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <16 x i8> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
-
- res_vbc = vec_lvlx(0, &vbc); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: store <16 x i8> zeroinitializer
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: @llvm.ppc.altivec.vperm
-
- res_vs = vec_lvlx(0, &param_s); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <8 x i16> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
-
- res_vs = vec_lvlx(0, &vs); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <8 x i16> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
-
- res_vus = vec_lvlx(0, &param_us); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <8 x i16> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
-
- res_vus = vec_lvlx(0, &vus); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <8 x i16> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
-
- res_vbs = vec_lvlx(0, &vbs); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: store <8 x i16> zeroinitializer
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: @llvm.ppc.altivec.vperm
-
- res_vp = vec_lvlx(0, &vp); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: store <8 x i16> zeroinitializer
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: @llvm.ppc.altivec.vperm
-
- res_vi = vec_lvlx(0, &param_i); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <4 x i32> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
-
- res_vi = vec_lvlx(0, &vi); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <4 x i32> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
-
- res_vui = vec_lvlx(0, &param_ui); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <4 x i32> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
-
- res_vui = vec_lvlx(0, &vui); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <4 x i32> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
-
- res_vbi = vec_lvlx(0, &vbi); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: store <4 x i32> zeroinitializer
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: @llvm.ppc.altivec.vperm
-
- res_vf = vec_lvlx(0, &vf); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <4 x float> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
res_vsc = vec_lvlx(0, &param_sc);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <16 x i8> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <16 x i8> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vsc = vec_lvlx(0, &vsc);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <16 x i8> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <16 x i8> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
res_vuc = vec_lvlx(0, &param_uc);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <16 x i8> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <16 x i8> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vuc = vec_lvlx(0, &vuc);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <16 x i8> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <16 x i8> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vbc = vec_lvlx(0, &vbc);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: store <16 x i8> zeroinitializer
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: store <16 x i8> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
res_vs = vec_lvlx(0, &param_s);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <8 x i16> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <8 x i16> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vs = vec_lvlx(0, &vs);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <8 x i16> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <8 x i16> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
res_vus = vec_lvlx(0, &param_us);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <8 x i16> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <8 x i16> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vus = vec_lvlx(0, &vus);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <8 x i16> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <8 x i16> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vbs = vec_lvlx(0, &vbs);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: store <8 x i16> zeroinitializer
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: store <8 x i16> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vp = vec_lvlx(0, &vp);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: store <8 x i16> zeroinitializer
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: store <8 x i16> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
res_vi = vec_lvlx(0, &param_i);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <4 x i32> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <4 x i32> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vi = vec_lvlx(0, &vi);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <4 x i32> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <4 x i32> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
res_vui = vec_lvlx(0, &param_ui);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <4 x i32> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <4 x i32> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vui = vec_lvlx(0, &vui);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <4 x i32> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <4 x i32> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vbi = vec_lvlx(0, &vbi);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: store <4 x i32> zeroinitializer
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: store <4 x i32> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vf = vec_lvlx(0, &vf);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <4 x float> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <4 x float> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
/* vec_lvlxl */
- res_vsc = vec_lvlxl(0, &param_sc); // CHECK: @llvm.ppc.altivec.lvxl
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <16 x i8> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
-
- res_vsc = vec_lvlxl(0, &vsc); // CHECK: @llvm.ppc.altivec.lvxl
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <16 x i8> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
-
- res_vuc = vec_lvlxl(0, &param_uc); // CHECK: @llvm.ppc.altivec.lvxl
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <16 x i8> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
-
- res_vuc = vec_lvlxl(0, &vuc); // CHECK: @llvm.ppc.altivec.lvxl
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <16 x i8> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
-
- res_vbc = vec_lvlxl(0, &vbc); // CHECK: @llvm.ppc.altivec.lvxl
- // CHECK: store <16 x i8> zeroinitializer
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: @llvm.ppc.altivec.vperm
-
- res_vs = vec_lvlxl(0, &param_s); // CHECK: @llvm.ppc.altivec.lvxl
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <8 x i16> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
-
- res_vs = vec_lvlxl(0, &vs); // CHECK: @llvm.ppc.altivec.lvxl
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <8 x i16> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
-
- res_vus = vec_lvlxl(0, &param_us); // CHECK: @llvm.ppc.altivec.lvxl
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <8 x i16> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
-
- res_vus = vec_lvlxl(0, &vus); // CHECK: @llvm.ppc.altivec.lvxl
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <8 x i16> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
-
- res_vbs = vec_lvlxl(0, &vbs); // CHECK: @llvm.ppc.altivec.lvxl
- // CHECK: store <8 x i16> zeroinitializer
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: @llvm.ppc.altivec.vperm
-
- res_vp = vec_lvlxl(0, &vp); // CHECK: @llvm.ppc.altivec.lvxl
- // CHECK: store <8 x i16> zeroinitializer
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: @llvm.ppc.altivec.vperm
-
- res_vi = vec_lvlxl(0, &param_i); // CHECK: @llvm.ppc.altivec.lvxl
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <4 x i32> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
-
- res_vi = vec_lvlxl(0, &vi); // CHECK: @llvm.ppc.altivec.lvxl
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <4 x i32> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
-
- res_vui = vec_lvlxl(0, &param_ui); // CHECK: @llvm.ppc.altivec.lvxl
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <4 x i32> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
-
- res_vui = vec_lvlxl(0, &vui); // CHECK: @llvm.ppc.altivec.lvxl
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <4 x i32> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
-
- res_vbi = vec_lvlxl(0, &vbi); // CHECK: @llvm.ppc.altivec.lvxl
- // CHECK: store <4 x i32> zeroinitializer
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: @llvm.ppc.altivec.vperm
-
- res_vf = vec_lvlxl(0, &vf); // CHECK: @llvm.ppc.altivec.lvxl
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <4 x float> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
+ res_vsc = vec_lvlxl(0, &param_sc);
+// CHECK: @llvm.ppc.altivec.lvxl
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <16 x i8> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvxl
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <16 x i8> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vsc = vec_lvlxl(0, &vsc);
+// CHECK: @llvm.ppc.altivec.lvxl
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <16 x i8> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvxl
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <16 x i8> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vuc = vec_lvlxl(0, &param_uc);
+// CHECK: @llvm.ppc.altivec.lvxl
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <16 x i8> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvxl
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <16 x i8> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vuc = vec_lvlxl(0, &vuc);
+// CHECK: @llvm.ppc.altivec.lvxl
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <16 x i8> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvxl
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <16 x i8> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vbc = vec_lvlxl(0, &vbc);
+// CHECK: @llvm.ppc.altivec.lvxl
+// CHECK: store <16 x i8> zeroinitializer
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvxl
+// CHECK-LE: store <16 x i8> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vs = vec_lvlxl(0, &param_s);
+// CHECK: @llvm.ppc.altivec.lvxl
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <8 x i16> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvxl
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <8 x i16> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vs = vec_lvlxl(0, &vs);
+// CHECK: @llvm.ppc.altivec.lvxl
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <8 x i16> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvxl
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <8 x i16> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vus = vec_lvlxl(0, &param_us);
+// CHECK: @llvm.ppc.altivec.lvxl
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <8 x i16> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvxl
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <8 x i16> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vus = vec_lvlxl(0, &vus);
+// CHECK: @llvm.ppc.altivec.lvxl
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <8 x i16> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvxl
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <8 x i16> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vbs = vec_lvlxl(0, &vbs);
+// CHECK: @llvm.ppc.altivec.lvxl
+// CHECK: store <8 x i16> zeroinitializer
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvxl
+// CHECK-LE: store <8 x i16> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vp = vec_lvlxl(0, &vp);
+// CHECK: @llvm.ppc.altivec.lvxl
+// CHECK: store <8 x i16> zeroinitializer
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvxl
+// CHECK-LE: store <8 x i16> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vi = vec_lvlxl(0, &param_i);
+// CHECK: @llvm.ppc.altivec.lvxl
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <4 x i32> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvxl
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <4 x i32> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vi = vec_lvlxl(0, &vi);
+// CHECK: @llvm.ppc.altivec.lvxl
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <4 x i32> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvxl
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <4 x i32> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vui = vec_lvlxl(0, &param_ui);
+// CHECK: @llvm.ppc.altivec.lvxl
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <4 x i32> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvxl
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <4 x i32> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vui = vec_lvlxl(0, &vui);
+// CHECK: @llvm.ppc.altivec.lvxl
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <4 x i32> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvxl
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <4 x i32> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vbi = vec_lvlxl(0, &vbi);
+// CHECK: @llvm.ppc.altivec.lvxl
+// CHECK: store <4 x i32> zeroinitializer
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvxl
+// CHECK-LE: store <4 x i32> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vf = vec_lvlxl(0, &vf);
+// CHECK: @llvm.ppc.altivec.lvxl
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <4 x float> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvxl
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <4 x float> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
/* vec_lvrx */
- res_vsc = vec_lvrx(0, &param_sc); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <16 x i8> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
-
- res_vsc = vec_lvrx(0, &vsc); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <16 x i8> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
-
- res_vuc = vec_lvrx(0, &param_uc); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <16 x i8> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
-
- res_vuc = vec_lvrx(0, &vuc); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <16 x i8> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
-
- res_vbc = vec_lvrx(0, &vbc); // CHECK: store <16 x i8> zeroinitializer
- // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: @llvm.ppc.altivec.vperm
-
- res_vs = vec_lvrx(0, &param_s); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <8 x i16> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
-
- res_vs = vec_lvrx(0, &vs); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <8 x i16> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
-
- res_vus = vec_lvrx(0, &param_us); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <8 x i16> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
-
- res_vus = vec_lvrx(0, &vus); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <8 x i16> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
-
- res_vbs = vec_lvrx(0, &vbs); // CHECK: store <8 x i16> zeroinitializer
- // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: @llvm.ppc.altivec.vperm
-
- res_vp = vec_lvrx(0, &vp); // CHECK: store <8 x i16> zeroinitializer
- // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: @llvm.ppc.altivec.vperm
-
- res_vi = vec_lvrx(0, &param_i); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <4 x i32> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
-
- res_vi = vec_lvrx(0, &vi); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <4 x i32> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
-
- res_vui = vec_lvrx(0, &param_ui); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <4 x i32> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
-
- res_vui = vec_lvrx(0, &vui); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <4 x i32> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
-
- res_vbi = vec_lvrx(0, &vbi); // CHECK: store <4 x i32> zeroinitializer
- // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: @llvm.ppc.altivec.vperm
-
- res_vf = vec_lvrx(0, &vf); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <4 x float> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
+ res_vsc = vec_lvrx(0, &param_sc);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <16 x i8> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <16 x i8> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vsc = vec_lvrx(0, &vsc);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <16 x i8> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <16 x i8> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vuc = vec_lvrx(0, &param_uc);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <16 x i8> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <16 x i8> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vuc = vec_lvrx(0, &vuc);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <16 x i8> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <16 x i8> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vbc = vec_lvrx(0, &vbc);
+// CHECK: store <16 x i8> zeroinitializer
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: store <16 x i8> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vs = vec_lvrx(0, &param_s);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <8 x i16> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <8 x i16> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vs = vec_lvrx(0, &vs);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <8 x i16> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <8 x i16> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vus = vec_lvrx(0, &param_us);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <8 x i16> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <8 x i16> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vus = vec_lvrx(0, &vus);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <8 x i16> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <8 x i16> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vbs = vec_lvrx(0, &vbs);
+// CHECK: store <8 x i16> zeroinitializer
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: store <8 x i16> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vp = vec_lvrx(0, &vp);
+// CHECK: store <8 x i16> zeroinitializer
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: store <8 x i16> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vi = vec_lvrx(0, &param_i);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <4 x i32> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <4 x i32> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vi = vec_lvrx(0, &vi);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <4 x i32> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <4 x i32> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vui = vec_lvrx(0, &param_ui);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <4 x i32> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <4 x i32> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vui = vec_lvrx(0, &vui);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <4 x i32> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <4 x i32> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vbi = vec_lvrx(0, &vbi);
+// CHECK: store <4 x i32> zeroinitializer
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: store <4 x i32> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vf = vec_lvrx(0, &vf);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <4 x float> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <4 x float> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
/* vec_lvrxl */
- res_vsc = vec_lvrxl(0, &param_sc); // CHECK: @llvm.ppc.altivec.lvxl
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <16 x i8> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
-
- res_vsc = vec_lvrxl(0, &vsc); // CHECK: @llvm.ppc.altivec.lvxl
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <16 x i8> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
-
- res_vuc = vec_lvrxl(0, &param_uc); // CHECK: @llvm.ppc.altivec.lvxl
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <16 x i8> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
-
- res_vuc = vec_lvrxl(0, &vuc); // CHECK: @llvm.ppc.altivec.lvxl
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <16 x i8> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
-
- res_vbc = vec_lvrxl(0, &vbc); // CHECK: store <16 x i8> zeroinitializer
- // CHECK: @llvm.ppc.altivec.lvxl
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: @llvm.ppc.altivec.vperm
-
- res_vs = vec_lvrxl(0, &param_s); // CHECK: @llvm.ppc.altivec.lvxl
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <8 x i16> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
-
- res_vs = vec_lvrxl(0, &vs); // CHECK: @llvm.ppc.altivec.lvxl
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <8 x i16> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
-
- res_vus = vec_lvrxl(0, &param_us); // CHECK: @llvm.ppc.altivec.lvxl
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <8 x i16> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
-
- res_vus = vec_lvrxl(0, &vus); // CHECK: @llvm.ppc.altivec.lvxl
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <8 x i16> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
-
- res_vbs = vec_lvrxl(0, &vbs); // CHECK: store <8 x i16> zeroinitializer
- // CHECK: @llvm.ppc.altivec.lvxl
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: @llvm.ppc.altivec.vperm
-
- res_vp = vec_lvrxl(0, &vp); // CHECK: store <8 x i16> zeroinitializer
- // CHECK: @llvm.ppc.altivec.lvxl
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: @llvm.ppc.altivec.vperm
-
- res_vi = vec_lvrxl(0, &param_i); // CHECK: @llvm.ppc.altivec.lvxl
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <4 x i32> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
-
- res_vi = vec_lvrxl(0, &vi); // CHECK: @llvm.ppc.altivec.lvxl
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <4 x i32> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
-
- res_vui = vec_lvrxl(0, &param_ui); // CHECK: @llvm.ppc.altivec.lvxl
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <4 x i32> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
-
- res_vui = vec_lvrxl(0, &vui); // CHECK: @llvm.ppc.altivec.lvxl
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <4 x i32> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
-
- res_vbi = vec_lvrxl(0, &vbi); // CHECK: store <4 x i32> zeroinitializer
- // CHECK: @llvm.ppc.altivec.lvxl
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: @llvm.ppc.altivec.vperm
-
- res_vf = vec_lvrxl(0, &vf); // CHECK: @llvm.ppc.altivec.lvxl
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <4 x float> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
+ res_vsc = vec_lvrxl(0, &param_sc);
+// CHECK: @llvm.ppc.altivec.lvxl
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <16 x i8> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvxl
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <16 x i8> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vsc = vec_lvrxl(0, &vsc);
+// CHECK: @llvm.ppc.altivec.lvxl
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <16 x i8> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvxl
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <16 x i8> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vuc = vec_lvrxl(0, &param_uc);
+// CHECK: @llvm.ppc.altivec.lvxl
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <16 x i8> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvxl
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <16 x i8> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vuc = vec_lvrxl(0, &vuc);
+// CHECK: @llvm.ppc.altivec.lvxl
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <16 x i8> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvxl
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <16 x i8> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vbc = vec_lvrxl(0, &vbc);
+// CHECK: store <16 x i8> zeroinitializer
+// CHECK: @llvm.ppc.altivec.lvxl
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: store <16 x i8> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.lvxl
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vs = vec_lvrxl(0, &param_s);
+// CHECK: @llvm.ppc.altivec.lvxl
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <8 x i16> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvxl
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <8 x i16> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vs = vec_lvrxl(0, &vs);
+// CHECK: @llvm.ppc.altivec.lvxl
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <8 x i16> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvxl
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <8 x i16> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vus = vec_lvrxl(0, &param_us);
+// CHECK: @llvm.ppc.altivec.lvxl
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <8 x i16> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvxl
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <8 x i16> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vus = vec_lvrxl(0, &vus);
+// CHECK: @llvm.ppc.altivec.lvxl
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <8 x i16> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvxl
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <8 x i16> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vbs = vec_lvrxl(0, &vbs);
+// CHECK: store <8 x i16> zeroinitializer
+// CHECK: @llvm.ppc.altivec.lvxl
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: store <8 x i16> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.lvxl
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vp = vec_lvrxl(0, &vp);
+// CHECK: store <8 x i16> zeroinitializer
+// CHECK: @llvm.ppc.altivec.lvxl
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: store <8 x i16> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.lvxl
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vi = vec_lvrxl(0, &param_i);
+// CHECK: @llvm.ppc.altivec.lvxl
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <4 x i32> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvxl
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <4 x i32> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vi = vec_lvrxl(0, &vi);
+// CHECK: @llvm.ppc.altivec.lvxl
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <4 x i32> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvxl
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <4 x i32> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vui = vec_lvrxl(0, &param_ui);
+// CHECK: @llvm.ppc.altivec.lvxl
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <4 x i32> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvxl
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <4 x i32> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vui = vec_lvrxl(0, &vui);
+// CHECK: @llvm.ppc.altivec.lvxl
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <4 x i32> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvxl
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <4 x i32> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vbi = vec_lvrxl(0, &vbi);
+// CHECK: store <4 x i32> zeroinitializer
+// CHECK: @llvm.ppc.altivec.lvxl
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: store <4 x i32> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.lvxl
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vf = vec_lvrxl(0, &vf);
+// CHECK: @llvm.ppc.altivec.lvxl
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <4 x float> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvxl
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <4 x float> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
/* vec_stvlx */
- vec_stvlx(vsc, 0, &param_sc); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <16 x i8> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.lvsr
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.stvx
-
- vec_stvlx(vsc, 0, &vsc); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <16 x i8> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.lvsr
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.stvx
-
- vec_stvlx(vuc, 0, &param_uc); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <16 x i8> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.lvsr
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.stvx
-
- vec_stvlx(vuc, 0, &vuc); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <16 x i8> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.lvsr
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.stvx
-
- vec_stvlx(vbc, 0, &vbc); // CHECK: store <16 x i8> zeroinitializer
- // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.lvsr
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.stvx
-
- vec_stvlx(vs, 0, &param_s); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <8 x i16> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.lvsr
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.stvx
-
- vec_stvlx(vs, 0, &vs); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <8 x i16> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.lvsr
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.stvx
-
- vec_stvlx(vus, 0, &param_us); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <8 x i16> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.lvsr
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.stvx
-
- vec_stvlx(vus, 0, &vus); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <8 x i16> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.lvsr
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.stvx
-
- vec_stvlx(vbs, 0, &vbs); // CHECK: store <8 x i16> zeroinitializer
- // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.lvsr
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.stvx
-
- vec_stvlx(vp, 0, &vp); // CHECK: store <8 x i16> zeroinitializer
- // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.lvsr
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.stvx
-
- vec_stvlx(vi, 0, &param_i); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <4 x i32> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.lvsr
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.stvx
-
- vec_stvlx(vi, 0, &vi); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <4 x i32> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.lvsr
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.stvx
-
- vec_stvlx(vui, 0, &param_ui); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <4 x i32> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.lvsr
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.stvx
-
- vec_stvlx(vui, 0, &vui); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <4 x i32> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.lvsr
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.stvx
-
- vec_stvlx(vbi, 0, &vbi); // CHECK: store <4 x i32> zeroinitializer
- // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.lvsr
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.stvx
-
- vec_stvlx(vf, 0, &vf); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <4 x float> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.lvsr
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.stvx
+ vec_stvlx(vsc, 0, &param_sc);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <16 x i8> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.lvsr
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <16 x i8> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvsr
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_stvlx(vsc, 0, &vsc);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <16 x i8> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.lvsr
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <16 x i8> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvsr
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_stvlx(vuc, 0, &param_uc);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <16 x i8> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.lvsr
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <16 x i8> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvsr
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_stvlx(vuc, 0, &vuc);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <16 x i8> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.lvsr
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <16 x i8> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvsr
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_stvlx(vbc, 0, &vbc);
+// CHECK: store <16 x i8> zeroinitializer
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.lvsr
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: store <16 x i8> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvsr
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_stvlx(vs, 0, &param_s);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <8 x i16> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.lvsr
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <8 x i16> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvsr
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_stvlx(vs, 0, &vs);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <8 x i16> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.lvsr
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <8 x i16> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvsr
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_stvlx(vus, 0, &param_us);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <8 x i16> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.lvsr
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <8 x i16> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvsr
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_stvlx(vus, 0, &vus);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <8 x i16> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.lvsr
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <8 x i16> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvsr
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_stvlx(vbs, 0, &vbs);
+// CHECK: store <8 x i16> zeroinitializer
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.lvsr
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: store <8 x i16> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvsr
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_stvlx(vp, 0, &vp);
+// CHECK: store <8 x i16> zeroinitializer
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.lvsr
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: store <8 x i16> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvsr
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_stvlx(vi, 0, &param_i);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <4 x i32> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.lvsr
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <4 x i32> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvsr
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_stvlx(vi, 0, &vi);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <4 x i32> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.lvsr
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <4 x i32> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvsr
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_stvlx(vui, 0, &param_ui);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <4 x i32> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.lvsr
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <4 x i32> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvsr
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_stvlx(vui, 0, &vui);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <4 x i32> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.lvsr
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <4 x i32> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvsr
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_stvlx(vbi, 0, &vbi);
+// CHECK: store <4 x i32> zeroinitializer
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.lvsr
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: store <4 x i32> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvsr
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_stvlx(vf, 0, &vf);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <4 x float> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.lvsr
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <4 x float> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvsr
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.stvx
/* vec_stvlxl */
- vec_stvlxl(vsc, 0, &param_sc); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <16 x i8> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.lvsr
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.stvxl
-
- vec_stvlxl(vsc, 0, &vsc); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <16 x i8> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.lvsr
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.stvxl
-
- vec_stvlxl(vuc, 0, &param_uc); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <16 x i8> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.lvsr
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.stvxl
-
- vec_stvlxl(vuc, 0, &vuc); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <16 x i8> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.lvsr
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.stvxl
-
- vec_stvlxl(vbc, 0, &vbc); // CHECK: store <16 x i8> zeroinitializer
- // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.lvsr
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.stvxl
-
- vec_stvlxl(vs, 0, &param_s); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <8 x i16> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.lvsr
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.stvxl
-
- vec_stvlxl(vs, 0, &vs); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <8 x i16> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.lvsr
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.stvxl
-
- vec_stvlxl(vus, 0, &param_us); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <8 x i16> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.lvsr
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.stvxl
-
- vec_stvlxl(vus, 0, &vus); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <8 x i16> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.lvsr
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.stvxl
-
- vec_stvlxl(vbs, 0, &vbs); // CHECK: store <8 x i16> zeroinitializer
- // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.lvsr
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.stvxl
-
- vec_stvlxl(vp, 0, &vp); // CHECK: store <8 x i16> zeroinitializer
- // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.lvsr
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.stvxl
-
- vec_stvlxl(vi, 0, &param_i); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <4 x i32> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.lvsr
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.stvxl
-
- vec_stvlxl(vi, 0, &vi); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <4 x i32> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.lvsr
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.stvxl
-
- vec_stvlxl(vui, 0, &param_ui); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <4 x i32> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.lvsr
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.stvxl
-
- vec_stvlxl(vui, 0, &vui); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <4 x i32> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.lvsr
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.stvxl
-
- vec_stvlxl(vbi, 0, &vbi); // CHECK: store <4 x i32> zeroinitializer
- // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.lvsr
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.stvxl
-
- vec_stvlxl(vf, 0, &vf); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <4 x float> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.lvsr
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.stvxl
+ vec_stvlxl(vsc, 0, &param_sc);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <16 x i8> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.lvsr
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <16 x i8> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvsr
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stvlxl(vsc, 0, &vsc);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <16 x i8> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.lvsr
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <16 x i8> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvsr
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stvlxl(vuc, 0, &param_uc);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <16 x i8> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.lvsr
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <16 x i8> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvsr
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stvlxl(vuc, 0, &vuc);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <16 x i8> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.lvsr
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <16 x i8> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvsr
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stvlxl(vbc, 0, &vbc);
+// CHECK: store <16 x i8> zeroinitializer
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.lvsr
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: store <16 x i8> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvsr
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stvlxl(vs, 0, &param_s);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <8 x i16> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.lvsr
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <8 x i16> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvsr
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stvlxl(vs, 0, &vs);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <8 x i16> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.lvsr
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <8 x i16> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvsr
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stvlxl(vus, 0, &param_us);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <8 x i16> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.lvsr
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <8 x i16> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvsr
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stvlxl(vus, 0, &vus);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <8 x i16> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.lvsr
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <8 x i16> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvsr
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stvlxl(vbs, 0, &vbs);
+// CHECK: store <8 x i16> zeroinitializer
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.lvsr
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: store <8 x i16> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvsr
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stvlxl(vp, 0, &vp);
+// CHECK: store <8 x i16> zeroinitializer
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.lvsr
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: store <8 x i16> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvsr
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stvlxl(vi, 0, &param_i);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <4 x i32> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.lvsr
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <4 x i32> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvsr
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stvlxl(vi, 0, &vi);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <4 x i32> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.lvsr
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <4 x i32> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvsr
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stvlxl(vui, 0, &param_ui);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <4 x i32> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.lvsr
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <4 x i32> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvsr
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stvlxl(vui, 0, &vui);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <4 x i32> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.lvsr
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <4 x i32> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvsr
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stvlxl(vbi, 0, &vbi);
+// CHECK: store <4 x i32> zeroinitializer
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.lvsr
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: store <4 x i32> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvsr
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stvlxl(vf, 0, &vf);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <4 x float> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.lvsr
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <4 x float> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvsr
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.stvxl
/* vec_stvrx */
- vec_stvrx(vsc, 0, &param_sc); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <16 x i8> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.lvsr
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.stvx
-
- vec_stvrx(vsc, 0, &vsc); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <16 x i8> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.lvsr
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.stvx
-
- vec_stvrx(vuc, 0, &param_uc); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <16 x i8> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.lvsr
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.stvx
-
- vec_stvrx(vuc, 0, &vuc); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <16 x i8> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.lvsr
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.stvx
-
- vec_stvrx(vbc, 0, &vbc); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: store <16 x i8> zeroinitializer
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.lvsr
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.stvx
-
- vec_stvrx(vs, 0, &param_s); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <8 x i16> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.lvsr
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.stvx
-
- vec_stvrx(vs, 0, &vs); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <8 x i16> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.lvsr
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.stvx
-
- vec_stvrx(vus, 0, &param_us); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <8 x i16> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.lvsr
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.stvx
-
- vec_stvrx(vus, 0, &vus); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <8 x i16> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.lvsr
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.stvx
-
- vec_stvrx(vbs, 0, &vbs); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: store <8 x i16> zeroinitializer
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.lvsr
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.stvx
-
- vec_stvrx(vp, 0, &vp); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: store <8 x i16> zeroinitializer
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.lvsr
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.stvx
-
- vec_stvrx(vi, 0, &param_i); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <4 x i32> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.lvsr
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.stvx
-
- vec_stvrx(vi, 0, &vi); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <4 x i32> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.lvsr
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.stvx
-
- vec_stvrx(vui, 0, &param_ui); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <4 x i32> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.lvsr
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.stvx
-
- vec_stvrx(vui, 0, &vui); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <4 x i32> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.lvsr
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.stvx
-
- vec_stvrx(vbi, 0, &vbi); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: store <4 x i32> zeroinitializer
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.lvsr
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.stvx
-
- vec_stvrx(vf, 0, &vf); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <4 x float> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.lvsr
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.stvx
+ vec_stvrx(vsc, 0, &param_sc);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <16 x i8> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.lvsr
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <16 x i8> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvsr
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_stvrx(vsc, 0, &vsc);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <16 x i8> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.lvsr
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <16 x i8> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvsr
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_stvrx(vuc, 0, &param_uc);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <16 x i8> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.lvsr
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <16 x i8> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvsr
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_stvrx(vuc, 0, &vuc);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <16 x i8> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.lvsr
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <16 x i8> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvsr
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_stvrx(vbc, 0, &vbc);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: store <16 x i8> zeroinitializer
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.lvsr
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: store <16 x i8> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvsr
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_stvrx(vs, 0, &param_s);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <8 x i16> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.lvsr
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <8 x i16> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvsr
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_stvrx(vs, 0, &vs);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <8 x i16> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.lvsr
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <8 x i16> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvsr
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_stvrx(vus, 0, &param_us);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <8 x i16> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.lvsr
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <8 x i16> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvsr
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_stvrx(vus, 0, &vus);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <8 x i16> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.lvsr
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <8 x i16> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvsr
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_stvrx(vbs, 0, &vbs);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: store <8 x i16> zeroinitializer
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.lvsr
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: store <8 x i16> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvsr
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_stvrx(vp, 0, &vp);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: store <8 x i16> zeroinitializer
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.lvsr
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: store <8 x i16> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvsr
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_stvrx(vi, 0, &param_i);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <4 x i32> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.lvsr
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <4 x i32> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvsr
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_stvrx(vi, 0, &vi);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <4 x i32> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.lvsr
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <4 x i32> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvsr
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_stvrx(vui, 0, &param_ui);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <4 x i32> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.lvsr
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <4 x i32> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvsr
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_stvrx(vui, 0, &vui);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <4 x i32> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.lvsr
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <4 x i32> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvsr
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_stvrx(vbi, 0, &vbi);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: store <4 x i32> zeroinitializer
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.lvsr
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: store <4 x i32> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvsr
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.stvx
+
+ vec_stvrx(vf, 0, &vf);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <4 x float> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.lvsr
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.stvx
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <4 x float> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvsr
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.stvx
/* vec_stvrxl */
- vec_stvrxl(vsc, 0, &param_sc); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <16 x i8> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.lvsr
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.stvxl
-
- vec_stvrxl(vsc, 0, &vsc); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <16 x i8> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.lvsr
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.stvxl
-
- vec_stvrxl(vuc, 0, &param_uc); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <16 x i8> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.lvsr
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.stvxl
-
- vec_stvrxl(vuc, 0, &vuc); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <16 x i8> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.lvsr
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.stvxl
-
- vec_stvrxl(vbc, 0, &vbc); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: store <16 x i8> zeroinitializer
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.lvsr
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.stvxl
-
- vec_stvrxl(vs, 0, &param_s); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <8 x i16> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.lvsr
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.stvxl
-
- vec_stvrxl(vs, 0, &vs); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <8 x i16> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.lvsr
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.stvxl
-
- vec_stvrxl(vus, 0, &param_us); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <8 x i16> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.lvsr
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.stvxl
-
- vec_stvrxl(vus, 0, &vus); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <8 x i16> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.lvsr
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.stvxl
-
- vec_stvrxl(vbs, 0, &vbs); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: store <8 x i16> zeroinitializer
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.lvsr
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.stvxl
-
- vec_stvrxl(vp, 0, &vp); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: store <8 x i16> zeroinitializer
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.lvsr
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.stvxl
-
- vec_stvrxl(vi, 0, &param_i); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <4 x i32> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.lvsr
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.stvxl
-
- vec_stvrxl(vi, 0, &vi); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <4 x i32> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.lvsr
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.stvxl
-
- vec_stvrxl(vui, 0, &param_ui); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <4 x i32> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.lvsr
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.stvxl
-
- vec_stvrxl(vui, 0, &vui); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <4 x i32> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.lvsr
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.stvxl
-
- vec_stvrxl(vbi, 0, &vbi); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: store <4 x i32> zeroinitializer
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.lvsr
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.stvxl
-
- vec_stvrxl(vf, 0, &vf); // CHECK: @llvm.ppc.altivec.lvx
- // CHECK: @llvm.ppc.altivec.lvsl
- // CHECK: store <4 x float> zeroinitializer
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.lvsr
- // CHECK: @llvm.ppc.altivec.vperm
- // CHECK: @llvm.ppc.altivec.stvxl
+ vec_stvrxl(vsc, 0, &param_sc);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <16 x i8> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.lvsr
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <16 x i8> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvsr
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stvrxl(vsc, 0, &vsc);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <16 x i8> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.lvsr
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <16 x i8> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvsr
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stvrxl(vuc, 0, &param_uc);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <16 x i8> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.lvsr
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <16 x i8> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvsr
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stvrxl(vuc, 0, &vuc);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <16 x i8> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.lvsr
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <16 x i8> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvsr
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stvrxl(vbc, 0, &vbc);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: store <16 x i8> zeroinitializer
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.lvsr
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: store <16 x i8> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvsr
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stvrxl(vs, 0, &param_s);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <8 x i16> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.lvsr
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <8 x i16> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvsr
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stvrxl(vs, 0, &vs);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <8 x i16> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.lvsr
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <8 x i16> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvsr
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stvrxl(vus, 0, &param_us);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <8 x i16> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.lvsr
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <8 x i16> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvsr
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stvrxl(vus, 0, &vus);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <8 x i16> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.lvsr
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <8 x i16> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvsr
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stvrxl(vbs, 0, &vbs);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: store <8 x i16> zeroinitializer
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.lvsr
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: store <8 x i16> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvsr
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stvrxl(vp, 0, &vp);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: store <8 x i16> zeroinitializer
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.lvsr
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: store <8 x i16> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvsr
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stvrxl(vi, 0, &param_i);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <4 x i32> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.lvsr
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <4 x i32> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvsr
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stvrxl(vi, 0, &vi);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <4 x i32> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.lvsr
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <4 x i32> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvsr
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stvrxl(vui, 0, &param_ui);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <4 x i32> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.lvsr
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <4 x i32> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvsr
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stvrxl(vui, 0, &vui);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <4 x i32> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.lvsr
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <4 x i32> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvsr
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stvrxl(vbi, 0, &vbi);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: store <4 x i32> zeroinitializer
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.lvsr
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: store <4 x i32> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvsr
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.stvxl
+
+ vec_stvrxl(vf, 0, &vf);
+// CHECK: @llvm.ppc.altivec.lvx
+// CHECK: @llvm.ppc.altivec.lvsl
+// CHECK: store <4 x float> zeroinitializer
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.lvsr
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.altivec.stvxl
+// CHECK-LE: @llvm.ppc.altivec.lvx
+// CHECK-LE: @llvm.ppc.altivec.lvsl
+// CHECK-LE: store <4 x float> zeroinitializer
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.lvsr
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.stvxl
/* vec_promote */
- res_vsc = vec_promote(param_sc, 0); // CHECK: store <16 x i8> zeroinitializer
- // CHECK: insertelement <16 x i8>
-
- res_vuc = vec_promote(param_uc, 0); // CHECK: store <16 x i8> zeroinitializer
- // CHECK: insertelement <16 x i8>
-
- res_vs = vec_promote(param_s, 0); // CHECK: store <8 x i16> zeroinitializer
- // CHECK: insertelement <8 x i16>
-
- res_vus = vec_promote(param_us, 0); // CHECK: store <8 x i16> zeroinitializer
- // CHECK: insertelement <8 x i16>
-
- res_vi = vec_promote(param_i, 0); // CHECK: store <4 x i32> zeroinitializer
- // CHECK: insertelement <4 x i32>
-
- res_vui = vec_promote(param_ui, 0); // CHECK: store <4 x i32> zeroinitializer
- // CHECK: insertelement <4 x i32>
-
- res_vf = vec_promote(param_f, 0); // CHECK: store <4 x float> zeroinitializer
- // CHECK: insertelement <4 x float>
+ res_vsc = vec_promote(param_sc, 0);
+// CHECK: store <16 x i8> zeroinitializer
+// CHECK: insertelement <16 x i8>
+// CHECK-LE: store <16 x i8> zeroinitializer
+// CHECK-LE: insertelement <16 x i8>
+
+ res_vuc = vec_promote(param_uc, 0);
+// CHECK: store <16 x i8> zeroinitializer
+// CHECK: insertelement <16 x i8>
+// CHECK-LE: store <16 x i8> zeroinitializer
+// CHECK-LE: insertelement <16 x i8>
+
+ res_vs = vec_promote(param_s, 0);
+// CHECK: store <8 x i16> zeroinitializer
+// CHECK: insertelement <8 x i16>
+// CHECK-LE: store <8 x i16> zeroinitializer
+// CHECK-LE: insertelement <8 x i16>
+
+ res_vus = vec_promote(param_us, 0);
+// CHECK: store <8 x i16> zeroinitializer
+// CHECK: insertelement <8 x i16>
+// CHECK-LE: store <8 x i16> zeroinitializer
+// CHECK-LE: insertelement <8 x i16>
+
+ res_vi = vec_promote(param_i, 0);
+// CHECK: store <4 x i32> zeroinitializer
+// CHECK: insertelement <4 x i32>
+// CHECK-LE: store <4 x i32> zeroinitializer
+// CHECK-LE: insertelement <4 x i32>
+
+ res_vui = vec_promote(param_ui, 0);
+// CHECK: store <4 x i32> zeroinitializer
+// CHECK: insertelement <4 x i32>
+// CHECK-LE: store <4 x i32> zeroinitializer
+// CHECK-LE: insertelement <4 x i32>
+
+ res_vf = vec_promote(param_f, 0);
+// CHECK: store <4 x float> zeroinitializer
+// CHECK: insertelement <4 x float>
+// CHECK-LE: store <4 x float> zeroinitializer
+// CHECK-LE: insertelement <4 x float>
/* vec_splats */
- res_vsc = vec_splats(param_sc); // CHECK: insertelement <16 x i8>
-
- res_vuc = vec_splats(param_uc); // CHECK: insertelement <16 x i8>
-
- res_vs = vec_splats(param_s); // CHECK: insertelement <8 x i16>
-
- res_vus = vec_splats(param_us); // CHECK: insertelement <8 x i16>
-
- res_vi = vec_splats(param_i); // CHECK: insertelement <4 x i32>
-
- res_vui = vec_splats(param_ui); // CHECK: insertelement <4 x i32>
-
- res_vf = vec_splats(param_f); // CHECK: insertelement <4 x float>
+ res_vsc = vec_splats(param_sc);
+// CHECK: insertelement <16 x i8>
+// CHECK-LE: insertelement <16 x i8>
+
+ res_vuc = vec_splats(param_uc);
+// CHECK: insertelement <16 x i8>
+// CHECK-LE: insertelement <16 x i8>
+
+ res_vs = vec_splats(param_s);
+// CHECK: insertelement <8 x i16>
+// CHECK-LE: insertelement <8 x i16>
+
+ res_vus = vec_splats(param_us);
+// CHECK: insertelement <8 x i16>
+// CHECK-LE: insertelement <8 x i16>
+
+ res_vi = vec_splats(param_i);
+// CHECK: insertelement <4 x i32>
+// CHECK-LE: insertelement <4 x i32>
+
+ res_vui = vec_splats(param_ui);
+// CHECK: insertelement <4 x i32>
+// CHECK-LE: insertelement <4 x i32>
+
+ res_vf = vec_splats(param_f);
+// CHECK: insertelement <4 x float>
+// CHECK-LE: insertelement <4 x float>
/* ------------------------------ predicates -------------------------------------- */
/* vec_all_eq */
- res_i = vec_all_eq(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
- res_i = vec_all_eq(vsc, vbc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
- res_i = vec_all_eq(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
- res_i = vec_all_eq(vuc, vbc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
- res_i = vec_all_eq(vbc, vsc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
- res_i = vec_all_eq(vbc, vuc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
- res_i = vec_all_eq(vbc, vbc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
- res_i = vec_all_eq(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpequh.p
- res_i = vec_all_eq(vs, vbs); // CHECK: @llvm.ppc.altivec.vcmpequh.p
- res_i = vec_all_eq(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpequh.p
- res_i = vec_all_eq(vus, vbs); // CHECK: @llvm.ppc.altivec.vcmpequh.p
- res_i = vec_all_eq(vbs, vs); // CHECK: @llvm.ppc.altivec.vcmpequh.p
- res_i = vec_all_eq(vbs, vus); // CHECK: @llvm.ppc.altivec.vcmpequh.p
- res_i = vec_all_eq(vbs, vbs); // CHECK: @llvm.ppc.altivec.vcmpequh.p
- res_i = vec_all_eq(vp, vp); // CHECK: @llvm.ppc.altivec.vcmpequh.p
- res_i = vec_all_eq(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpequw.p
- res_i = vec_all_eq(vi, vbi); // CHECK: @llvm.ppc.altivec.vcmpequw.p
- res_i = vec_all_eq(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpequw.p
- res_i = vec_all_eq(vui, vbi); // CHECK: @llvm.ppc.altivec.vcmpequw.p
- res_i = vec_all_eq(vbi, vi); // CHECK: @llvm.ppc.altivec.vcmpequw.p
- res_i = vec_all_eq(vbi, vui); // CHECK: @llvm.ppc.altivec.vcmpequw.p
- res_i = vec_all_eq(vbi, vbi); // CHECK: @llvm.ppc.altivec.vcmpequw.p
- res_i = vec_all_eq(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpeqfp.p
+ res_i = vec_all_eq(vsc, vsc);
+// CHECK: @llvm.ppc.altivec.vcmpequb.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequb.p
+
+ res_i = vec_all_eq(vsc, vbc);
+// CHECK: @llvm.ppc.altivec.vcmpequb.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequb.p
+
+ res_i = vec_all_eq(vuc, vuc);
+// CHECK: @llvm.ppc.altivec.vcmpequb.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequb.p
+
+ res_i = vec_all_eq(vuc, vbc);
+// CHECK: @llvm.ppc.altivec.vcmpequb.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequb.p
+
+ res_i = vec_all_eq(vbc, vsc);
+// CHECK: @llvm.ppc.altivec.vcmpequb.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequb.p
+
+ res_i = vec_all_eq(vbc, vuc);
+// CHECK: @llvm.ppc.altivec.vcmpequb.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequb.p
+
+ res_i = vec_all_eq(vbc, vbc);
+// CHECK: @llvm.ppc.altivec.vcmpequb.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequb.p
+
+ res_i = vec_all_eq(vs, vs);
+// CHECK: @llvm.ppc.altivec.vcmpequh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequh.p
+
+ res_i = vec_all_eq(vs, vbs);
+// CHECK: @llvm.ppc.altivec.vcmpequh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequh.p
+
+ res_i = vec_all_eq(vus, vus);
+// CHECK: @llvm.ppc.altivec.vcmpequh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequh.p
+
+ res_i = vec_all_eq(vus, vbs);
+// CHECK: @llvm.ppc.altivec.vcmpequh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequh.p
+
+ res_i = vec_all_eq(vbs, vs);
+// CHECK: @llvm.ppc.altivec.vcmpequh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequh.p
+
+ res_i = vec_all_eq(vbs, vus);
+// CHECK: @llvm.ppc.altivec.vcmpequh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequh.p
+
+ res_i = vec_all_eq(vbs, vbs);
+// CHECK: @llvm.ppc.altivec.vcmpequh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequh.p
+
+ res_i = vec_all_eq(vp, vp);
+// CHECK: @llvm.ppc.altivec.vcmpequh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequh.p
+
+ res_i = vec_all_eq(vi, vi);
+// CHECK: @llvm.ppc.altivec.vcmpequw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequw.p
+
+ res_i = vec_all_eq(vi, vbi);
+// CHECK: @llvm.ppc.altivec.vcmpequw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequw.p
+
+ res_i = vec_all_eq(vui, vui);
+// CHECK: @llvm.ppc.altivec.vcmpequw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequw.p
+
+ res_i = vec_all_eq(vui, vbi);
+// CHECK: @llvm.ppc.altivec.vcmpequw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequw.p
+
+ res_i = vec_all_eq(vbi, vi);
+// CHECK: @llvm.ppc.altivec.vcmpequw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequw.p
+
+ res_i = vec_all_eq(vbi, vui);
+// CHECK: @llvm.ppc.altivec.vcmpequw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequw.p
+
+ res_i = vec_all_eq(vbi, vbi);
+// CHECK: @llvm.ppc.altivec.vcmpequw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequw.p
+
+ res_i = vec_all_eq(vf, vf);
+// CHECK: @llvm.ppc.altivec.vcmpeqfp.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpeqfp.p
/* vec_all_ge */
- res_i = vec_all_ge(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtsb.p
- res_i = vec_all_ge(vsc, vbc); // CHECK: @llvm.ppc.altivec.vcmpgtsb.p
- res_i = vec_all_ge(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
- res_i = vec_all_ge(vuc, vbc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
- res_i = vec_all_ge(vbc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
- res_i = vec_all_ge(vbc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
- res_i = vec_all_ge(vbc, vbc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
- res_i = vec_all_ge(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p
- res_i = vec_all_ge(vs, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p
- res_i = vec_all_ge(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
- res_i = vec_all_ge(vus, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
- res_i = vec_all_ge(vbs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
- res_i = vec_all_ge(vbs, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
- res_i = vec_all_ge(vbs, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
- res_i = vec_all_ge(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p
- res_i = vec_all_ge(vi, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p
- res_i = vec_all_ge(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
- res_i = vec_all_ge(vui, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
- res_i = vec_all_ge(vbi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
- res_i = vec_all_ge(vbi, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
- res_i = vec_all_ge(vbi, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
- res_i = vec_all_ge(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgefp.p
+ res_i = vec_all_ge(vsc, vsc);
+// CHECK: @llvm.ppc.altivec.vcmpgtsb.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsb.p
+
+ res_i = vec_all_ge(vsc, vbc);
+// CHECK: @llvm.ppc.altivec.vcmpgtsb.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsb.p
+
+ res_i = vec_all_ge(vuc, vuc);
+// CHECK: @llvm.ppc.altivec.vcmpgtub.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtub.p
+
+ res_i = vec_all_ge(vuc, vbc);
+// CHECK: @llvm.ppc.altivec.vcmpgtub.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtub.p
+
+ res_i = vec_all_ge(vbc, vsc);
+// CHECK: @llvm.ppc.altivec.vcmpgtub.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtub.p
+
+ res_i = vec_all_ge(vbc, vuc);
+// CHECK: @llvm.ppc.altivec.vcmpgtub.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtub.p
+
+ res_i = vec_all_ge(vbc, vbc);
+// CHECK: @llvm.ppc.altivec.vcmpgtub.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtub.p
+
+ res_i = vec_all_ge(vs, vs);
+// CHECK: @llvm.ppc.altivec.vcmpgtsh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsh.p
+
+ res_i = vec_all_ge(vs, vbs);
+// CHECK: @llvm.ppc.altivec.vcmpgtsh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsh.p
+
+ res_i = vec_all_ge(vus, vus);
+// CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuh.p
+
+ res_i = vec_all_ge(vus, vbs);
+// CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuh.p
+
+ res_i = vec_all_ge(vbs, vs);
+// CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuh.p
+
+ res_i = vec_all_ge(vbs, vus);
+// CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuh.p
+
+ res_i = vec_all_ge(vbs, vbs);
+// CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuh.p
+
+ res_i = vec_all_ge(vi, vi);
+// CHECK: @llvm.ppc.altivec.vcmpgtsw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsw.p
+
+ res_i = vec_all_ge(vi, vbi);
+// CHECK: @llvm.ppc.altivec.vcmpgtsw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsw.p
+
+ res_i = vec_all_ge(vui, vui);
+// CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuw.p
+
+ res_i = vec_all_ge(vui, vbi);
+// CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuw.p
+
+ res_i = vec_all_ge(vbi, vi);
+// CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuw.p
+
+ res_i = vec_all_ge(vbi, vui);
+// CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuw.p
+
+ res_i = vec_all_ge(vbi, vbi);
+// CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuw.p
+
+ res_i = vec_all_ge(vf, vf);
+// CHECK: @llvm.ppc.altivec.vcmpgefp.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgefp.p
/* vec_all_gt */
- res_i = vec_all_gt(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtsb.p
- res_i = vec_all_gt(vsc, vbc); // CHECK: @llvm.ppc.altivec.vcmpgtsb.p
- res_i = vec_all_gt(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
- res_i = vec_all_gt(vuc, vbc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
- res_i = vec_all_gt(vbc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
- res_i = vec_all_gt(vbc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
- res_i = vec_all_gt(vbc, vbc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
- res_i = vec_all_gt(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p
- res_i = vec_all_gt(vs, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p
- res_i = vec_all_gt(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
- res_i = vec_all_gt(vus, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
- res_i = vec_all_gt(vbs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
- res_i = vec_all_gt(vbs, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
- res_i = vec_all_gt(vbs, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
- res_i = vec_all_gt(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p
- res_i = vec_all_gt(vi, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p
- res_i = vec_all_gt(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
- res_i = vec_all_gt(vui, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
- res_i = vec_all_gt(vbi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
- res_i = vec_all_gt(vbi, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
- res_i = vec_all_gt(vbi, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
- res_i = vec_all_gt(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp.p
+ res_i = vec_all_gt(vsc, vsc);
+// CHECK: @llvm.ppc.altivec.vcmpgtsb.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsb.p
+
+ res_i = vec_all_gt(vsc, vbc);
+// CHECK: @llvm.ppc.altivec.vcmpgtsb.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsb.p
+
+ res_i = vec_all_gt(vuc, vuc);
+// CHECK: @llvm.ppc.altivec.vcmpgtub.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtub.p
+
+ res_i = vec_all_gt(vuc, vbc);
+// CHECK: @llvm.ppc.altivec.vcmpgtub.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtub.p
+
+ res_i = vec_all_gt(vbc, vsc);
+// CHECK: @llvm.ppc.altivec.vcmpgtub.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtub.p
+
+ res_i = vec_all_gt(vbc, vuc);
+// CHECK: @llvm.ppc.altivec.vcmpgtub.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtub.p
+
+ res_i = vec_all_gt(vbc, vbc);
+// CHECK: @llvm.ppc.altivec.vcmpgtub.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtub.p
+
+ res_i = vec_all_gt(vs, vs);
+// CHECK: @llvm.ppc.altivec.vcmpgtsh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsh.p
+
+ res_i = vec_all_gt(vs, vbs);
+// CHECK: @llvm.ppc.altivec.vcmpgtsh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsh.p
+
+ res_i = vec_all_gt(vus, vus);
+// CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuh.p
+
+ res_i = vec_all_gt(vus, vbs);
+// CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuh.p
+
+ res_i = vec_all_gt(vbs, vs);
+// CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuh.p
+
+ res_i = vec_all_gt(vbs, vus);
+// CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuh.p
+
+ res_i = vec_all_gt(vbs, vbs);
+// CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuh.p
+
+ res_i = vec_all_gt(vi, vi);
+// CHECK: @llvm.ppc.altivec.vcmpgtsw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsw.p
+
+ res_i = vec_all_gt(vi, vbi);
+// CHECK: @llvm.ppc.altivec.vcmpgtsw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsw.p
+
+ res_i = vec_all_gt(vui, vui);
+// CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuw.p
+
+ res_i = vec_all_gt(vui, vbi);
+// CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuw.p
+
+ res_i = vec_all_gt(vbi, vi);
+// CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuw.p
+
+ res_i = vec_all_gt(vbi, vui);
+// CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuw.p
+
+ res_i = vec_all_gt(vbi, vbi);
+// CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuw.p
+
+ res_i = vec_all_gt(vf, vf);
+// CHECK: @llvm.ppc.altivec.vcmpgtfp.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtfp.p
/* vec_all_in */
- res_i = vec_all_in(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpbfp.p
+ res_i = vec_all_in(vf, vf);
+// CHECK: @llvm.ppc.altivec.vcmpbfp.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpbfp.p
/* vec_all_le */
- res_i = vec_all_le(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtsb.p
- res_i = vec_all_le(vsc, vbc); // CHECK: @llvm.ppc.altivec.vcmpgtsb.p
- res_i = vec_all_le(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
- res_i = vec_all_le(vuc, vbc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
- res_i = vec_all_le(vbc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
- res_i = vec_all_le(vbc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
- res_i = vec_all_le(vbc, vbc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
- res_i = vec_all_le(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p
- res_i = vec_all_le(vs, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p
- res_i = vec_all_le(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
- res_i = vec_all_le(vus, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
- res_i = vec_all_le(vbs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
- res_i = vec_all_le(vbs, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
- res_i = vec_all_le(vbs, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
- res_i = vec_all_le(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p
- res_i = vec_all_le(vi, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p
- res_i = vec_all_le(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
- res_i = vec_all_le(vui, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
- res_i = vec_all_le(vbi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
- res_i = vec_all_le(vbi, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
- res_i = vec_all_le(vbi, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
- res_i = vec_all_le(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgefp.p
+ res_i = vec_all_le(vsc, vsc);
+// CHECK: @llvm.ppc.altivec.vcmpgtsb.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsb.p
+
+ res_i = vec_all_le(vsc, vbc);
+// CHECK: @llvm.ppc.altivec.vcmpgtsb.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsb.p
+
+ res_i = vec_all_le(vuc, vuc);
+// CHECK: @llvm.ppc.altivec.vcmpgtub.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtub.p
+
+ res_i = vec_all_le(vuc, vbc);
+// CHECK: @llvm.ppc.altivec.vcmpgtub.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtub.p
+
+ res_i = vec_all_le(vbc, vsc);
+// CHECK: @llvm.ppc.altivec.vcmpgtub.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtub.p
+
+ res_i = vec_all_le(vbc, vuc);
+// CHECK: @llvm.ppc.altivec.vcmpgtub.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtub.p
+
+ res_i = vec_all_le(vbc, vbc);
+// CHECK: @llvm.ppc.altivec.vcmpgtub.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtub.p
+
+ res_i = vec_all_le(vs, vs);
+// CHECK: @llvm.ppc.altivec.vcmpgtsh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsh.p
+
+ res_i = vec_all_le(vs, vbs);
+// CHECK: @llvm.ppc.altivec.vcmpgtsh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsh.p
+
+ res_i = vec_all_le(vus, vus);
+// CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuh.p
+
+ res_i = vec_all_le(vus, vbs);
+// CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuh.p
+
+ res_i = vec_all_le(vbs, vs);
+// CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuh.p
+
+ res_i = vec_all_le(vbs, vus);
+// CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuh.p
+
+ res_i = vec_all_le(vbs, vbs);
+// CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuh.p
+
+ res_i = vec_all_le(vi, vi);
+// CHECK: @llvm.ppc.altivec.vcmpgtsw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsw.p
+
+ res_i = vec_all_le(vi, vbi);
+// CHECK: @llvm.ppc.altivec.vcmpgtsw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsw.p
+
+ res_i = vec_all_le(vui, vui);
+// CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuw.p
+
+ res_i = vec_all_le(vui, vbi);
+// CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuw.p
+
+ res_i = vec_all_le(vbi, vi);
+// CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuw.p
+
+ res_i = vec_all_le(vbi, vui);
+// CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuw.p
+
+ res_i = vec_all_le(vbi, vbi);
+// CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuw.p
+
+ res_i = vec_all_le(vf, vf);
+// CHECK: @llvm.ppc.altivec.vcmpgefp.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgefp.p
/* vec_all_lt */
- res_i = vec_all_lt(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtsb.p
- res_i = vec_all_lt(vsc, vbc); // CHECK: @llvm.ppc.altivec.vcmpgtsb.p
- res_i = vec_all_lt(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
- res_i = vec_all_lt(vuc, vbc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
- res_i = vec_all_lt(vbc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
- res_i = vec_all_lt(vbc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
- res_i = vec_all_lt(vbc, vbc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
- res_i = vec_all_lt(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p
- res_i = vec_all_lt(vs, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p
- res_i = vec_all_lt(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
- res_i = vec_all_lt(vus, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
- res_i = vec_all_lt(vbs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
- res_i = vec_all_lt(vbs, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
- res_i = vec_all_lt(vbs, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
- res_i = vec_all_lt(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p
- res_i = vec_all_lt(vi, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p
- res_i = vec_all_lt(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
- res_i = vec_all_lt(vui, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
- res_i = vec_all_lt(vbi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
- res_i = vec_all_lt(vbi, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
- res_i = vec_all_lt(vbi, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
- res_i = vec_all_lt(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp.p
+ res_i = vec_all_lt(vsc, vsc);
+// CHECK: @llvm.ppc.altivec.vcmpgtsb.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsb.p
+
+ res_i = vec_all_lt(vsc, vbc);
+// CHECK: @llvm.ppc.altivec.vcmpgtsb.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsb.p
+
+ res_i = vec_all_lt(vuc, vuc);
+// CHECK: @llvm.ppc.altivec.vcmpgtub.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtub.p
+
+ res_i = vec_all_lt(vuc, vbc);
+// CHECK: @llvm.ppc.altivec.vcmpgtub.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtub.p
+
+ res_i = vec_all_lt(vbc, vsc);
+// CHECK: @llvm.ppc.altivec.vcmpgtub.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtub.p
+
+ res_i = vec_all_lt(vbc, vuc);
+// CHECK: @llvm.ppc.altivec.vcmpgtub.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtub.p
+
+ res_i = vec_all_lt(vbc, vbc);
+// CHECK: @llvm.ppc.altivec.vcmpgtub.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtub.p
+
+ res_i = vec_all_lt(vs, vs);
+// CHECK: @llvm.ppc.altivec.vcmpgtsh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsh.p
+
+ res_i = vec_all_lt(vs, vbs);
+// CHECK: @llvm.ppc.altivec.vcmpgtsh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsh.p
+
+ res_i = vec_all_lt(vus, vus);
+// CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuh.p
+
+ res_i = vec_all_lt(vus, vbs);
+// CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuh.p
+
+ res_i = vec_all_lt(vbs, vs);
+// CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuh.p
+
+ res_i = vec_all_lt(vbs, vus);
+// CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuh.p
+
+ res_i = vec_all_lt(vbs, vbs);
+// CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuh.p
+
+ res_i = vec_all_lt(vi, vi);
+// CHECK: @llvm.ppc.altivec.vcmpgtsw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsw.p
+
+ res_i = vec_all_lt(vi, vbi);
+// CHECK: @llvm.ppc.altivec.vcmpgtsw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsw.p
+
+ res_i = vec_all_lt(vui, vui);
+// CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuw.p
+
+ res_i = vec_all_lt(vui, vbi);
+// CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuw.p
+
+ res_i = vec_all_lt(vbi, vi);
+// CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuw.p
+
+ res_i = vec_all_lt(vbi, vui);
+// CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuw.p
+
+ res_i = vec_all_lt(vbi, vbi);
+// CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuw.p
+
+ res_i = vec_all_lt(vf, vf);
+// CHECK: @llvm.ppc.altivec.vcmpgtfp.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtfp.p
/* vec_all_nan */
- res_i = vec_all_nan(vf); // CHECK: @llvm.ppc.altivec.vcmpeqfp.p
+ res_i = vec_all_nan(vf);
+// CHECK: @llvm.ppc.altivec.vcmpeqfp.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpeqfp.p
/* vec_all_ne */
- res_i = vec_all_ne(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
- res_i = vec_all_ne(vsc, vbc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
- res_i = vec_all_ne(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
- res_i = vec_all_ne(vuc, vbc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
- res_i = vec_all_ne(vbc, vsc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
- res_i = vec_all_ne(vbc, vuc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
- res_i = vec_all_ne(vbc, vbc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
- res_i = vec_all_ne(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpequh.p
- res_i = vec_all_ne(vs, vbs); // CHECK: @llvm.ppc.altivec.vcmpequh.p
- res_i = vec_all_ne(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpequh.p
- res_i = vec_all_ne(vus, vbs); // CHECK: @llvm.ppc.altivec.vcmpequh.p
- res_i = vec_all_ne(vbs, vs); // CHECK: @llvm.ppc.altivec.vcmpequh.p
- res_i = vec_all_ne(vbs, vus); // CHECK: @llvm.ppc.altivec.vcmpequh.p
- res_i = vec_all_ne(vbs, vbs); // CHECK: @llvm.ppc.altivec.vcmpequh.p
- res_i = vec_all_ne(vp, vp); // CHECK: @llvm.ppc.altivec.vcmpequh.p
- res_i = vec_all_ne(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpequw.p
- res_i = vec_all_ne(vi, vbi); // CHECK: @llvm.ppc.altivec.vcmpequw.p
- res_i = vec_all_ne(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpequw.p
- res_i = vec_all_ne(vui, vbi); // CHECK: @llvm.ppc.altivec.vcmpequw.p
- res_i = vec_all_ne(vbi, vi); // CHECK: @llvm.ppc.altivec.vcmpequw.p
- res_i = vec_all_ne(vbi, vui); // CHECK: @llvm.ppc.altivec.vcmpequw.p
- res_i = vec_all_ne(vbi, vbi); // CHECK: @llvm.ppc.altivec.vcmpequw.p
- res_i = vec_all_ne(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpeqfp.p
+ res_i = vec_all_ne(vsc, vsc);
+// CHECK: @llvm.ppc.altivec.vcmpequb.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequb.p
+
+ res_i = vec_all_ne(vsc, vbc);
+// CHECK: @llvm.ppc.altivec.vcmpequb.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequb.p
+
+ res_i = vec_all_ne(vuc, vuc);
+// CHECK: @llvm.ppc.altivec.vcmpequb.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequb.p
+
+ res_i = vec_all_ne(vuc, vbc);
+// CHECK: @llvm.ppc.altivec.vcmpequb.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequb.p
+
+ res_i = vec_all_ne(vbc, vsc);
+// CHECK: @llvm.ppc.altivec.vcmpequb.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequb.p
+
+ res_i = vec_all_ne(vbc, vuc);
+// CHECK: @llvm.ppc.altivec.vcmpequb.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequb.p
+
+ res_i = vec_all_ne(vbc, vbc);
+// CHECK: @llvm.ppc.altivec.vcmpequb.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequb.p
+
+ res_i = vec_all_ne(vs, vs);
+// CHECK: @llvm.ppc.altivec.vcmpequh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequh.p
+
+ res_i = vec_all_ne(vs, vbs);
+// CHECK: @llvm.ppc.altivec.vcmpequh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequh.p
+
+ res_i = vec_all_ne(vus, vus);
+// CHECK: @llvm.ppc.altivec.vcmpequh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequh.p
+
+ res_i = vec_all_ne(vus, vbs);
+// CHECK: @llvm.ppc.altivec.vcmpequh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequh.p
+
+ res_i = vec_all_ne(vbs, vs);
+// CHECK: @llvm.ppc.altivec.vcmpequh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequh.p
+
+ res_i = vec_all_ne(vbs, vus);
+// CHECK: @llvm.ppc.altivec.vcmpequh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequh.p
+
+ res_i = vec_all_ne(vbs, vbs);
+// CHECK: @llvm.ppc.altivec.vcmpequh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequh.p
+
+ res_i = vec_all_ne(vp, vp);
+// CHECK: @llvm.ppc.altivec.vcmpequh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequh.p
+
+ res_i = vec_all_ne(vi, vi);
+// CHECK: @llvm.ppc.altivec.vcmpequw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequw.p
+
+ res_i = vec_all_ne(vi, vbi);
+// CHECK: @llvm.ppc.altivec.vcmpequw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequw.p
+
+ res_i = vec_all_ne(vui, vui);
+// CHECK: @llvm.ppc.altivec.vcmpequw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequw.p
+
+ res_i = vec_all_ne(vui, vbi);
+// CHECK: @llvm.ppc.altivec.vcmpequw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequw.p
+
+ res_i = vec_all_ne(vbi, vi);
+// CHECK: @llvm.ppc.altivec.vcmpequw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequw.p
+
+ res_i = vec_all_ne(vbi, vui);
+// CHECK: @llvm.ppc.altivec.vcmpequw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequw.p
+
+ res_i = vec_all_ne(vbi, vbi);
+// CHECK: @llvm.ppc.altivec.vcmpequw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequw.p
+
+ res_i = vec_all_ne(vf, vf);
+// CHECK: @llvm.ppc.altivec.vcmpeqfp.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpeqfp.p
/* vec_all_nge */
- res_i = vec_all_nge(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgefp.p
+ res_i = vec_all_nge(vf, vf);
+// CHECK: @llvm.ppc.altivec.vcmpgefp.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgefp.p
/* vec_all_ngt */
- res_i = vec_all_ngt(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp.p
+ res_i = vec_all_ngt(vf, vf);
+// CHECK: @llvm.ppc.altivec.vcmpgtfp.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtfp.p
/* vec_all_nle */
- res_i = vec_all_nle(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgefp.p
+ res_i = vec_all_nle(vf, vf);
+// CHECK: @llvm.ppc.altivec.vcmpgefp.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgefp.p
/* vec_all_nlt */
- res_i = vec_all_nlt(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp.p
+ res_i = vec_all_nlt(vf, vf);
+// CHECK: @llvm.ppc.altivec.vcmpgtfp.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtfp.p
/* vec_all_numeric */
- res_i = vec_all_numeric(vf); // CHECK: @llvm.ppc.altivec.vcmpeqfp.p
+ res_i = vec_all_numeric(vf);
+// CHECK: @llvm.ppc.altivec.vcmpeqfp.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpeqfp.p
/* vec_any_eq */
- res_i = vec_any_eq(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
- res_i = vec_any_eq(vsc, vbc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
- res_i = vec_any_eq(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
- res_i = vec_any_eq(vuc, vbc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
- res_i = vec_any_eq(vbc, vsc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
- res_i = vec_any_eq(vbc, vuc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
- res_i = vec_any_eq(vbc, vbc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
- res_i = vec_any_eq(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpequh.p
- res_i = vec_any_eq(vs, vbs); // CHECK: @llvm.ppc.altivec.vcmpequh.p
- res_i = vec_any_eq(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpequh.p
- res_i = vec_any_eq(vus, vbs); // CHECK: @llvm.ppc.altivec.vcmpequh.p
- res_i = vec_any_eq(vbs, vs); // CHECK: @llvm.ppc.altivec.vcmpequh.p
- res_i = vec_any_eq(vbs, vus); // CHECK: @llvm.ppc.altivec.vcmpequh.p
- res_i = vec_any_eq(vbs, vbs); // CHECK: @llvm.ppc.altivec.vcmpequh.p
- res_i = vec_any_eq(vp, vp); // CHECK: @llvm.ppc.altivec.vcmpequh.p
- res_i = vec_any_eq(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpequw.p
- res_i = vec_any_eq(vi, vbi); // CHECK: @llvm.ppc.altivec.vcmpequw.p
- res_i = vec_any_eq(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpequw.p
- res_i = vec_any_eq(vui, vbi); // CHECK: @llvm.ppc.altivec.vcmpequw.p
- res_i = vec_any_eq(vbi, vi); // CHECK: @llvm.ppc.altivec.vcmpequw.p
- res_i = vec_any_eq(vbi, vui); // CHECK: @llvm.ppc.altivec.vcmpequw.p
- res_i = vec_any_eq(vbi, vbi); // CHECK: @llvm.ppc.altivec.vcmpequw.p
- res_i = vec_any_eq(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpeqfp.p
+ res_i = vec_any_eq(vsc, vsc);
+// CHECK: @llvm.ppc.altivec.vcmpequb.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequb.p
+
+ res_i = vec_any_eq(vsc, vbc);
+// CHECK: @llvm.ppc.altivec.vcmpequb.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequb.p
+
+ res_i = vec_any_eq(vuc, vuc);
+// CHECK: @llvm.ppc.altivec.vcmpequb.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequb.p
+
+ res_i = vec_any_eq(vuc, vbc);
+// CHECK: @llvm.ppc.altivec.vcmpequb.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequb.p
+
+ res_i = vec_any_eq(vbc, vsc);
+// CHECK: @llvm.ppc.altivec.vcmpequb.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequb.p
+
+ res_i = vec_any_eq(vbc, vuc);
+// CHECK: @llvm.ppc.altivec.vcmpequb.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequb.p
+
+ res_i = vec_any_eq(vbc, vbc);
+// CHECK: @llvm.ppc.altivec.vcmpequb.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequb.p
+
+ res_i = vec_any_eq(vs, vs);
+// CHECK: @llvm.ppc.altivec.vcmpequh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequh.p
+
+ res_i = vec_any_eq(vs, vbs);
+// CHECK: @llvm.ppc.altivec.vcmpequh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequh.p
+
+ res_i = vec_any_eq(vus, vus);
+// CHECK: @llvm.ppc.altivec.vcmpequh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequh.p
+
+ res_i = vec_any_eq(vus, vbs);
+// CHECK: @llvm.ppc.altivec.vcmpequh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequh.p
+
+ res_i = vec_any_eq(vbs, vs);
+// CHECK: @llvm.ppc.altivec.vcmpequh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequh.p
+
+ res_i = vec_any_eq(vbs, vus);
+// CHECK: @llvm.ppc.altivec.vcmpequh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequh.p
+
+ res_i = vec_any_eq(vbs, vbs);
+// CHECK: @llvm.ppc.altivec.vcmpequh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequh.p
+
+ res_i = vec_any_eq(vp, vp);
+// CHECK: @llvm.ppc.altivec.vcmpequh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequh.p
+
+ res_i = vec_any_eq(vi, vi);
+// CHECK: @llvm.ppc.altivec.vcmpequw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequw.p
+
+ res_i = vec_any_eq(vi, vbi);
+// CHECK: @llvm.ppc.altivec.vcmpequw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequw.p
+
+ res_i = vec_any_eq(vui, vui);
+// CHECK: @llvm.ppc.altivec.vcmpequw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequw.p
+
+ res_i = vec_any_eq(vui, vbi);
+// CHECK: @llvm.ppc.altivec.vcmpequw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequw.p
+
+ res_i = vec_any_eq(vbi, vi);
+// CHECK: @llvm.ppc.altivec.vcmpequw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequw.p
+
+ res_i = vec_any_eq(vbi, vui);
+// CHECK: @llvm.ppc.altivec.vcmpequw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequw.p
+
+ res_i = vec_any_eq(vbi, vbi);
+// CHECK: @llvm.ppc.altivec.vcmpequw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequw.p
+
+ res_i = vec_any_eq(vf, vf);
+// CHECK: @llvm.ppc.altivec.vcmpeqfp.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpeqfp.p
/* vec_any_ge */
- res_i = vec_any_ge(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtsb.p
- res_i = vec_any_ge(vsc, vbc); // CHECK: @llvm.ppc.altivec.vcmpgtsb.p
- res_i = vec_any_ge(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
- res_i = vec_any_ge(vuc, vbc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
- res_i = vec_any_ge(vbc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
- res_i = vec_any_ge(vbc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
- res_i = vec_any_ge(vbc, vbc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
- res_i = vec_any_ge(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p
- res_i = vec_any_ge(vs, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p
- res_i = vec_any_ge(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
- res_i = vec_any_ge(vus, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
- res_i = vec_any_ge(vbs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
- res_i = vec_any_ge(vbs, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
- res_i = vec_any_ge(vbs, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
- res_i = vec_any_ge(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p
- res_i = vec_any_ge(vi, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p
- res_i = vec_any_ge(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
- res_i = vec_any_ge(vui, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
- res_i = vec_any_ge(vbi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
- res_i = vec_any_ge(vbi, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
- res_i = vec_any_ge(vbi, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
- res_i = vec_any_ge(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgefp.p
+ res_i = vec_any_ge(vsc, vsc);
+// CHECK: @llvm.ppc.altivec.vcmpgtsb.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsb.p
+
+ res_i = vec_any_ge(vsc, vbc);
+// CHECK: @llvm.ppc.altivec.vcmpgtsb.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsb.p
+
+ res_i = vec_any_ge(vuc, vuc);
+// CHECK: @llvm.ppc.altivec.vcmpgtub.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtub.p
+
+ res_i = vec_any_ge(vuc, vbc);
+// CHECK: @llvm.ppc.altivec.vcmpgtub.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtub.p
+
+ res_i = vec_any_ge(vbc, vsc);
+// CHECK: @llvm.ppc.altivec.vcmpgtub.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtub.p
+
+ res_i = vec_any_ge(vbc, vuc);
+// CHECK: @llvm.ppc.altivec.vcmpgtub.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtub.p
+
+ res_i = vec_any_ge(vbc, vbc);
+// CHECK: @llvm.ppc.altivec.vcmpgtub.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtub.p
+
+ res_i = vec_any_ge(vs, vs);
+// CHECK: @llvm.ppc.altivec.vcmpgtsh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsh.p
+
+ res_i = vec_any_ge(vs, vbs);
+// CHECK: @llvm.ppc.altivec.vcmpgtsh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsh.p
+
+ res_i = vec_any_ge(vus, vus);
+// CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuh.p
+
+ res_i = vec_any_ge(vus, vbs);
+// CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuh.p
+
+ res_i = vec_any_ge(vbs, vs);
+// CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuh.p
+
+ res_i = vec_any_ge(vbs, vus);
+// CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuh.p
+
+ res_i = vec_any_ge(vbs, vbs);
+// CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuh.p
+
+ res_i = vec_any_ge(vi, vi);
+// CHECK: @llvm.ppc.altivec.vcmpgtsw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsw.p
+
+ res_i = vec_any_ge(vi, vbi);
+// CHECK: @llvm.ppc.altivec.vcmpgtsw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsw.p
+
+ res_i = vec_any_ge(vui, vui);
+// CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuw.p
+
+ res_i = vec_any_ge(vui, vbi);
+// CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuw.p
+
+ res_i = vec_any_ge(vbi, vi);
+// CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuw.p
+
+ res_i = vec_any_ge(vbi, vui);
+// CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuw.p
+
+ res_i = vec_any_ge(vbi, vbi);
+// CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuw.p
+
+ res_i = vec_any_ge(vf, vf);
+// CHECK: @llvm.ppc.altivec.vcmpgefp.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgefp.p
/* vec_any_gt */
- res_i = vec_any_gt(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtsb.p
- res_i = vec_any_gt(vsc, vbc); // CHECK: @llvm.ppc.altivec.vcmpgtsb.p
- res_i = vec_any_gt(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
- res_i = vec_any_gt(vuc, vbc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
- res_i = vec_any_gt(vbc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
- res_i = vec_any_gt(vbc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
- res_i = vec_any_gt(vbc, vbc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
- res_i = vec_any_gt(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p
- res_i = vec_any_gt(vs, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p
- res_i = vec_any_gt(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
- res_i = vec_any_gt(vus, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
- res_i = vec_any_gt(vbs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
- res_i = vec_any_gt(vbs, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
- res_i = vec_any_gt(vbs, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
- res_i = vec_any_gt(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p
- res_i = vec_any_gt(vi, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p
- res_i = vec_any_gt(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
- res_i = vec_any_gt(vui, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
- res_i = vec_any_gt(vbi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
- res_i = vec_any_gt(vbi, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
- res_i = vec_any_gt(vbi, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
- res_i = vec_any_gt(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp.p
+ res_i = vec_any_gt(vsc, vsc);
+// CHECK: @llvm.ppc.altivec.vcmpgtsb.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsb.p
+
+ res_i = vec_any_gt(vsc, vbc);
+// CHECK: @llvm.ppc.altivec.vcmpgtsb.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsb.p
+
+ res_i = vec_any_gt(vuc, vuc);
+// CHECK: @llvm.ppc.altivec.vcmpgtub.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtub.p
+
+ res_i = vec_any_gt(vuc, vbc);
+// CHECK: @llvm.ppc.altivec.vcmpgtub.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtub.p
+
+ res_i = vec_any_gt(vbc, vsc);
+// CHECK: @llvm.ppc.altivec.vcmpgtub.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtub.p
+
+ res_i = vec_any_gt(vbc, vuc);
+// CHECK: @llvm.ppc.altivec.vcmpgtub.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtub.p
+
+ res_i = vec_any_gt(vbc, vbc);
+// CHECK: @llvm.ppc.altivec.vcmpgtub.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtub.p
+
+ res_i = vec_any_gt(vs, vs);
+// CHECK: @llvm.ppc.altivec.vcmpgtsh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsh.p
+
+ res_i = vec_any_gt(vs, vbs);
+// CHECK: @llvm.ppc.altivec.vcmpgtsh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsh.p
+
+ res_i = vec_any_gt(vus, vus);
+// CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuh.p
+
+ res_i = vec_any_gt(vus, vbs);
+// CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuh.p
+
+ res_i = vec_any_gt(vbs, vs);
+// CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuh.p
+
+ res_i = vec_any_gt(vbs, vus);
+// CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuh.p
+
+ res_i = vec_any_gt(vbs, vbs);
+// CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuh.p
+
+ res_i = vec_any_gt(vi, vi);
+// CHECK: @llvm.ppc.altivec.vcmpgtsw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsw.p
+
+ res_i = vec_any_gt(vi, vbi);
+// CHECK: @llvm.ppc.altivec.vcmpgtsw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsw.p
+
+ res_i = vec_any_gt(vui, vui);
+// CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuw.p
+
+ res_i = vec_any_gt(vui, vbi);
+// CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuw.p
+
+ res_i = vec_any_gt(vbi, vi);
+// CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuw.p
+
+ res_i = vec_any_gt(vbi, vui);
+// CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuw.p
+
+ res_i = vec_any_gt(vbi, vbi);
+// CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuw.p
+
+ res_i = vec_any_gt(vf, vf);
+// CHECK: @llvm.ppc.altivec.vcmpgtfp.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtfp.p
/* vec_any_le */
- res_i = vec_any_le(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtsb.p
- res_i = vec_any_le(vsc, vbc); // CHECK: @llvm.ppc.altivec.vcmpgtsb.p
- res_i = vec_any_le(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
- res_i = vec_any_le(vuc, vbc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
- res_i = vec_any_le(vbc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
- res_i = vec_any_le(vbc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
- res_i = vec_any_le(vbc, vbc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
- res_i = vec_any_le(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p
- res_i = vec_any_le(vs, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p
- res_i = vec_any_le(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
- res_i = vec_any_le(vus, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
- res_i = vec_any_le(vbs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
- res_i = vec_any_le(vbs, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
- res_i = vec_any_le(vbs, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
- res_i = vec_any_le(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p
- res_i = vec_any_le(vi, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p
- res_i = vec_any_le(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
- res_i = vec_any_le(vui, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
- res_i = vec_any_le(vbi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
- res_i = vec_any_le(vbi, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
- res_i = vec_any_le(vbi, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
- res_i = vec_any_le(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgefp.p
+ res_i = vec_any_le(vsc, vsc);
+// CHECK: @llvm.ppc.altivec.vcmpgtsb.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsb.p
+
+ res_i = vec_any_le(vsc, vbc);
+// CHECK: @llvm.ppc.altivec.vcmpgtsb.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsb.p
+
+ res_i = vec_any_le(vuc, vuc);
+// CHECK: @llvm.ppc.altivec.vcmpgtub.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtub.p
+
+ res_i = vec_any_le(vuc, vbc);
+// CHECK: @llvm.ppc.altivec.vcmpgtub.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtub.p
+
+ res_i = vec_any_le(vbc, vsc);
+// CHECK: @llvm.ppc.altivec.vcmpgtub.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtub.p
+
+ res_i = vec_any_le(vbc, vuc);
+// CHECK: @llvm.ppc.altivec.vcmpgtub.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtub.p
+
+ res_i = vec_any_le(vbc, vbc);
+// CHECK: @llvm.ppc.altivec.vcmpgtub.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtub.p
+
+ res_i = vec_any_le(vs, vs);
+// CHECK: @llvm.ppc.altivec.vcmpgtsh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsh.p
+
+ res_i = vec_any_le(vs, vbs);
+// CHECK: @llvm.ppc.altivec.vcmpgtsh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsh.p
+
+ res_i = vec_any_le(vus, vus);
+// CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuh.p
+
+ res_i = vec_any_le(vus, vbs);
+// CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuh.p
+
+ res_i = vec_any_le(vbs, vs);
+// CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuh.p
+
+ res_i = vec_any_le(vbs, vus);
+// CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuh.p
+
+ res_i = vec_any_le(vbs, vbs);
+// CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuh.p
+
+ res_i = vec_any_le(vi, vi);
+// CHECK: @llvm.ppc.altivec.vcmpgtsw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsw.p
+
+ res_i = vec_any_le(vi, vbi);
+// CHECK: @llvm.ppc.altivec.vcmpgtsw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsw.p
+
+ res_i = vec_any_le(vui, vui);
+// CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuw.p
+
+ res_i = vec_any_le(vui, vbi);
+// CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuw.p
+
+ res_i = vec_any_le(vbi, vi);
+// CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuw.p
+
+ res_i = vec_any_le(vbi, vui);
+// CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuw.p
+
+ res_i = vec_any_le(vbi, vbi);
+// CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuw.p
+
+ res_i = vec_any_le(vf, vf);
+// CHECK: @llvm.ppc.altivec.vcmpgefp.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgefp.p
/* vec_any_lt */
- res_i = vec_any_lt(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtsb.p
- res_i = vec_any_lt(vsc, vbc); // CHECK: @llvm.ppc.altivec.vcmpgtsb.p
- res_i = vec_any_lt(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
- res_i = vec_any_lt(vuc, vbc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
- res_i = vec_any_lt(vbc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
- res_i = vec_any_lt(vbc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
- res_i = vec_any_lt(vbc, vbc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
- res_i = vec_any_lt(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p
- res_i = vec_any_lt(vs, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p
- res_i = vec_any_lt(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
- res_i = vec_any_lt(vus, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
- res_i = vec_any_lt(vbs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
- res_i = vec_any_lt(vbs, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
- res_i = vec_any_lt(vbs, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
- res_i = vec_any_lt(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p
- res_i = vec_any_lt(vi, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p
- res_i = vec_any_lt(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
- res_i = vec_any_lt(vui, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
- res_i = vec_any_lt(vbi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
- res_i = vec_any_lt(vbi, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
- res_i = vec_any_lt(vbi, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
- res_i = vec_any_lt(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp.p
+ res_i = vec_any_lt(vsc, vsc);
+// CHECK: @llvm.ppc.altivec.vcmpgtsb.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsb.p
+
+ res_i = vec_any_lt(vsc, vbc);
+// CHECK: @llvm.ppc.altivec.vcmpgtsb.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsb.p
+
+ res_i = vec_any_lt(vuc, vuc);
+// CHECK: @llvm.ppc.altivec.vcmpgtub.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtub.p
+
+ res_i = vec_any_lt(vuc, vbc);
+// CHECK: @llvm.ppc.altivec.vcmpgtub.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtub.p
+
+ res_i = vec_any_lt(vbc, vsc);
+// CHECK: @llvm.ppc.altivec.vcmpgtub.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtub.p
+
+ res_i = vec_any_lt(vbc, vuc);
+// CHECK: @llvm.ppc.altivec.vcmpgtub.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtub.p
+
+ res_i = vec_any_lt(vbc, vbc);
+// CHECK: @llvm.ppc.altivec.vcmpgtub.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtub.p
+
+ res_i = vec_any_lt(vs, vs);
+// CHECK: @llvm.ppc.altivec.vcmpgtsh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsh.p
+
+ res_i = vec_any_lt(vs, vbs);
+// CHECK: @llvm.ppc.altivec.vcmpgtsh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsh.p
+
+ res_i = vec_any_lt(vus, vus);
+// CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuh.p
+
+ res_i = vec_any_lt(vus, vbs);
+// CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuh.p
+
+ res_i = vec_any_lt(vbs, vs);
+// CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuh.p
+
+ res_i = vec_any_lt(vbs, vus);
+// CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuh.p
+
+ res_i = vec_any_lt(vbs, vbs);
+// CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuh.p
+
+ res_i = vec_any_lt(vi, vi);
+// CHECK: @llvm.ppc.altivec.vcmpgtsw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsw.p
+
+ res_i = vec_any_lt(vi, vbi);
+// CHECK: @llvm.ppc.altivec.vcmpgtsw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsw.p
+
+ res_i = vec_any_lt(vui, vui);
+// CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuw.p
+
+ res_i = vec_any_lt(vui, vbi);
+// CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuw.p
+
+ res_i = vec_any_lt(vbi, vi);
+// CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuw.p
+
+ res_i = vec_any_lt(vbi, vui);
+// CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuw.p
+
+ res_i = vec_any_lt(vbi, vbi);
+// CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuw.p
+
+ res_i = vec_any_lt(vf, vf);
+// CHECK: @llvm.ppc.altivec.vcmpgtfp.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtfp.p
/* vec_any_nan */
- res_i = vec_any_nan(vf); // CHECK: @llvm.ppc.altivec.vcmpeqfp.p
+ res_i = vec_any_nan(vf);
+// CHECK: @llvm.ppc.altivec.vcmpeqfp.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpeqfp.p
/* vec_any_ne */
- res_i = vec_any_ne(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
- res_i = vec_any_ne(vsc, vbc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
- res_i = vec_any_ne(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
- res_i = vec_any_ne(vuc, vbc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
- res_i = vec_any_ne(vbc, vsc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
- res_i = vec_any_ne(vbc, vuc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
- res_i = vec_any_ne(vbc, vbc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
- res_i = vec_any_ne(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpequh.p
- res_i = vec_any_ne(vs, vbs); // CHECK: @llvm.ppc.altivec.vcmpequh.p
- res_i = vec_any_ne(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpequh.p
- res_i = vec_any_ne(vus, vbs); // CHECK: @llvm.ppc.altivec.vcmpequh.p
- res_i = vec_any_ne(vbs, vs); // CHECK: @llvm.ppc.altivec.vcmpequh.p
- res_i = vec_any_ne(vbs, vus); // CHECK: @llvm.ppc.altivec.vcmpequh.p
- res_i = vec_any_ne(vbs, vbs); // CHECK: @llvm.ppc.altivec.vcmpequh.p
- res_i = vec_any_ne(vp, vp); // CHECK: @llvm.ppc.altivec.vcmpequh.p
- res_i = vec_any_ne(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpequw.p
- res_i = vec_any_ne(vi, vbi); // CHECK: @llvm.ppc.altivec.vcmpequw.p
- res_i = vec_any_ne(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpequw.p
- res_i = vec_any_ne(vui, vbi); // CHECK: @llvm.ppc.altivec.vcmpequw.p
- res_i = vec_any_ne(vbi, vi); // CHECK: @llvm.ppc.altivec.vcmpequw.p
- res_i = vec_any_ne(vbi, vui); // CHECK: @llvm.ppc.altivec.vcmpequw.p
- res_i = vec_any_ne(vbi, vbi); // CHECK: @llvm.ppc.altivec.vcmpequw.p
- res_i = vec_any_ne(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpeqfp.p
+ res_i = vec_any_ne(vsc, vsc);
+// CHECK: @llvm.ppc.altivec.vcmpequb.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequb.p
+
+ res_i = vec_any_ne(vsc, vbc);
+// CHECK: @llvm.ppc.altivec.vcmpequb.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequb.p
+
+ res_i = vec_any_ne(vuc, vuc);
+// CHECK: @llvm.ppc.altivec.vcmpequb.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequb.p
+
+ res_i = vec_any_ne(vuc, vbc);
+// CHECK: @llvm.ppc.altivec.vcmpequb.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequb.p
+
+ res_i = vec_any_ne(vbc, vsc);
+// CHECK: @llvm.ppc.altivec.vcmpequb.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequb.p
+
+ res_i = vec_any_ne(vbc, vuc);
+// CHECK: @llvm.ppc.altivec.vcmpequb.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequb.p
+
+ res_i = vec_any_ne(vbc, vbc);
+// CHECK: @llvm.ppc.altivec.vcmpequb.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequb.p
+
+ res_i = vec_any_ne(vs, vs);
+// CHECK: @llvm.ppc.altivec.vcmpequh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequh.p
+
+ res_i = vec_any_ne(vs, vbs);
+// CHECK: @llvm.ppc.altivec.vcmpequh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequh.p
+
+ res_i = vec_any_ne(vus, vus);
+// CHECK: @llvm.ppc.altivec.vcmpequh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequh.p
+
+ res_i = vec_any_ne(vus, vbs);
+// CHECK: @llvm.ppc.altivec.vcmpequh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequh.p
+
+ res_i = vec_any_ne(vbs, vs);
+// CHECK: @llvm.ppc.altivec.vcmpequh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequh.p
+
+ res_i = vec_any_ne(vbs, vus);
+// CHECK: @llvm.ppc.altivec.vcmpequh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequh.p
+
+ res_i = vec_any_ne(vbs, vbs);
+// CHECK: @llvm.ppc.altivec.vcmpequh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequh.p
+
+ res_i = vec_any_ne(vp, vp);
+// CHECK: @llvm.ppc.altivec.vcmpequh.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequh.p
+
+ res_i = vec_any_ne(vi, vi);
+// CHECK: @llvm.ppc.altivec.vcmpequw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequw.p
+
+ res_i = vec_any_ne(vi, vbi);
+// CHECK: @llvm.ppc.altivec.vcmpequw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequw.p
+
+ res_i = vec_any_ne(vui, vui);
+// CHECK: @llvm.ppc.altivec.vcmpequw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequw.p
+
+ res_i = vec_any_ne(vui, vbi);
+// CHECK: @llvm.ppc.altivec.vcmpequw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequw.p
+
+ res_i = vec_any_ne(vbi, vi);
+// CHECK: @llvm.ppc.altivec.vcmpequw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequw.p
+
+ res_i = vec_any_ne(vbi, vui);
+// CHECK: @llvm.ppc.altivec.vcmpequw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequw.p
+
+ res_i = vec_any_ne(vbi, vbi);
+// CHECK: @llvm.ppc.altivec.vcmpequw.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpequw.p
+
+ res_i = vec_any_ne(vf, vf);
+// CHECK: @llvm.ppc.altivec.vcmpeqfp.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpeqfp.p
/* vec_any_nge */
- res_i = vec_any_nge(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgefp.p
+ res_i = vec_any_nge(vf, vf);
+// CHECK: @llvm.ppc.altivec.vcmpgefp.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgefp.p
/* vec_any_ngt */
- res_i = vec_any_ngt(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp.p
+ res_i = vec_any_ngt(vf, vf);
+// CHECK: @llvm.ppc.altivec.vcmpgtfp.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtfp.p
/* vec_any_nle */
- res_i = vec_any_nle(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgefp.p
+ res_i = vec_any_nle(vf, vf);
+// CHECK: @llvm.ppc.altivec.vcmpgefp.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgefp.p
/* vec_any_nlt */
- res_i = vec_any_nlt(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp.p
+ res_i = vec_any_nlt(vf, vf);
+// CHECK: @llvm.ppc.altivec.vcmpgtfp.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtfp.p
/* vec_any_numeric */
- res_i = vec_any_numeric(vf); // CHECK: @llvm.ppc.altivec.vcmpeqfp.p
+ res_i = vec_any_numeric(vf);
+// CHECK: @llvm.ppc.altivec.vcmpeqfp.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpeqfp.p
/* vec_any_out */
- res_i = vec_any_out(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpbfp.p
+ res_i = vec_any_out(vf, vf);
+// CHECK: @llvm.ppc.altivec.vcmpbfp.p
+// CHECK-LE: @llvm.ppc.altivec.vcmpbfp.p
}
/* ------------------------------ Relational Operators ------------------------------ */
@@ -3059,58 +8517,183 @@ void test6() {
void test7() {
vector signed char vsc1 = (vector signed char)(-1);
vector signed char vsc2 = (vector signed char)(-2);
- res_i = (vsc1 == vsc2); // CHECK: @llvm.ppc.altivec.vcmpequb.p(i32 2
- res_i = (vsc1 != vsc2); // CHECK: @llvm.ppc.altivec.vcmpequb.p(i32 0
- res_i = (vsc1 < vsc2); // CHECK: @llvm.ppc.altivec.vcmpgtsb.p(i32 2
- res_i = (vsc1 > vsc2); // CHECK: @llvm.ppc.altivec.vcmpgtsb.p(i32 2
- res_i = (vsc1 <= vsc2); // CHECK: @llvm.ppc.altivec.vcmpgtsb.p(i32 0
- res_i = (vsc1 >= vsc2); // CHECK: @llvm.ppc.altivec.vcmpgtsb.p(i32 0
+ res_i = (vsc1 == vsc2);
+// CHECK: @llvm.ppc.altivec.vcmpequb.p(i32 2
+// CHECK-LE: @llvm.ppc.altivec.vcmpequb.p(i32 2
+
+ res_i = (vsc1 != vsc2);
+// CHECK: @llvm.ppc.altivec.vcmpequb.p(i32 0
+// CHECK-LE: @llvm.ppc.altivec.vcmpequb.p(i32 0
+
+ res_i = (vsc1 < vsc2);
+// CHECK: @llvm.ppc.altivec.vcmpgtsb.p(i32 2
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsb.p(i32 2
+
+ res_i = (vsc1 > vsc2);
+// CHECK: @llvm.ppc.altivec.vcmpgtsb.p(i32 2
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsb.p(i32 2
+
+ res_i = (vsc1 <= vsc2);
+// CHECK: @llvm.ppc.altivec.vcmpgtsb.p(i32 0
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsb.p(i32 0
+
+ res_i = (vsc1 >= vsc2);
+// CHECK: @llvm.ppc.altivec.vcmpgtsb.p(i32 0
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsb.p(i32 0
+
vector unsigned char vuc1 = (vector unsigned char)(1);
vector unsigned char vuc2 = (vector unsigned char)(2);
- res_i = (vuc1 == vuc2); // CHECK: @llvm.ppc.altivec.vcmpequb.p(i32 2
- res_i = (vuc1 != vuc2); // CHECK: @llvm.ppc.altivec.vcmpequb.p(i32 0
- res_i = (vuc1 < vuc2); // CHECK: @llvm.ppc.altivec.vcmpgtub.p(i32 2
- res_i = (vuc1 > vuc2); // CHECK: @llvm.ppc.altivec.vcmpgtub.p(i32 2
- res_i = (vuc1 <= vuc2); // CHECK: @llvm.ppc.altivec.vcmpgtub.p(i32 0
- res_i = (vuc1 >= vuc2); // CHECK: @llvm.ppc.altivec.vcmpgtub.p(i32 0
+ res_i = (vuc1 == vuc2);
+// CHECK: @llvm.ppc.altivec.vcmpequb.p(i32 2
+// CHECK-LE: @llvm.ppc.altivec.vcmpequb.p(i32 2
+
+ res_i = (vuc1 != vuc2);
+// CHECK: @llvm.ppc.altivec.vcmpequb.p(i32 0
+// CHECK-LE: @llvm.ppc.altivec.vcmpequb.p(i32 0
+
+ res_i = (vuc1 < vuc2);
+// CHECK: @llvm.ppc.altivec.vcmpgtub.p(i32 2
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtub.p(i32 2
+
+ res_i = (vuc1 > vuc2);
+// CHECK: @llvm.ppc.altivec.vcmpgtub.p(i32 2
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtub.p(i32 2
+
+ res_i = (vuc1 <= vuc2);
+// CHECK: @llvm.ppc.altivec.vcmpgtub.p(i32 0
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtub.p(i32 0
+
+ res_i = (vuc1 >= vuc2);
+// CHECK: @llvm.ppc.altivec.vcmpgtub.p(i32 0
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtub.p(i32 0
+
vector short vs1 = (vector short)(-1);
vector short vs2 = (vector short)(-2);
- res_i = (vs1 == vs2); // CHECK: @llvm.ppc.altivec.vcmpequh.p(i32 2
- res_i = (vs1 != vs2); // CHECK: @llvm.ppc.altivec.vcmpequh.p(i32 0
- res_i = (vs1 < vs2); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p(i32 2
- res_i = (vs1 > vs2); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p(i32 2
- res_i = (vs1 <= vs2); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p(i32 0
- res_i = (vs1 >= vs2); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p(i32 0
+ res_i = (vs1 == vs2);
+// CHECK: @llvm.ppc.altivec.vcmpequh.p(i32 2
+// CHECK-LE: @llvm.ppc.altivec.vcmpequh.p(i32 2
+
+ res_i = (vs1 != vs2);
+// CHECK: @llvm.ppc.altivec.vcmpequh.p(i32 0
+// CHECK-LE: @llvm.ppc.altivec.vcmpequh.p(i32 0
+
+ res_i = (vs1 < vs2);
+// CHECK: @llvm.ppc.altivec.vcmpgtsh.p(i32 2
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsh.p(i32 2
+
+ res_i = (vs1 > vs2);
+// CHECK: @llvm.ppc.altivec.vcmpgtsh.p(i32 2
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsh.p(i32 2
+
+ res_i = (vs1 <= vs2);
+// CHECK: @llvm.ppc.altivec.vcmpgtsh.p(i32 0
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsh.p(i32 0
+
+ res_i = (vs1 >= vs2);
+// CHECK: @llvm.ppc.altivec.vcmpgtsh.p(i32 0
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsh.p(i32 0
+
vector unsigned short vus1 = (vector unsigned short)(1);
vector unsigned short vus2 = (vector unsigned short)(2);
- res_i = (vus1 == vus2); // CHECK: @llvm.ppc.altivec.vcmpequh.p(i32 2
- res_i = (vus1 != vus2); // CHECK: @llvm.ppc.altivec.vcmpequh.p(i32 0
- res_i = (vus1 < vus2); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p(i32 2
- res_i = (vus1 > vus2); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p(i32 2
- res_i = (vus1 <= vus2); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p(i32 0
- res_i = (vus1 >= vus2); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p(i32 0
+ res_i = (vus1 == vus2);
+// CHECK: @llvm.ppc.altivec.vcmpequh.p(i32 2
+// CHECK-LE: @llvm.ppc.altivec.vcmpequh.p(i32 2
+
+ res_i = (vus1 != vus2);
+// CHECK: @llvm.ppc.altivec.vcmpequh.p(i32 0
+// CHECK-LE: @llvm.ppc.altivec.vcmpequh.p(i32 0
+
+ res_i = (vus1 < vus2);
+// CHECK: @llvm.ppc.altivec.vcmpgtuh.p(i32 2
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuh.p(i32 2
+
+ res_i = (vus1 > vus2);
+// CHECK: @llvm.ppc.altivec.vcmpgtuh.p(i32 2
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuh.p(i32 2
+
+ res_i = (vus1 <= vus2);
+// CHECK: @llvm.ppc.altivec.vcmpgtuh.p(i32 0
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuh.p(i32 0
+
+ res_i = (vus1 >= vus2);
+// CHECK: @llvm.ppc.altivec.vcmpgtuh.p(i32 0
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuh.p(i32 0
+
vector int vi1 = (vector int)(-1);
vector int vi2 = (vector int)(-2);
- res_i = (vi1 == vi2); // CHECK: @llvm.ppc.altivec.vcmpequw.p(i32 2
- res_i = (vi1 != vi2); // CHECK: @llvm.ppc.altivec.vcmpequw.p(i32 0
- res_i = (vi1 < vi2); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p(i32 2
- res_i = (vi1 > vi2); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p(i32 2
- res_i = (vi1 <= vi2); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p(i32 0
- res_i = (vi1 >= vi2); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p(i32 0
+ res_i = (vi1 == vi2);
+// CHECK: @llvm.ppc.altivec.vcmpequw.p(i32 2
+// CHECK-LE: @llvm.ppc.altivec.vcmpequw.p(i32 2
+
+ res_i = (vi1 != vi2);
+// CHECK: @llvm.ppc.altivec.vcmpequw.p(i32 0
+// CHECK-LE: @llvm.ppc.altivec.vcmpequw.p(i32 0
+
+ res_i = (vi1 < vi2);
+// CHECK: @llvm.ppc.altivec.vcmpgtsw.p(i32 2
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsw.p(i32 2
+
+ res_i = (vi1 > vi2);
+// CHECK: @llvm.ppc.altivec.vcmpgtsw.p(i32 2
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsw.p(i32 2
+
+ res_i = (vi1 <= vi2);
+// CHECK: @llvm.ppc.altivec.vcmpgtsw.p(i32 0
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsw.p(i32 0
+
+ res_i = (vi1 >= vi2);
+// CHECK: @llvm.ppc.altivec.vcmpgtsw.p(i32 0
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsw.p(i32 0
+
vector unsigned int vui1 = (vector unsigned int)(1);
vector unsigned int vui2 = (vector unsigned int)(2);
- res_i = (vui1 == vui2); // CHECK: @llvm.ppc.altivec.vcmpequw.p(i32 2
- res_i = (vui1 != vui2); // CHECK: @llvm.ppc.altivec.vcmpequw.p(i32 0
- res_i = (vui1 < vui2); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p(i32 2
- res_i = (vui1 > vui2); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p(i32 2
- res_i = (vui1 <= vui2); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p(i32 0
- res_i = (vui1 >= vui2); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p(i32 0
+ res_i = (vui1 == vui2);
+// CHECK: @llvm.ppc.altivec.vcmpequw.p(i32 2
+// CHECK-LE: @llvm.ppc.altivec.vcmpequw.p(i32 2
+
+ res_i = (vui1 != vui2);
+// CHECK: @llvm.ppc.altivec.vcmpequw.p(i32 0
+// CHECK-LE: @llvm.ppc.altivec.vcmpequw.p(i32 0
+
+ res_i = (vui1 < vui2);
+// CHECK: @llvm.ppc.altivec.vcmpgtuw.p(i32 2
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuw.p(i32 2
+
+ res_i = (vui1 > vui2);
+// CHECK: @llvm.ppc.altivec.vcmpgtuw.p(i32 2
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuw.p(i32 2
+
+ res_i = (vui1 <= vui2);
+// CHECK: @llvm.ppc.altivec.vcmpgtuw.p(i32 0
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuw.p(i32 0
+
+ res_i = (vui1 >= vui2);
+// CHECK: @llvm.ppc.altivec.vcmpgtuw.p(i32 0
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuw.p(i32 0
+
vector float vf1 = (vector float)(1.0);
vector float vf2 = (vector float)(2.0);
- res_i = (vf1 == vf2); // CHECK: @llvm.ppc.altivec.vcmpeqfp.p(i32 2
- res_i = (vf1 != vf2); // CHECK: @llvm.ppc.altivec.vcmpeqfp.p(i32 0
- res_i = (vf1 < vf2); // CHECK: @llvm.ppc.altivec.vcmpgtfp.p(i32 2
- res_i = (vf1 > vf2); // CHECK: @llvm.ppc.altivec.vcmpgtfp.p(i32 2
- res_i = (vf1 <= vf2); // CHECK: @llvm.ppc.altivec.vcmpgefp.p(i32 2
- res_i = (vf1 >= vf2); // CHECK: @llvm.ppc.altivec.vcmpgefp.p(i32 2
+ res_i = (vf1 == vf2);
+// CHECK: @llvm.ppc.altivec.vcmpeqfp.p(i32 2
+// CHECK-LE: @llvm.ppc.altivec.vcmpeqfp.p(i32 2
+
+ res_i = (vf1 != vf2);
+// CHECK: @llvm.ppc.altivec.vcmpeqfp.p(i32 0
+// CHECK-LE: @llvm.ppc.altivec.vcmpeqfp.p(i32 0
+
+ res_i = (vf1 < vf2);
+// CHECK: @llvm.ppc.altivec.vcmpgtfp.p(i32 2
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtfp.p(i32 2
+
+ res_i = (vf1 > vf2);
+// CHECK: @llvm.ppc.altivec.vcmpgtfp.p(i32 2
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtfp.p(i32 2
+
+ res_i = (vf1 <= vf2);
+// CHECK: @llvm.ppc.altivec.vcmpgefp.p(i32 2
+// CHECK-LE: @llvm.ppc.altivec.vcmpgefp.p(i32 2
+
+ res_i = (vf1 >= vf2);
+// CHECK: @llvm.ppc.altivec.vcmpgefp.p(i32 2
+// CHECK-LE: @llvm.ppc.altivec.vcmpgefp.p(i32 2
}
Index: llvm-suse/tools/clang/test/CodeGen/ppc64-align-struct.c
===================================================================
--- /dev/null
+++ llvm-suse/tools/clang/test/CodeGen/ppc64-align-struct.c
@@ -0,0 +1,154 @@
+// RUN: %clang_cc1 -faltivec -triple powerpc64-unknown-linux-gnu -emit-llvm -o - %s | FileCheck %s
+
+#include <stdarg.h>
+
+struct test1 { int x; int y; };
+struct test2 { int x; int y; } __attribute__((aligned (16)));
+struct test3 { int x; int y; } __attribute__((aligned (32)));
+struct test4 { int x; int y; int z; };
+struct test5 { int x[17]; };
+struct test6 { int x[17]; } __attribute__((aligned (16)));
+struct test7 { int x[17]; } __attribute__((aligned (32)));
+
+// CHECK: define void @test1(i32 signext %x, i64 %y.coerce)
+void test1 (int x, struct test1 y)
+{
+}
+
+// CHECK: define void @test2(i32 signext %x, [1 x i128] %y.coerce)
+void test2 (int x, struct test2 y)
+{
+}
+
+// CHECK: define void @test3(i32 signext %x, [2 x i128] %y.coerce)
+void test3 (int x, struct test3 y)
+{
+}
+
+// CHECK: define void @test4(i32 signext %x, [2 x i64] %y.coerce)
+void test4 (int x, struct test4 y)
+{
+}
+
+// CHECK: define void @test5(i32 signext %x, %struct.test5* byval align 8 %y)
+void test5 (int x, struct test5 y)
+{
+}
+
+// CHECK: define void @test6(i32 signext %x, %struct.test6* byval align 16 %y)
+void test6 (int x, struct test6 y)
+{
+}
+
+// This case requires run-time realignment of the incoming struct
+// CHECK: define void @test7(i32 signext %x, %struct.test7* byval align 16)
+// CHECK: %y = alloca %struct.test7, align 32
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64
+void test7 (int x, struct test7 y)
+{
+}
+
+// CHECK: define void @test1va(%struct.test1* noalias sret %agg.result, i32 signext %x, ...)
+// CHECK: %[[CUR:[^ ]+]] = load i8** %ap
+// CHECK: %[[NEXT:[^ ]+]] = getelementptr i8* %[[CUR]], i64 8
+// CHECK: store i8* %[[NEXT]], i8** %ap
+// CHECK: bitcast i8* %[[CUR]] to %struct.test1*
+struct test1 test1va (int x, ...)
+{
+ struct test1 y;
+ va_list ap;
+ va_start(ap, x);
+ y = va_arg (ap, struct test1);
+ va_end(ap);
+ return y;
+}
+
+// CHECK: define void @test2va(%struct.test2* noalias sret %agg.result, i32 signext %x, ...)
+// CHECK: %[[CUR:[^ ]+]] = load i8** %ap
+// CHECK: %[[TMP0:[^ ]+]] = ptrtoint i8* %[[CUR]] to i64
+// CHECK: %[[TMP1:[^ ]+]] = add i64 %[[TMP0]], 15
+// CHECK: %[[TMP2:[^ ]+]] = and i64 %[[TMP1]], -16
+// CHECK: %[[ALIGN:[^ ]+]] = inttoptr i64 %[[TMP2]] to i8*
+// CHECK: %[[NEXT:[^ ]+]] = getelementptr i8* %[[ALIGN]], i64 16
+// CHECK: store i8* %[[NEXT]], i8** %ap
+// CHECK: bitcast i8* %[[ALIGN]] to %struct.test2*
+struct test2 test2va (int x, ...)
+{
+ struct test2 y;
+ va_list ap;
+ va_start(ap, x);
+ y = va_arg (ap, struct test2);
+ va_end(ap);
+ return y;
+}
+
+// CHECK: define void @test3va(%struct.test3* noalias sret %agg.result, i32 signext %x, ...)
+// CHECK: %[[CUR:[^ ]+]] = load i8** %ap
+// CHECK: %[[TMP0:[^ ]+]] = ptrtoint i8* %[[CUR]] to i64
+// CHECK: %[[TMP1:[^ ]+]] = add i64 %[[TMP0]], 15
+// CHECK: %[[TMP2:[^ ]+]] = and i64 %[[TMP1]], -16
+// CHECK: %[[ALIGN:[^ ]+]] = inttoptr i64 %[[TMP2]] to i8*
+// CHECK: %[[NEXT:[^ ]+]] = getelementptr i8* %[[ALIGN]], i64 32
+// CHECK: store i8* %[[NEXT]], i8** %ap
+// CHECK: bitcast i8* %[[ALIGN]] to %struct.test3*
+struct test3 test3va (int x, ...)
+{
+ struct test3 y;
+ va_list ap;
+ va_start(ap, x);
+ y = va_arg (ap, struct test3);
+ va_end(ap);
+ return y;
+}
+
+// CHECK: define void @test4va(%struct.test4* noalias sret %agg.result, i32 signext %x, ...)
+// CHECK: %[[CUR:[^ ]+]] = load i8** %ap
+// CHECK: %[[NEXT:[^ ]+]] = getelementptr i8* %[[CUR]], i64 16
+// CHECK: store i8* %[[NEXT]], i8** %ap
+// CHECK: bitcast i8* %[[CUR]] to %struct.test4*
+struct test4 test4va (int x, ...)
+{
+ struct test4 y;
+ va_list ap;
+ va_start(ap, x);
+ y = va_arg (ap, struct test4);
+ va_end(ap);
+ return y;
+}
+
+// CHECK: define void @testva_longdouble(%struct.test_longdouble* noalias sret %agg.result, i32 signext %x, ...)
+// CHECK: %[[CUR:[^ ]+]] = load i8** %ap
+// CHECK: %[[NEXT:[^ ]+]] = getelementptr i8* %[[CUR]], i64 16
+// CHECK: store i8* %[[NEXT]], i8** %ap
+// CHECK: bitcast i8* %[[CUR]] to %struct.test_longdouble*
+struct test_longdouble { long double x; };
+struct test_longdouble testva_longdouble (int x, ...)
+{
+ struct test_longdouble y;
+ va_list ap;
+ va_start(ap, x);
+ y = va_arg (ap, struct test_longdouble);
+ va_end(ap);
+ return y;
+}
+
+// CHECK: define void @testva_vector(%struct.test_vector* noalias sret %agg.result, i32 signext %x, ...)
+// CHECK: %[[CUR:[^ ]+]] = load i8** %ap
+// CHECK: %[[TMP0:[^ ]+]] = ptrtoint i8* %[[CUR]] to i64
+// CHECK: %[[TMP1:[^ ]+]] = add i64 %[[TMP0]], 15
+// CHECK: %[[TMP2:[^ ]+]] = and i64 %[[TMP1]], -16
+// CHECK: %[[ALIGN:[^ ]+]] = inttoptr i64 %[[TMP2]] to i8*
+// CHECK: %[[NEXT:[^ ]+]] = getelementptr i8* %[[ALIGN]], i64 16
+// CHECK: store i8* %[[NEXT]], i8** %ap
+// CHECK: bitcast i8* %[[ALIGN]] to %struct.test_vector*
+struct test_vector { vector int x; };
+struct test_vector testva_vector (int x, ...)
+{
+ struct test_vector y;
+ va_list ap;
+ va_start(ap, x);
+ y = va_arg (ap, struct test_vector);
+ va_end(ap);
+ return y;
+}
+
Index: llvm-suse/tools/clang/test/CodeGen/ppc64-vector.c
===================================================================
--- /dev/null
+++ llvm-suse/tools/clang/test/CodeGen/ppc64-vector.c
@@ -0,0 +1,52 @@
+// RUN: %clang_cc1 -faltivec -triple powerpc64-unknown-linux-gnu -emit-llvm -o - %s | FileCheck %s
+
+typedef short v2i16 __attribute__((vector_size (4)));
+typedef short v3i16 __attribute__((vector_size (6)));
+typedef short v4i16 __attribute__((vector_size (8)));
+typedef short v6i16 __attribute__((vector_size (12)));
+typedef short v8i16 __attribute__((vector_size (16)));
+typedef short v16i16 __attribute__((vector_size (32)));
+
+struct v16i16 { v16i16 x; };
+
+// CHECK: define i32 @test_v2i16(i32 %x.coerce)
+v2i16 test_v2i16(v2i16 x)
+{
+ return x;
+}
+
+// CHECK: define i64 @test_v3i16(i64 %x.coerce)
+v3i16 test_v3i16(v3i16 x)
+{
+ return x;
+}
+
+// CHECK: define i64 @test_v4i16(i64 %x.coerce)
+v4i16 test_v4i16(v4i16 x)
+{
+ return x;
+}
+
+// CHECK: define <6 x i16> @test_v6i16(<6 x i16> %x)
+v6i16 test_v6i16(v6i16 x)
+{
+ return x;
+}
+
+// CHECK: define <8 x i16> @test_v8i16(<8 x i16> %x)
+v8i16 test_v8i16(v8i16 x)
+{
+ return x;
+}
+
+// CHECK: define void @test_v16i16(<16 x i16>* noalias sret %agg.result, <16 x i16>*)
+v16i16 test_v16i16(v16i16 x)
+{
+ return x;
+}
+
+// CHECK: define void @test_struct_v16i16(%struct.v16i16* noalias sret %agg.result, [2 x i128] %x.coerce)
+struct v16i16 test_struct_v16i16(struct v16i16 x)
+{
+ return x;
+}
Index: llvm-suse/tools/clang/test/CodeGen/ppc64le-aggregates.c
===================================================================
--- /dev/null
+++ llvm-suse/tools/clang/test/CodeGen/ppc64le-aggregates.c
@@ -0,0 +1,423 @@
+// REQUIRES: powerpc-registered-target
+// RUN: %clang_cc1 -faltivec -triple powerpc64le-unknown-linux-gnu -emit-llvm -o - %s | FileCheck %s
+
+// Test homogeneous float aggregate passing and returning.
+
+struct f1 { float f[1]; };
+struct f2 { float f[2]; };
+struct f3 { float f[3]; };
+struct f4 { float f[4]; };
+struct f5 { float f[5]; };
+struct f6 { float f[6]; };
+struct f7 { float f[7]; };
+struct f8 { float f[8]; };
+struct f9 { float f[9]; };
+
+struct fab { float a; float b; };
+struct fabc { float a; float b; float c; };
+
+// CHECK: define [1 x float] @func_f1(float inreg %x.coerce)
+struct f1 func_f1(struct f1 x) { return x; }
+
+// CHECK: define [2 x float] @func_f2([2 x float] %x.coerce)
+struct f2 func_f2(struct f2 x) { return x; }
+
+// CHECK: define [3 x float] @func_f3([3 x float] %x.coerce)
+struct f3 func_f3(struct f3 x) { return x; }
+
+// CHECK: define [4 x float] @func_f4([4 x float] %x.coerce)
+struct f4 func_f4(struct f4 x) { return x; }
+
+// CHECK: define [5 x float] @func_f5([5 x float] %x.coerce)
+struct f5 func_f5(struct f5 x) { return x; }
+
+// CHECK: define [6 x float] @func_f6([6 x float] %x.coerce)
+struct f6 func_f6(struct f6 x) { return x; }
+
+// CHECK: define [7 x float] @func_f7([7 x float] %x.coerce)
+struct f7 func_f7(struct f7 x) { return x; }
+
+// CHECK: define [8 x float] @func_f8([8 x float] %x.coerce)
+struct f8 func_f8(struct f8 x) { return x; }
+
+// CHECK: define void @func_f9(%struct.f9* noalias sret %agg.result, [5 x i64] %x.coerce)
+struct f9 func_f9(struct f9 x) { return x; }
+
+// CHECK: define [2 x float] @func_fab([2 x float] %x.coerce)
+struct fab func_fab(struct fab x) { return x; }
+
+// CHECK: define [3 x float] @func_fabc([3 x float] %x.coerce)
+struct fabc func_fabc(struct fabc x) { return x; }
+
+// CHECK-LABEL: @call_f1
+// CHECK: %[[TMP:[^ ]+]] = load float* getelementptr inbounds (%struct.f1* @global_f1, i32 0, i32 0, i32 0), align 1
+// CHECK: call [1 x float] @func_f1(float inreg %[[TMP]])
+struct f1 global_f1;
+void call_f1(void) { global_f1 = func_f1(global_f1); }
+
+// CHECK-LABEL: @call_f2
+// CHECK: %[[TMP:[^ ]+]] = load [2 x float]* getelementptr inbounds (%struct.f2* @global_f2, i32 0, i32 0), align 1
+// CHECK: call [2 x float] @func_f2([2 x float] %[[TMP]])
+struct f2 global_f2;
+void call_f2(void) { global_f2 = func_f2(global_f2); }
+
+// CHECK-LABEL: @call_f3
+// CHECK: %[[TMP:[^ ]+]] = load [3 x float]* getelementptr inbounds (%struct.f3* @global_f3, i32 0, i32 0), align 1
+// CHECK: call [3 x float] @func_f3([3 x float] %[[TMP]])
+struct f3 global_f3;
+void call_f3(void) { global_f3 = func_f3(global_f3); }
+
+// CHECK-LABEL: @call_f4
+// CHECK: %[[TMP:[^ ]+]] = load [4 x float]* getelementptr inbounds (%struct.f4* @global_f4, i32 0, i32 0), align 1
+// CHECK: call [4 x float] @func_f4([4 x float] %[[TMP]])
+struct f4 global_f4;
+void call_f4(void) { global_f4 = func_f4(global_f4); }
+
+// CHECK-LABEL: @call_f5
+// CHECK: %[[TMP:[^ ]+]] = load [5 x float]* getelementptr inbounds (%struct.f5* @global_f5, i32 0, i32 0), align 1
+// CHECK: call [5 x float] @func_f5([5 x float] %[[TMP]])
+struct f5 global_f5;
+void call_f5(void) { global_f5 = func_f5(global_f5); }
+
+// CHECK-LABEL: @call_f6
+// CHECK: %[[TMP:[^ ]+]] = load [6 x float]* getelementptr inbounds (%struct.f6* @global_f6, i32 0, i32 0), align 1
+// CHECK: call [6 x float] @func_f6([6 x float] %[[TMP]])
+struct f6 global_f6;
+void call_f6(void) { global_f6 = func_f6(global_f6); }
+
+// CHECK-LABEL: @call_f7
+// CHECK: %[[TMP:[^ ]+]] = load [7 x float]* getelementptr inbounds (%struct.f7* @global_f7, i32 0, i32 0), align 1
+// CHECK: call [7 x float] @func_f7([7 x float] %[[TMP]])
+struct f7 global_f7;
+void call_f7(void) { global_f7 = func_f7(global_f7); }
+
+// CHECK-LABEL: @call_f8
+// CHECK: %[[TMP:[^ ]+]] = load [8 x float]* getelementptr inbounds (%struct.f8* @global_f8, i32 0, i32 0), align 1
+// CHECK: call [8 x float] @func_f8([8 x float] %[[TMP]])
+struct f8 global_f8;
+void call_f8(void) { global_f8 = func_f8(global_f8); }
+
+// CHECK-LABEL: @call_f9
+// CHECK: %[[TMP1:[^ ]+]] = alloca [5 x i64]
+// CHECK: %[[TMP2:[^ ]+]] = bitcast [5 x i64]* %[[TMP1]] to i8*
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* %[[TMP2]], i8* bitcast (%struct.f9* @global_f9 to i8*), i64 36, i32 1, i1 false)
+// CHECK: %[[TMP3:[^ ]+]] = load [5 x i64]* %[[TMP1]]
+// CHECK: call void @func_f9(%struct.f9* sret %{{[^ ]+}}, [5 x i64] %[[TMP3]])
+struct f9 global_f9;
+void call_f9(void) { global_f9 = func_f9(global_f9); }
+
+// CHECK-LABEL: @call_fab
+// CHECK: %[[TMP:[^ ]+]] = load [2 x float]* bitcast (%struct.fab* @global_fab to [2 x float]*)
+// CHECK: call [2 x float] @func_fab([2 x float] %[[TMP]])
+struct fab global_fab;
+void call_fab(void) { global_fab = func_fab(global_fab); }
+
+// CHECK-LABEL: @call_fabc
+// CHECK: %[[TMP:[^ ]+]] = load [3 x float]* bitcast (%struct.fabc* @global_fabc to [3 x float]*)
+// CHECK: call [3 x float] @func_fabc([3 x float] %[[TMP]])
+struct fabc global_fabc;
+void call_fabc(void) { global_fabc = func_fabc(global_fabc); }
+
+
+// Test homogeneous vector aggregate passing and returning.
+
+struct v1 { vector int v[1]; };
+struct v2 { vector int v[2]; };
+struct v3 { vector int v[3]; };
+struct v4 { vector int v[4]; };
+struct v5 { vector int v[5]; };
+struct v6 { vector int v[6]; };
+struct v7 { vector int v[7]; };
+struct v8 { vector int v[8]; };
+struct v9 { vector int v[9]; };
+
+struct vab { vector int a; vector int b; };
+struct vabc { vector int a; vector int b; vector int c; };
+
+// CHECK: define [1 x <4 x i32>] @func_v1(<4 x i32> inreg %x.coerce)
+struct v1 func_v1(struct v1 x) { return x; }
+
+// CHECK: define [2 x <4 x i32>] @func_v2([2 x <4 x i32>] %x.coerce)
+struct v2 func_v2(struct v2 x) { return x; }
+
+// CHECK: define [3 x <4 x i32>] @func_v3([3 x <4 x i32>] %x.coerce)
+struct v3 func_v3(struct v3 x) { return x; }
+
+// CHECK: define [4 x <4 x i32>] @func_v4([4 x <4 x i32>] %x.coerce)
+struct v4 func_v4(struct v4 x) { return x; }
+
+// CHECK: define [5 x <4 x i32>] @func_v5([5 x <4 x i32>] %x.coerce)
+struct v5 func_v5(struct v5 x) { return x; }
+
+// CHECK: define [6 x <4 x i32>] @func_v6([6 x <4 x i32>] %x.coerce)
+struct v6 func_v6(struct v6 x) { return x; }
+
+// CHECK: define [7 x <4 x i32>] @func_v7([7 x <4 x i32>] %x.coerce)
+struct v7 func_v7(struct v7 x) { return x; }
+
+// CHECK: define [8 x <4 x i32>] @func_v8([8 x <4 x i32>] %x.coerce)
+struct v8 func_v8(struct v8 x) { return x; }
+
+// CHECK: define void @func_v9(%struct.v9* noalias sret %agg.result, %struct.v9* byval align 16 %x)
+struct v9 func_v9(struct v9 x) { return x; }
+
+// CHECK: define [2 x <4 x i32>] @func_vab([2 x <4 x i32>] %x.coerce)
+struct vab func_vab(struct vab x) { return x; }
+
+// CHECK: define [3 x <4 x i32>] @func_vabc([3 x <4 x i32>] %x.coerce)
+struct vabc func_vabc(struct vabc x) { return x; }
+
+// CHECK-LABEL: @call_v1
+// CHECK: %[[TMP:[^ ]+]] = load <4 x i32>* getelementptr inbounds (%struct.v1* @global_v1, i32 0, i32 0, i32 0), align 1
+// CHECK: call [1 x <4 x i32>] @func_v1(<4 x i32> inreg %[[TMP]])
+struct v1 global_v1;
+void call_v1(void) { global_v1 = func_v1(global_v1); }
+
+// CHECK-LABEL: @call_v2
+// CHECK: %[[TMP:[^ ]+]] = load [2 x <4 x i32>]* getelementptr inbounds (%struct.v2* @global_v2, i32 0, i32 0), align 1
+// CHECK: call [2 x <4 x i32>] @func_v2([2 x <4 x i32>] %[[TMP]])
+struct v2 global_v2;
+void call_v2(void) { global_v2 = func_v2(global_v2); }
+
+// CHECK-LABEL: @call_v3
+// CHECK: %[[TMP:[^ ]+]] = load [3 x <4 x i32>]* getelementptr inbounds (%struct.v3* @global_v3, i32 0, i32 0), align 1
+// CHECK: call [3 x <4 x i32>] @func_v3([3 x <4 x i32>] %[[TMP]])
+struct v3 global_v3;
+void call_v3(void) { global_v3 = func_v3(global_v3); }
+
+// CHECK-LABEL: @call_v4
+// CHECK: %[[TMP:[^ ]+]] = load [4 x <4 x i32>]* getelementptr inbounds (%struct.v4* @global_v4, i32 0, i32 0), align 1
+// CHECK: call [4 x <4 x i32>] @func_v4([4 x <4 x i32>] %[[TMP]])
+struct v4 global_v4;
+void call_v4(void) { global_v4 = func_v4(global_v4); }
+
+// CHECK-LABEL: @call_v5
+// CHECK: %[[TMP:[^ ]+]] = load [5 x <4 x i32>]* getelementptr inbounds (%struct.v5* @global_v5, i32 0, i32 0), align 1
+// CHECK: call [5 x <4 x i32>] @func_v5([5 x <4 x i32>] %[[TMP]])
+struct v5 global_v5;
+void call_v5(void) { global_v5 = func_v5(global_v5); }
+
+// CHECK-LABEL: @call_v6
+// CHECK: %[[TMP:[^ ]+]] = load [6 x <4 x i32>]* getelementptr inbounds (%struct.v6* @global_v6, i32 0, i32 0), align 1
+// CHECK: call [6 x <4 x i32>] @func_v6([6 x <4 x i32>] %[[TMP]])
+struct v6 global_v6;
+void call_v6(void) { global_v6 = func_v6(global_v6); }
+
+// CHECK-LABEL: @call_v7
+// CHECK: %[[TMP:[^ ]+]] = load [7 x <4 x i32>]* getelementptr inbounds (%struct.v7* @global_v7, i32 0, i32 0), align 1
+// CHECK: call [7 x <4 x i32>] @func_v7([7 x <4 x i32>] %[[TMP]])
+struct v7 global_v7;
+void call_v7(void) { global_v7 = func_v7(global_v7); }
+
+// CHECK-LABEL: @call_v8
+// CHECK: %[[TMP:[^ ]+]] = load [8 x <4 x i32>]* getelementptr inbounds (%struct.v8* @global_v8, i32 0, i32 0), align 1
+// CHECK: call [8 x <4 x i32>] @func_v8([8 x <4 x i32>] %[[TMP]])
+struct v8 global_v8;
+void call_v8(void) { global_v8 = func_v8(global_v8); }
+
+// CHECK-LABEL: @call_v9
+// CHECK: call void @func_v9(%struct.v9* sret %{{[^ ]+}}, %struct.v9* byval align 16 @global_v9)
+struct v9 global_v9;
+void call_v9(void) { global_v9 = func_v9(global_v9); }
+
+// CHECK-LABEL: @call_vab
+// CHECK: %[[TMP:[^ ]+]] = load [2 x <4 x i32>]* bitcast (%struct.vab* @global_vab to [2 x <4 x i32>]*)
+// CHECK: call [2 x <4 x i32>] @func_vab([2 x <4 x i32>] %[[TMP]])
+struct vab global_vab;
+void call_vab(void) { global_vab = func_vab(global_vab); }
+
+// CHECK-LABEL: @call_vabc
+// CHECK: %[[TMP:[^ ]+]] = load [3 x <4 x i32>]* bitcast (%struct.vabc* @global_vabc to [3 x <4 x i32>]*)
+// CHECK: call [3 x <4 x i32>] @func_vabc([3 x <4 x i32>] %[[TMP]])
+struct vabc global_vabc;
+void call_vabc(void) { global_vabc = func_vabc(global_vabc); }
+
+
+// As clang extension, non-power-of-two vectors may also be part of
+// homogeneous aggregates.
+
+typedef float float3 __attribute__((vector_size (12)));
+
+struct v3f1 { float3 v[1]; };
+struct v3f2 { float3 v[2]; };
+struct v3f3 { float3 v[3]; };
+struct v3f4 { float3 v[4]; };
+struct v3f5 { float3 v[5]; };
+struct v3f6 { float3 v[6]; };
+struct v3f7 { float3 v[7]; };
+struct v3f8 { float3 v[8]; };
+struct v3f9 { float3 v[9]; };
+
+struct v3fab { float3 a; float3 b; };
+struct v3fabc { float3 a; float3 b; float3 c; };
+
+// CHECK: define [1 x <3 x float>] @func_v3f1(<3 x float> inreg %x.coerce)
+struct v3f1 func_v3f1(struct v3f1 x) { return x; }
+
+// CHECK: define [2 x <3 x float>] @func_v3f2([2 x <3 x float>] %x.coerce)
+struct v3f2 func_v3f2(struct v3f2 x) { return x; }
+
+// CHECK: define [3 x <3 x float>] @func_v3f3([3 x <3 x float>] %x.coerce)
+struct v3f3 func_v3f3(struct v3f3 x) { return x; }
+
+// CHECK: define [4 x <3 x float>] @func_v3f4([4 x <3 x float>] %x.coerce)
+struct v3f4 func_v3f4(struct v3f4 x) { return x; }
+
+// CHECK: define [5 x <3 x float>] @func_v3f5([5 x <3 x float>] %x.coerce)
+struct v3f5 func_v3f5(struct v3f5 x) { return x; }
+
+// CHECK: define [6 x <3 x float>] @func_v3f6([6 x <3 x float>] %x.coerce)
+struct v3f6 func_v3f6(struct v3f6 x) { return x; }
+
+// CHECK: define [7 x <3 x float>] @func_v3f7([7 x <3 x float>] %x.coerce)
+struct v3f7 func_v3f7(struct v3f7 x) { return x; }
+
+// CHECK: define [8 x <3 x float>] @func_v3f8([8 x <3 x float>] %x.coerce)
+struct v3f8 func_v3f8(struct v3f8 x) { return x; }
+
+// CHECK: define void @func_v3f9(%struct.v3f9* noalias sret %agg.result, %struct.v3f9* byval align 16 %x)
+struct v3f9 func_v3f9(struct v3f9 x) { return x; }
+
+// CHECK: define [2 x <3 x float>] @func_v3fab([2 x <3 x float>] %x.coerce)
+struct v3fab func_v3fab(struct v3fab x) { return x; }
+
+// CHECK: define [3 x <3 x float>] @func_v3fabc([3 x <3 x float>] %x.coerce)
+struct v3fabc func_v3fabc(struct v3fabc x) { return x; }
+
+// CHECK-LABEL: @call_v3f1
+// CHECK: %[[TMP:[^ ]+]] = load <3 x float>* getelementptr inbounds (%struct.v3f1* @global_v3f1, i32 0, i32 0, i32 0), align 1
+// CHECK: call [1 x <3 x float>] @func_v3f1(<3 x float> inreg %[[TMP]])
+struct v3f1 global_v3f1;
+void call_v3f1(void) { global_v3f1 = func_v3f1(global_v3f1); }
+
+// CHECK-LABEL: @call_v3f2
+// CHECK: %[[TMP:[^ ]+]] = load [2 x <3 x float>]* getelementptr inbounds (%struct.v3f2* @global_v3f2, i32 0, i32 0), align 1
+// CHECK: call [2 x <3 x float>] @func_v3f2([2 x <3 x float>] %[[TMP]])
+struct v3f2 global_v3f2;
+void call_v3f2(void) { global_v3f2 = func_v3f2(global_v3f2); }
+
+// CHECK-LABEL: @call_v3f3
+// CHECK: %[[TMP:[^ ]+]] = load [3 x <3 x float>]* getelementptr inbounds (%struct.v3f3* @global_v3f3, i32 0, i32 0), align 1
+// CHECK: call [3 x <3 x float>] @func_v3f3([3 x <3 x float>] %[[TMP]])
+struct v3f3 global_v3f3;
+void call_v3f3(void) { global_v3f3 = func_v3f3(global_v3f3); }
+
+// CHECK-LABEL: @call_v3f4
+// CHECK: %[[TMP:[^ ]+]] = load [4 x <3 x float>]* getelementptr inbounds (%struct.v3f4* @global_v3f4, i32 0, i32 0), align 1
+// CHECK: call [4 x <3 x float>] @func_v3f4([4 x <3 x float>] %[[TMP]])
+struct v3f4 global_v3f4;
+void call_v3f4(void) { global_v3f4 = func_v3f4(global_v3f4); }
+
+// CHECK-LABEL: @call_v3f5
+// CHECK: %[[TMP:[^ ]+]] = load [5 x <3 x float>]* getelementptr inbounds (%struct.v3f5* @global_v3f5, i32 0, i32 0), align 1
+// CHECK: call [5 x <3 x float>] @func_v3f5([5 x <3 x float>] %[[TMP]])
+struct v3f5 global_v3f5;
+void call_v3f5(void) { global_v3f5 = func_v3f5(global_v3f5); }
+
+// CHECK-LABEL: @call_v3f6
+// CHECK: %[[TMP:[^ ]+]] = load [6 x <3 x float>]* getelementptr inbounds (%struct.v3f6* @global_v3f6, i32 0, i32 0), align 1
+// CHECK: call [6 x <3 x float>] @func_v3f6([6 x <3 x float>] %[[TMP]])
+struct v3f6 global_v3f6;
+void call_v3f6(void) { global_v3f6 = func_v3f6(global_v3f6); }
+
+// CHECK-LABEL: @call_v3f7
+// CHECK: %[[TMP:[^ ]+]] = load [7 x <3 x float>]* getelementptr inbounds (%struct.v3f7* @global_v3f7, i32 0, i32 0), align 1
+// CHECK: call [7 x <3 x float>] @func_v3f7([7 x <3 x float>] %[[TMP]])
+struct v3f7 global_v3f7;
+void call_v3f7(void) { global_v3f7 = func_v3f7(global_v3f7); }
+
+// CHECK-LABEL: @call_v3f8
+// CHECK: %[[TMP:[^ ]+]] = load [8 x <3 x float>]* getelementptr inbounds (%struct.v3f8* @global_v3f8, i32 0, i32 0), align 1
+// CHECK: call [8 x <3 x float>] @func_v3f8([8 x <3 x float>] %[[TMP]])
+struct v3f8 global_v3f8;
+void call_v3f8(void) { global_v3f8 = func_v3f8(global_v3f8); }
+
+// CHECK-LABEL: @call_v3f9
+// CHECK: call void @func_v3f9(%struct.v3f9* sret %{{[^ ]+}}, %struct.v3f9* byval align 16 @global_v3f9)
+struct v3f9 global_v3f9;
+void call_v3f9(void) { global_v3f9 = func_v3f9(global_v3f9); }
+
+// CHECK-LABEL: @call_v3fab
+// CHECK: %[[TMP:[^ ]+]] = load [2 x <3 x float>]* bitcast (%struct.v3fab* @global_v3fab to [2 x <3 x float>]*)
+// CHECK: call [2 x <3 x float>] @func_v3fab([2 x <3 x float>] %[[TMP]])
+struct v3fab global_v3fab;
+void call_v3fab(void) { global_v3fab = func_v3fab(global_v3fab); }
+
+// CHECK-LABEL: @call_v3fabc
+// CHECK: %[[TMP:[^ ]+]] = load [3 x <3 x float>]* bitcast (%struct.v3fabc* @global_v3fabc to [3 x <3 x float>]*)
+// CHECK: call [3 x <3 x float>] @func_v3fabc([3 x <3 x float>] %[[TMP]])
+struct v3fabc global_v3fabc;
+void call_v3fabc(void) { global_v3fabc = func_v3fabc(global_v3fabc); }
+
+
+// Test returning small aggregates.
+
+struct s1 { char c[1]; };
+struct s2 { char c[2]; };
+struct s3 { char c[3]; };
+struct s4 { char c[4]; };
+struct s5 { char c[5]; };
+struct s6 { char c[6]; };
+struct s7 { char c[7]; };
+struct s8 { char c[8]; };
+struct s9 { char c[9]; };
+struct s16 { char c[16]; };
+struct s17 { char c[17]; };
+
+// CHECK: define i8 @ret_s1()
+struct s1 ret_s1() {
+ return (struct s1) { 17 };
+}
+
+// CHECK: define i16 @ret_s2()
+struct s2 ret_s2() {
+ return (struct s2) { 17, 18 };
+}
+
+// CHECK: define i24 @ret_s3()
+struct s3 ret_s3() {
+ return (struct s3) { 17, 18, 19 };
+}
+
+// CHECK: define i32 @ret_s4()
+struct s4 ret_s4() {
+ return (struct s4) { 17, 18, 19, 20 };
+}
+
+// CHECK: define i40 @ret_s5()
+struct s5 ret_s5() {
+ return (struct s5) { 17, 18, 19, 20, 21 };
+}
+
+// CHECK: define i48 @ret_s6()
+struct s6 ret_s6() {
+ return (struct s6) { 17, 18, 19, 20, 21, 22 };
+}
+
+// CHECK: define i56 @ret_s7()
+struct s7 ret_s7() {
+ return (struct s7) { 17, 18, 19, 20, 21, 22, 23 };
+}
+
+// CHECK: define i64 @ret_s8()
+struct s8 ret_s8() {
+ return (struct s8) { 17, 18, 19, 20, 21, 22, 23, 24 };
+}
+
+// CHECK: define { i64, i64 } @ret_s9()
+struct s9 ret_s9() {
+ return (struct s9) { 17, 18, 19, 20, 21, 22, 23, 24, 25 };
+}
+
+// CHECK: define { i64, i64 } @ret_s16()
+struct s16 ret_s16() {
+ return (struct s16) { 17, 18, 19, 20, 21, 22, 23, 24,
+ 25, 26, 27, 28, 29, 30, 31, 32 };
+}
+
+// CHECK: define void @ret_s17(%struct.s17*
+struct s17 ret_s17() {
+ return (struct s17) { 17, 18, 19, 20, 21, 22, 23, 24,
+ 25, 26, 27, 28, 29, 30, 31, 32, 33 };
+}
+
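Note on the small-aggregate cases above (not part of the patch): they pin down
the ELFv2 return-value convention that the backported ABI code implements.
Aggregates of up to 8 bytes come back as a single integer (i8 through i64),
9- to 16-byte aggregates as { i64, i64 }, and anything larger is returned
indirectly through an sret pointer. A minimal standalone sketch of the three
regimes, with hypothetical names, assuming the powerpc64le target this test
exercises (compile with -S -emit-llvm to inspect the lowering):

    /* Sketch only -- mirrors the s8/s9/s17 cases in the test above. */
    struct one_reg  { char c[8];  };   /* returned as i64           */
    struct two_regs { char c[9];  };   /* returned as { i64, i64 }  */
    struct indirect { char c[17]; };   /* returned via sret pointer */

    struct one_reg  f1(void) { return (struct one_reg){ {17} }; }
    struct two_regs f2(void) { return (struct two_regs){ {17} }; }
    struct indirect f3(void) { return (struct indirect){ {17} }; }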
Index: llvm-suse/tools/clang/test/CodeGen/ppc64le-varargs-complex.c
===================================================================
--- /dev/null
+++ llvm-suse/tools/clang/test/CodeGen/ppc64le-varargs-complex.c
@@ -0,0 +1,69 @@
+// REQUIRES: powerpc-registered-target
+// RUN: %clang_cc1 -triple powerpc64le-unknown-linux-gnu -emit-llvm -o - %s | FileCheck %s
+
+#include <stdarg.h>
+
+void testva (int n, ...)
+{
+ va_list ap;
+
+ _Complex int i = va_arg(ap, _Complex int);
+ // CHECK: %[[VAR40:[A-Za-z0-9.]+]] = load i8** %[[VAR100:[A-Za-z0-9.]+]]
+ // CHECK-NEXT: %[[VAR41:[A-Za-z0-9.]+]] = getelementptr i8* %[[VAR40]], i64 16
+ // CHECK-NEXT: store i8* %[[VAR41]], i8** %[[VAR100]]
+ // CHECK-NEXT: %[[VAR1:[A-Za-z0-9.]+]] = ptrtoint i8* %[[VAR40]] to i64
+ // CHECK-NEXT: %[[VAR3:[A-Za-z0-9.]+]] = add i64 %[[VAR1]], 8
+ // CHECK-NEXT: %[[VAR4:[A-Za-z0-9.]+]] = inttoptr i64 %[[VAR1]] to i32*
+ // CHECK-NEXT: %[[VAR5:[A-Za-z0-9.]+]] = inttoptr i64 %[[VAR3]] to i32*
+ // CHECK-NEXT: %[[VAR6:[A-Za-z0-9.]+]] = load i32* %[[VAR4]]
+ // CHECK-NEXT: %[[VAR7:[A-Za-z0-9.]+]] = load i32* %[[VAR5]]
+ // CHECK-NEXT: %[[VAR8:[A-Za-z0-9.]+]] = getelementptr inbounds { i32, i32 }* %[[VAR0:[A-Za-z0-9.]+]], i32 0, i32 0
+ // CHECK-NEXT: %[[VAR9:[A-Za-z0-9.]+]] = getelementptr inbounds { i32, i32 }* %[[VAR0]], i32 0, i32 1
+ // CHECK-NEXT: store i32 %[[VAR6]], i32* %[[VAR8]]
+ // CHECK-NEXT: store i32 %[[VAR7]], i32* %[[VAR9]]
+
+ _Complex short s = va_arg(ap, _Complex short);
+ // CHECK: %[[VAR50:[A-Za-z0-9.]+]] = load i8** %[[VAR100:[A-Za-z0-9.]+]]
+ // CHECK-NEXT: %[[VAR51:[A-Za-z0-9.]+]] = getelementptr i8* %[[VAR50]], i64 16
+ // CHECK-NEXT: store i8* %[[VAR51]], i8** %[[VAR100]]
+ // CHECK: %[[VAR11:[A-Za-z0-9.]+]] = ptrtoint i8* %{{[A-Za-z0-9.]+}} to i64
+ // CHECK-NEXT: %[[VAR13:[A-Za-z0-9.]+]] = add i64 %[[VAR11]], 8
+ // CHECK-NEXT: %[[VAR14:[A-Za-z0-9.]+]] = inttoptr i64 %[[VAR11]] to i16*
+ // CHECK-NEXT: %[[VAR15:[A-Za-z0-9.]+]] = inttoptr i64 %[[VAR13]] to i16*
+ // CHECK-NEXT: %[[VAR16:[A-Za-z0-9.]+]] = load i16* %[[VAR14]]
+ // CHECK-NEXT: %[[VAR17:[A-Za-z0-9.]+]] = load i16* %[[VAR15]]
+ // CHECK-NEXT: %[[VAR18:[A-Za-z0-9.]+]] = getelementptr inbounds { i16, i16 }* %[[VAR10:[A-Za-z0-9.]+]], i32 0, i32 0
+ // CHECK-NEXT: %[[VAR19:[A-Za-z0-9.]+]] = getelementptr inbounds { i16, i16 }* %[[VAR10]], i32 0, i32 1
+ // CHECK-NEXT: store i16 %[[VAR16]], i16* %[[VAR18]]
+ // CHECK-NEXT: store i16 %[[VAR17]], i16* %[[VAR19]]
+
+ _Complex char c = va_arg(ap, _Complex char);
+ // CHECK: %[[VAR60:[A-Za-z0-9.]+]] = load i8** %[[VAR100:[A-Za-z0-9.]+]]
+ // CHECK-NEXT: %[[VAR61:[A-Za-z0-9.]+]] = getelementptr i8* %[[VAR60]], i64 16
+ // CHECK-NEXT: store i8* %[[VAR61]], i8** %[[VAR100]]
+ // CHECK: %[[VAR21:[A-Za-z0-9.]+]] = ptrtoint i8* %{{[A-Za-z0-9.]+}} to i64
+ // CHECK-NEXT: %[[VAR23:[A-Za-z0-9.]+]] = add i64 %[[VAR21]], 8
+ // CHECK-NEXT: %[[VAR24:[A-Za-z0-9.]+]] = inttoptr i64 %[[VAR21]] to i8*
+ // CHECK-NEXT: %[[VAR25:[A-Za-z0-9.]+]] = inttoptr i64 %[[VAR23]] to i8*
+ // CHECK-NEXT: %[[VAR26:[A-Za-z0-9.]+]] = load i8* %[[VAR24]]
+ // CHECK-NEXT: %[[VAR27:[A-Za-z0-9.]+]] = load i8* %[[VAR25]]
+ // CHECK-NEXT: %[[VAR28:[A-Za-z0-9.]+]] = getelementptr inbounds { i8, i8 }* %[[VAR20:[A-Za-z0-9.]+]], i32 0, i32 0
+ // CHECK-NEXT: %[[VAR29:[A-Za-z0-9.]+]] = getelementptr inbounds { i8, i8 }* %[[VAR20]], i32 0, i32 1
+ // CHECK-NEXT: store i8 %[[VAR26]], i8* %[[VAR28]]
+ // CHECK-NEXT: store i8 %[[VAR27]], i8* %[[VAR29]]
+
+ _Complex float f = va_arg(ap, _Complex float);
+ // CHECK: %[[VAR70:[A-Za-z0-9.]+]] = load i8** %[[VAR100:[A-Za-z0-9.]+]]
+ // CHECK-NEXT: %[[VAR71:[A-Za-z0-9.]+]] = getelementptr i8* %[[VAR70]], i64 16
+ // CHECK-NEXT: store i8* %[[VAR71]], i8** %[[VAR100]]
+ // CHECK: %[[VAR31:[A-Za-z0-9.]+]] = ptrtoint i8* %{{[A-Za-z0-9.]+}} to i64
+ // CHECK-NEXT: %[[VAR33:[A-Za-z0-9.]+]] = add i64 %[[VAR31]], 8
+ // CHECK-NEXT: %[[VAR34:[A-Za-z0-9.]+]] = inttoptr i64 %[[VAR31]] to float*
+ // CHECK-NEXT: %[[VAR35:[A-Za-z0-9.]+]] = inttoptr i64 %[[VAR33]] to float*
+ // CHECK-NEXT: %[[VAR36:[A-Za-z0-9.]+]] = load float* %[[VAR34]]
+ // CHECK-NEXT: %[[VAR37:[A-Za-z0-9.]+]] = load float* %[[VAR35]]
+ // CHECK-NEXT: %[[VAR38:[A-Za-z0-9.]+]] = getelementptr inbounds { float, float }* %[[VAR30:[A-Za-z0-9.]+]], i32 0, i32 0
+ // CHECK-NEXT: %[[VAR39:[A-Za-z0-9.]+]] = getelementptr inbounds { float, float }* %[[VAR30]], i32 0, i32 1
+ // CHECK-NEXT: store float %[[VAR36]], float* %[[VAR38]]
+ // CHECK-NEXT: store float %[[VAR37]], float* %[[VAR39]]
+}
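Note (not part of the patch): the expansions checked above follow from the
ELFv2 va_arg rule for _Complex types. Every complex argument occupies a full
16-byte (two doubleword) slot in the argument save area, so the va_list
pointer is advanced by 16 and, on little-endian, the real and imaginary
halves are loaded straight from offsets 0 and 8. A small self-contained
sketch of the same access pattern -- a hypothetical helper, using the same
GNU _Complex int, __real__ and __imag__ extensions as the test:

    #include <stdarg.h>

    /* Sums the real and imaginary parts of n _Complex int varargs;
       each argument is consumed from its own 16-byte va_list slot. */
    static int sum_parts(int n, ...) {
      va_list ap;
      va_start(ap, n);
      int total = 0;
      while (n-- > 0) {
        _Complex int z = va_arg(ap, _Complex int);
        total += __real__ z + __imag__ z;
      }
      va_end(ap);
      return total;
    }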
Index: llvm-suse/tools/clang/test/Driver/altivec-asm.S
===================================================================
--- llvm-suse.orig/tools/clang/test/Driver/altivec-asm.S
+++ llvm-suse/tools/clang/test/Driver/altivec-asm.S
@@ -1,3 +1,4 @@
// RUN: %clang -target powerpc64-linux-gnu -maltivec -S %s -o - | FileCheck %s
+// RUN: %clang -target powerpc64le-linux-gnu -maltivec -S %s -o - | FileCheck %s
// Verify that assembling an empty file does not auto-include altivec.h.
// CHECK-NOT: static vector
Index: llvm-suse/tools/clang/test/Driver/clang-translation.c
===================================================================
--- llvm-suse.orig/tools/clang/test/Driver/clang-translation.c
+++ llvm-suse/tools/clang/test/Driver/clang-translation.c
@@ -71,6 +71,12 @@
// PPCPWR7: "-target-cpu" "pwr7"
// RUN: %clang -target powerpc64-unknown-linux-gnu \
+// RUN: -### -S %s -mcpu=power8 2>&1 | FileCheck -check-prefix=PPCPWR8 %s
+// PPCPWR8: clang
+// PPCPWR8: "-cc1"
+// PPCPWR8: "-target-cpu" "pwr8"
+
+// RUN: %clang -target powerpc64-unknown-linux-gnu \
// RUN: -### -S %s -mcpu=a2q 2>&1 | FileCheck -check-prefix=PPCA2Q %s
// PPCA2Q: clang
// PPCA2Q: "-cc1"
Index: llvm-suse/tools/clang/test/Driver/linux-ld.c
===================================================================
--- llvm-suse.orig/tools/clang/test/Driver/linux-ld.c
+++ llvm-suse/tools/clang/test/Driver/linux-ld.c
@@ -338,6 +338,13 @@
// CHECK-ARM-HF: "-m" "armelf_linux_eabi"
// CHECK-ARM-HF: "-dynamic-linker" "{{.*}}/lib/ld-linux-armhf.so.3"
//
+// RUN: %clang %s -### -o %t.o 2>&1 \
+// RUN: --target=powerpc64le-linux-gnu \
+// RUN: | FileCheck --check-prefix=CHECK-PPC64LE %s
+// CHECK-PPC64LE: "{{.*}}ld{{(.exe)?}}"
+// CHECK-PPC64LE: "-m" "elf64lppc"
+// CHECK-PPC64LE: "-dynamic-linker" "{{.*}}/lib64/ld64.so.2"
+//
// Check that we do not pass --hash-style=gnu and --hash-style=both to linker
// and provide correct path to the dynamic linker and emulation mode when build
// for MIPS platforms.
Index: llvm-suse/tools/clang/test/Driver/ppc-features.cpp
===================================================================
--- llvm-suse.orig/tools/clang/test/Driver/ppc-features.cpp
+++ llvm-suse/tools/clang/test/Driver/ppc-features.cpp
@@ -59,9 +59,12 @@
// RUN: %clang -target powerpc64-unknown-linux-gnu %s -fno-altivec -mcpu=pwr7 -### -o %t.o 2>&1 | FileCheck --check-prefix=CHECK-14 %s
// CHECK-14: "-target-feature" "-altivec"
 
-// RUN: %clang -target powerpc64-unknown-linux-gnu %s -fno-altivec -mcpu=ppc64 -### -o %t.o 2>&1 | FileCheck --check-prefix=CHECK-15 %s
+// RUN: %clang -target powerpc64-unknown-linux-gnu %s -fno-altivec -mcpu=pwr8 -### -o %t.o 2>&1 | FileCheck --check-prefix=CHECK-15 %s
// CHECK-15: "-target-feature" "-altivec"
 
+// RUN: %clang -target powerpc64-unknown-linux-gnu %s -fno-altivec -mcpu=ppc64 -### -o %t.o 2>&1 | FileCheck --check-prefix=CHECK-16 %s
+// CHECK-16: "-target-feature" "-altivec"
+
// RUN: %clang -target powerpc64-unknown-linux-gnu %s -mno-qpx -### -o %t.o 2>&1 | FileCheck -check-prefix=CHECK-NOQPX %s
// CHECK-NOQPX: "-target-feature" "-qpx"
 
@@ -92,3 +95,21 @@
// RUN: %clang -target powerpc64-unknown-linux-gnu %s -mno-vsx -mvsx -### -o %t.o 2>&1 | FileCheck -check-prefix=CHECK-VSX %s
// CHECK-VSX: "-target-feature" "+vsx"
 
+// Assembler features
+// RUN: %clang -target powerpc64-unknown-linux-gnu %s -### -o %t.o 2>&1 | FileCheck -check-prefix=CHECK_BE_AS_ARGS %s
+// CHECK_BE_AS_ARGS: "-mppc64"
+// CHECK_BE_AS_ARGS: "-many"
+
+// RUN: %clang -target powerpc64le-unknown-linux-gnu %s -### -o %t.o 2>&1 | FileCheck -check-prefix=CHECK_LE_AS_ARGS %s
+// CHECK_LE_AS_ARGS: "-mppc64"
+// CHECK_LE_AS_ARGS: "-many"
+// CHECK_LE_AS_ARGS: "-mlittle-endian"
+
+// Linker features
+// RUN: %clang -target powerpc64-unknown-linux-gnu %s -### -o %t.o 2>&1 | FileCheck -check-prefix=CHECK_BE_LD_ARGS %s
+// CHECK_BE_LD_ARGS: "elf64ppc"
+
+// RUN: %clang -target powerpc64le-unknown-linux-gnu %s -### -o %t.o 2>&1 | FileCheck -check-prefix=CHECK_LE_LD_ARGS %s
+// CHECK_LE_LD_ARGS: "elf64lppc"
+
+
Index: llvm-suse/tools/clang/test/Parser/altivec-csk-bool.c
===================================================================
--- llvm-suse.orig/tools/clang/test/Parser/altivec-csk-bool.c
+++ llvm-suse/tools/clang/test/Parser/altivec-csk-bool.c
@@ -1,4 +1,5 @@
-// RUN: %clang -target powerpc64-unknown-linux-gnu -maltivec -fsyntax-only %s
+// RUN: %clang_cc1 -triple powerpc64-unknown-linux-gnu -faltivec -fsyntax-only %s
+// RUN: %clang_cc1 -triple powerpc64le-unknown-linux-gnu -faltivec -fsyntax-only %s
 
// PR16456: Verify that bool, true, false are treated as context-sensitive
// keywords (and therefore available for use as identifiers) when in
Index: llvm-suse/tools/clang/test/Parser/altivec.c
===================================================================
--- llvm-suse.orig/tools/clang/test/Parser/altivec.c
+++ llvm-suse/tools/clang/test/Parser/altivec.c
@@ -1,4 +1,6 @@
// RUN: %clang_cc1 -triple=powerpc-apple-darwin8 -faltivec -fsyntax-only -verify %s
+// RUN: %clang_cc1 -triple=powerpc64-unknown-linux-gnu -faltivec -fsyntax-only -verify %s
+// RUN: %clang_cc1 -triple=powerpc64le-unknown-linux-gnu -faltivec -fsyntax-only -verify %s
 
__vector char vv_c;
__vector signed char vv_sc;
Index: llvm-suse/tools/clang/test/Parser/cxx-altivec.cpp
===================================================================
--- llvm-suse.orig/tools/clang/test/Parser/cxx-altivec.cpp
+++ llvm-suse/tools/clang/test/Parser/cxx-altivec.cpp
@@ -1,4 +1,6 @@
// RUN: %clang_cc1 -triple=powerpc-apple-darwin8 -faltivec -fsyntax-only -verify -std=c++11 %s
+// RUN: %clang_cc1 -triple=powerpc64-unknown-linux-gnu -faltivec -fsyntax-only -verify -std=c++11 %s
+// RUN: %clang_cc1 -triple=powerpc64le-unknown-linux-gnu -faltivec -fsyntax-only -verify -std=c++11 %s
 
__vector char vv_c;
__vector signed char vv_sc;
Index: llvm-suse/tools/clang/test/Preprocessor/init.c
===================================================================
--- llvm-suse.orig/tools/clang/test/Preprocessor/init.c
+++ llvm-suse/tools/clang/test/Preprocessor/init.c
@@ -1929,6 +1929,7 @@
// PPC64LE:#define _ARCH_PWR6 1
// PPC64LE:#define _ARCH_PWR6X 1
// PPC64LE:#define _ARCH_PWR7 1
+// PPC64LE:#define _CALL_ELF 2
// PPC64LE:#define _LITTLE_ENDIAN 1
// PPC64LE:#define _LP64 1
// PPC64LE:#define __BYTE_ORDER__ __ORDER_LITTLE_ENDIAN__
@@ -2190,6 +2191,34 @@
// PPCPOWER7:#define _ARCH_PWR6X 1
// PPCPOWER7:#define _ARCH_PWR7 1
//
+// RUN: %clang_cc1 -E -dM -ffreestanding -triple=powerpc64-none-none -target-cpu pwr8 -fno-signed-char < /dev/null | FileCheck -check-prefix PPCPWR8 %s
+//
+// PPCPWR8:#define _ARCH_PPC 1
+// PPCPWR8:#define _ARCH_PPC64 1
+// PPCPWR8:#define _ARCH_PPCGR 1
+// PPCPWR8:#define _ARCH_PPCSQ 1
+// PPCPWR8:#define _ARCH_PWR4 1
+// PPCPWR8:#define _ARCH_PWR5 1
+// PPCPWR8:#define _ARCH_PWR5X 1
+// PPCPWR8:#define _ARCH_PWR6 1
+// PPCPWR8:#define _ARCH_PWR6X 1
+// PPCPWR8:#define _ARCH_PWR7 1
+// PPCPWR8:#define _ARCH_PWR8 1
+//
+// RUN: %clang_cc1 -E -dM -ffreestanding -triple=powerpc64-none-none -target-cpu power8 -fno-signed-char < /dev/null | FileCheck -check-prefix PPCPOWER8 %s
+//
+// PPCPOWER8:#define _ARCH_PPC 1
+// PPCPOWER8:#define _ARCH_PPC64 1
+// PPCPOWER8:#define _ARCH_PPCGR 1
+// PPCPOWER8:#define _ARCH_PPCSQ 1
+// PPCPOWER8:#define _ARCH_PWR4 1
+// PPCPOWER8:#define _ARCH_PWR5 1
+// PPCPOWER8:#define _ARCH_PWR5X 1
+// PPCPOWER8:#define _ARCH_PWR6 1
+// PPCPOWER8:#define _ARCH_PWR6X 1
+// PPCPOWER8:#define _ARCH_PWR7 1
+// PPCPOWER8:#define _ARCH_PWR8 1
+//
// RUN: %clang_cc1 -E -dM -ffreestanding -triple=powerpc64-unknown-linux-gnu -fno-signed-char < /dev/null | FileCheck -check-prefix PPC64-LINUX %s
//
// PPC64-LINUX:#define _ARCH_PPC 1
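Note (not part of the patch): the pwr8/power8 RUN lines check that the new
CPU names define _ARCH_PWR8 on top of the complete cascade of older
_ARCH_PWR* macros, matching the ArchDefinePwr8 cascade wired up earlier in
this patch. Because each level implies all earlier ones, code only needs to
feature-test the highest level it cares about; an illustrative snippet:

    #if defined(_ARCH_PWR8)
    #  define ISA_LEVEL 8
    #elif defined(_ARCH_PWR7)
    #  define ISA_LEVEL 7
    #else
    #  define ISA_LEVEL 0   /* pre-POWER7, or not PowerPC at all */
    #endif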
Index: llvm-suse/tools/clang/test/Sema/attr-aligned.c
===================================================================
--- llvm-suse.orig/tools/clang/test/Sema/attr-aligned.c
+++ llvm-suse/tools/clang/test/Sema/attr-aligned.c
@@ -20,6 +20,12 @@ char a1[__alignof__(struct struct_with_u
char a2[__alignof__(a) == 1? : -1] = { 0 };
char a3[sizeof(a) == 1? : -1] = { 0 };
 
+typedef long long __attribute__((aligned(1))) underaligned_longlong;
+char a4[__alignof__(underaligned_longlong) == 1 ?: -1] = {0};
+
+typedef _Complex long long __attribute__((aligned(1))) underaligned_complex_longlong;
+char a5[__alignof__(underaligned_complex_longlong) == 1 ?: -1] = {0};
+
// rdar://problem/8335865
int b __attribute__((aligned(2)));
char b1[__alignof__(b) == 2 ?: -1] = {0};
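Note (not part of the patch): the new a4/a5 cases exercise the
getPreferredTypeAlign change at the top of this patch. __alignof__ reports
the preferred alignment, which for long long may otherwise be raised to its
natural, size-based value; with an explicit aligned attribute on the typedef
the attribute now takes precedence, both for the scalar type and through the
_Complex element path. A standalone sketch of the guaranteed behavior
(illustrative only; _Static_assert assumes a C11 compiler):

    typedef long long __attribute__((aligned(1))) ll_align1;
    typedef _Complex long long __attribute__((aligned(1))) cll_align1;

    _Static_assert(__alignof__(ll_align1) == 1,
                   "attribute on the typedef wins over preferred alignment");
    _Static_assert(__alignof__(cll_align1) == 1,
                   "also applies through the complex element type");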