[22/29] rs6000: Add VSX builtins

Message ID cc60cc7db04986c62e46e0695bae85e3603e27fc.1595809584.git.wschmidt@linux.ibm.com
State New
Headers show
Series
  • rs6000: Auto-generate builtins from descriptions [V2]
Related show

Commit Message

Bill Schmidt July 27, 2020, 2:14 p.m.
From: Bill Schmidt <wschmidt@linux.ibm.com>


2020-07-26  Bill Schmidt  <wschmidt@linux.ibm.com>

	* config/rs6000/rs6000-builtin-new.def: Add VSX builtins.
---
 gcc/config/rs6000/rs6000-builtin-new.def | 840 +++++++++++++++++++++++
 1 file changed, 840 insertions(+)

-- 
2.17.1

Patch

diff --git a/gcc/config/rs6000/rs6000-builtin-new.def b/gcc/config/rs6000/rs6000-builtin-new.def
index 0b79f155389..6c60177e4bb 100644
--- a/gcc/config/rs6000/rs6000-builtin-new.def
+++ b/gcc/config/rs6000/rs6000-builtin-new.def
@@ -1020,3 +1020,843 @@ 
   const vss __builtin_vec_set_v8hi (vss, signed short, const int<3>);
     VEC_SET_V8HI nothing {set}
 
+
+; VSX builtins.
+[vsx]
+  pure vsq __builtin_altivec_lvx_v1ti (signed long long, void *);
+    LVX_V1TI altivec_lvx_v1ti {ldvec}
+
+  pure vd __builtin_altivec_lvx_v2df (signed long long, void *);
+    LVX_V2DF altivec_lvx_v2df {ldvec}
+
+  pure vsll __builtin_altivec_lvx_v2di (signed long long, void *);
+    LVX_V2DI altivec_lvx_v2di {ldvec}
+
+  pure vd __builtin_altivec_lvxl_v2df (signed long long, void *);
+    LVXL_V2DF altivec_lvxl_v2df {ldvec}
+
+  pure vsll __builtin_altivec_lvxl_v2di (signed long long, void *);
+    LVXL_V2DI altivec_lvxl_v2di {ldvec}
+
+  const vd __builtin_altivec_nabs_v2df (vd);
+    NABS_V2DF vsx_nabsv2df2 {}
+
+  const vsll __builtin_altivec_nabs_v2di (vsll);
+    NABS_V2DI nabsv2di2 {}
+
+  void __builtin_altivec_stvx_v2df (vd, signed long long, void *);
+    STVX_V2DF altivec_stvx_v2df {stvec}
+
+  void __builtin_altivec_stvx_v2di (vsll, signed long long, void *);
+    STVX_V2DI altivec_stvx_v2di {stvec}
+
+  void __builtin_altivec_stvxl_v2df (vd, signed long long, void *);
+    STVXL_V2DF altivec_stvxl_v2df {stvec}
+
+  void __builtin_altivec_stvxl_v2di (vsll, signed long long, void *);
+    STVXL_V2DI altivec_stvxl_v2di {stvec}
+
+  const vd __builtin_altivec_vand_v2df (vd, vd);
+    VAND_V2DF andv2df3 {}
+
+  const vsll __builtin_altivec_vand_v2di (vsll, vsll);
+    VAND_V2DI andv2di3 {}
+
+  const vull __builtin_altivec_vand_v2di_uns (vull, vull);
+    VAND_V2DI_UNS andv2di3 {}
+
+  const vd __builtin_altivec_vandc_v2df (vd, vd);
+    VANDC_V2DF andcv2df3 {}
+
+  const vsll __builtin_altivec_vandc_v2di (vsll, vsll);
+    VANDC_V2DI andcv2di3 {}
+
+  const vull __builtin_altivec_vandc_v2di_uns (vull, vull);
+    VANDC_V2DI_UNS andcv2di3 {}
+
+  const vd __builtin_altivec_vnor_v2df (vd, vd);
+    VNOR_V2DF norv2df3 {}
+
+  const vsll __builtin_altivec_vnor_v2di (vsll, vsll);
+    VNOR_V2DI norv2di3 {}
+
+  const vull __builtin_altivec_vnor_v2di_uns (vull, vull);
+    VNOR_V2DI_UNS norv2di3 {}
+
+  const vd __builtin_altivec_vor_v2df (vd, vd);
+    VOR_V2DF iorv2df3 {}
+
+  const vsll __builtin_altivec_vor_v2di (vsll, vsll);
+    VOR_V2DI iorv2di3 {}
+
+  const vull __builtin_altivec_vor_v2di_uns (vull, vull);
+    VOR_V2DI_UNS iorv2di3 {}
+
+  const vd __builtin_altivec_vperm_2df (vd, vd, vuc);
+    VPERM_2DF altivec_vperm_v2df {}
+
+  const vsll __builtin_altivec_vperm_2di (vsll, vsll, vuc);
+    VPERM_2DI altivec_vperm_v2di {}
+
+  const vull __builtin_altivec_vperm_2di_uns (vull, vull, vuc);
+    VPERM_2DI_UNS altivec_vperm_v2di_uns {}
+
+  const vd __builtin_altivec_vreve_v2df (vd);
+    VREVE_V2DF altivec_vrevev2df2 {}
+
+  const vsll __builtin_altivec_vreve_v2di (vsll);
+    VREVE_V2DI altivec_vrevev2di2 {}
+
+  const vd __builtin_altivec_vsel_2df (vd, vd, vop);
+    VSEL_2DF vector_select_v2df {}
+
+  const vsll __builtin_altivec_vsel_2di (vsll, vsll, vbll);
+    VSEL_2DI_B vector_select_v2di {}
+
+  const vull __builtin_altivec_vsel_2di_uns (vull, vull, vull);
+    VSEL_2DI_UNS vector_select_v2di_uns {}
+
+  const vd __builtin_altivec_vsldoi_2df (vd, vd, const int<4>);
+    VSLDOI_2DF altivec_vsldoi_v2df {}
+
+  const vsll __builtin_altivec_vsldoi_2di (vsll, vsll, const int<4>);
+    VSLDOI_2DI altivec_vsldoi_v2di {}
+
+  const vd __builtin_altivec_vxor_v2df (vd, vd);
+    VXOR_V2DF xorv2df3 {}
+
+  const vsll __builtin_altivec_vxor_v2di (vsll, vsll);
+    VXOR_V2DI xorv2di3 {}
+
+  const vull __builtin_altivec_vxor_v2di_uns (vull, vull);
+    VXOR_V2DI_UNS xorv2di3 {}
+
+  const vbc __builtin_vsx_cmpge_16qi (vsc, vsc);
+    CMPGE_16QI vector_nltv16qi {}
+
+  const vbll __builtin_vsx_cmpge_2di (vsll, vsll);
+    CMPGE_2DI vector_nltv2di {}
+
+  const vbi __builtin_vsx_cmpge_4si (vsi, vsi);
+    CMPGE_4SI vector_nltv4si {}
+
+  const vbs __builtin_vsx_cmpge_8hi (vss, vss);
+    CMPGE_8HI vector_nltv8hi {}
+
+  const vbc __builtin_vsx_cmpge_u16qi (vuc, vuc);
+    CMPGE_U16QI vector_nltuv16qi {}
+
+  const vbll __builtin_vsx_cmpge_u2di (vull, vull);
+    CMPGE_U2DI vector_nltuv2di {}
+
+  const vbi __builtin_vsx_cmpge_u4si (vui, vui);
+    CMPGE_U4SI vector_nltuv4si {}
+
+  const vbs __builtin_vsx_cmpge_u8hi (vus, vus);
+    CMPGE_U8HI vector_nltuv8hi {}
+
+  const vbc __builtin_vsx_cmple_16qi (vsc, vsc);
+    CMPLE_16QI vector_ngtv16qi {}
+
+  const vbll __builtin_vsx_cmple_2di (vsll, vsll);
+    CMPLE_2DI vector_ngtv2di {}
+
+  const vbi __builtin_vsx_cmple_4si (vsi, vsi);
+    CMPLE_4SI vector_ngtv4si {}
+
+  const vbs __builtin_vsx_cmple_8hi (vss, vss);
+    CMPLE_8HI vector_ngtv8hi {}
+
+  const vbc __builtin_vsx_cmple_u16qi (vuc, vuc);
+    CMPLE_U16QI vector_ngtuv16qi {}
+
+  const vbll __builtin_vsx_cmple_u2di (vull, vull);
+    CMPLE_U2DI vector_ngtuv2di {}
+
+  const vbi __builtin_vsx_cmple_u4si (vui, vui);
+    CMPLE_U4SI vector_ngtuv4si {}
+
+  const vbs __builtin_vsx_cmple_u8hi (vus, vus);
+    CMPLE_U8HI vector_ngtuv8hi {}
+
+  const vd __builtin_vsx_concat_2df (double, double);
+    CONCAT_2DF vsx_concat_v2df {}
+
+  const vsll __builtin_vsx_concat_2di (signed long long, signed long long);
+    CONCAT_2DI vsx_concat_v2di {}
+
+  const vull __builtin_vsx_concat_2di_uns (unsigned long long, unsigned long long);
+    CONCAT_2DI_UNS vsx_concat_v2di {}
+
+  const vd __builtin_vsx_cpsgndp (vd, vd);
+    CPSGNDP vector_copysignv2df3 {}
+
+  const vf __builtin_vsx_cpsgnsp (vf, vf);
+    CPSGNSP vector_copysignv4sf3 {}
+
+  const vsll __builtin_vsx_div_2di (vsll, vsll);
+    DIV_V2DI vsx_div_v2di {}
+
+  const vd __builtin_vsx_doublee_v4sf (vf);
+    DOUBLEE_V4SF doubleev4sf2 {}
+
+  const vd __builtin_vsx_doublee_v4si (vsi);
+    DOUBLEE_V4SI doubleev4si2 {}
+
+  const vd __builtin_vsx_doubleh_v4sf (vf);
+    DOUBLEH_V4SF doublehv4sf2 {}
+
+  const vd __builtin_vsx_doubleh_v4si (vsi);
+    DOUBLEH_V4SI doublehv4si2 {}
+
+  const vd __builtin_vsx_doublel_v4sf (vf);
+    DOUBLEL_V4SF doublelv4sf2 {}
+
+  const vd __builtin_vsx_doublel_v4si (vsi);
+    DOUBLEL_V4SI doublelv4si2 {}
+
+  const vd __builtin_vsx_doubleo_v4sf (vf);
+    DOUBLEO_V4SF doubleov4sf2 {}
+
+  const vd __builtin_vsx_doubleo_v4si (vsi);
+    DOUBLEO_V4SI doubleov4si2 {}
+
+  const vf __builtin_vsx_floate_v2df (vd);
+    FLOATE_V2DF floatev2df {}
+
+  const vf __builtin_vsx_floate_v2di (vsll);
+    FLOATE_V2DI floatev2di {}
+
+  const vf __builtin_vsx_floato_v2df (vd);
+    FLOATO_V2DF floatov2df {}
+
+  const vf __builtin_vsx_floato_v2di (vsll);
+    FLOATO_V2DI floatov2di {}
+
+; #### For the following, currently the pattern is selected differently
+; depending on big-endian (e.g., vsx_load_v1ti) versus little-endian
+; (e.g., vsx_ld_elemrev_v1ti).  We need to move the choice into a separate
+; pattern for each of these instead.  Right now I only list the little-
+; endian pattern here.  TBD.
+  pure vsq __builtin_vsx_ld_elemrev_v1ti (signed long long, void *);
+    LD_ELEMREV_V1TI vsx_ld_elemrev_v1ti {ldvec}
+
+  pure vd __builtin_vsx_ld_elemrev_v2df (signed long long, void *);
+    LD_ELEMREV_V2DF vsx_ld_elemrev_v2df {ldvec}
+
+  pure vsll __builtin_vsx_ld_elemrev_v2di (signed long long, void *);
+    LD_ELEMREV_V2DI vsx_ld_elemrev_v2di {ldvec}
+
+  pure vf __builtin_vsx_ld_elemrev_v4sf (signed long long, void *);
+    LD_ELEMREV_V4SF vsx_ld_elemrev_v4sf {ldvec}
+
+  pure vsi __builtin_vsx_ld_elemrev_v4si (signed long long, void *);
+    LD_ELEMREV_V4SI vsx_ld_elemrev_v4si {ldvec}
+
+  pure vss __builtin_vsx_ld_elemrev_v8hi (signed long long, void *);
+    LD_ELEMREV_V8HI vsx_ld_elemrev_v8hi {ldvec}
+
+  pure vsc __builtin_vsx_ld_elemrev_v16qi (signed long long, void *);
+    LD_ELEMREV_V16QI vsx_ld_elemrev_v16qi {ldvec}
+
+; There is apparent intent in rs6000-builtin.def to have RS6000_BTC_SPECIAL
+; processing for LXSDX, LXVDSX, and STXSDX, but there are no def_builtin calls
+; for any of them.  At some point, we may want to add a set of built-ins for
+; whichever vector types make sense for these.
+
+  pure vsq __builtin_vsx_lxvd2x_v1ti (signed long long, void *);
+    LXVD2X_V1TI vsx_load_v1ti {ldvec}
+
+  pure vd __builtin_vsx_lxvd2x_v2df (signed long long, void *);
+    LXVD2X_V2DF vsx_load_v2df {ldvec}
+
+  pure vsll __builtin_vsx_lxvd2x_v2di (signed long long, void *);
+    LXVD2X_V2DI vsx_load_v2di {ldvec}
+
+  pure vsc __builtin_vsx_lxvw4x_v16qi (signed long long, void *);
+    LXVW4X_V16QI vsx_load_v16qi {ldvec}
+
+  pure vf __builtin_vsx_lxvw4x_v4sf (signed long long, void *);
+    LXVW4X_V4SF vsx_load_v4sf {ldvec}
+
+  pure vsi __builtin_vsx_lxvw4x_v4si (signed long long, void *);
+    LXVW4X_V4SI vsx_load_v4si {ldvec}
+
+  pure vss __builtin_vsx_lxvw4x_v8hi (signed long long, void *);
+    LXVW4X_V8HI vsx_load_v8hi {ldvec}
+
+  const vd __builtin_vsx_mergeh_2df (vd, vd);
+    VEC_MERGEH_V2DF vsx_mergeh_v2df {}
+
+  const vsll __builtin_vsx_mergeh_2di (vsll, vsll);
+    VEC_MERGEH_V2DI vsx_mergeh_v2di {}
+
+  const vd __builtin_vsx_mergel_2df (vd, vd);
+    VEC_MERGEL_V2DF vsx_mergel_v2df {}
+
+  const vsll __builtin_vsx_mergel_2di (vsll, vsll);
+    VEC_MERGEL_V2DI vsx_mergel_v2di {}
+
+  const vsll __builtin_vsx_mul_2di (vsll, vsll);
+    MUL_V2DI vsx_mul_v2di {}
+
+  const vsq __builtin_vsx_set_1ti (vsq, signed __int128, const int<0,0>);
+    SET_1TI vsx_set_v1ti {set}
+
+  const vuq __builtin_vsx_set_1ti_uns (vuq, unsigned __int128, const int<0,0>);
+    SET_1TI_UNS vsx_set_v1ti {set}
+
+  const vd __builtin_vsx_set_2df (vd, double, const int<0,1>);
+    SET_2DF vsx_set_v2df {set}
+
+  const vsll __builtin_vsx_set_2di (vsll, signed long long, const int<0,1>);
+    SET_2DI vsx_set_v2di {set}
+
+  const vull __builtin_vsx_set_2di_uns (vull, unsigned long long, const int<0,1>);
+    SET_2DI_UNS vsx_set_v2di {set}
+
+  const vd __builtin_vsx_splat_2df (double);
+    SPLAT_2DF vsx_splat_v2df {}
+
+  const vsll __builtin_vsx_splat_2di (signed long long);
+    SPLAT_2DI vsx_splat_v2di {}
+
+  const vull __builtin_vsx_splat_2di_uns (unsigned long long);
+    SPLAT_2DI_UNS vsx_splat_v2di {}
+
+; #### For the following, currently the pattern is selected differently
+; depending on big-endian (e.g., vsx_store_v1ti) versus little-endian
+; (e.g., vsx_st_elemrev_v1ti).  We need to move the choice into a separate
+; pattern for each of these instead.  Right now I only list the little-
+; endian pattern here.  TBD.
+  void __builtin_vsx_st_elemrev_v1ti (vsq, signed long long, void *);
+    ST_ELEMREV_V1TI vsx_st_elemrev_v1ti {stvec}
+
+  void __builtin_vsx_st_elemrev_v2df (vd, signed long long, void *);
+    ST_ELEMREV_V2DF vsx_st_elemrev_v2df {stvec}
+
+  void __builtin_vsx_st_elemrev_v2di (vsll, signed long long, void *);
+    ST_ELEMREV_V2DI vsx_st_elemrev_v2di {stvec}
+
+  void __builtin_vsx_st_elemrev_v4sf (vf, signed long long, void *);
+    ST_ELEMREV_V4SF vsx_st_elemrev_v4sf {stvec}
+
+  void __builtin_vsx_st_elemrev_v4si (vsi, signed long long, void *);
+    ST_ELEMREV_V4SI vsx_st_elemrev_v4si {stvec}
+
+  void __builtin_vsx_st_elemrev_v8hi (vss, signed long long, void *);
+    ST_ELEMREV_V8HI vsx_st_elemrev_v8hi {stvec}
+
+  void __builtin_vsx_st_elemrev_v16qi (vsc, signed long long, void *);
+    ST_ELEMREV_V16QI vsx_st_elemrev_v16qi {stvec}
+
+  void __builtin_vsx_stxvd2x_v1ti (vsq, signed long long, void *);
+    STXVD2X_V1TI vsx_store_v1ti {stvec}
+
+  void __builtin_vsx_stxvd2x_v2df (vd, signed long long, void *);
+    STXVD2X_V2DF vsx_store_v2df {stvec}
+
+  void __builtin_vsx_stxvd2x_v2di (vsll, signed long long, void *);
+    STXVD2X_V2DI vsx_store_v2di {stvec}
+
+  void __builtin_vsx_stxvw4x_v4sf (vf, signed long long, void *);
+    STXVW4X_V4SF vsx_store_v4sf {stvec}
+
+  void __builtin_vsx_stxvw4x_v4si (vsi, signed long long, void *);
+    STXVW4X_V4SI vsx_store_v4si {stvec}
+
+  void __builtin_vsx_stxvw4x_v8hi (vss, signed long long, void *);
+    STXVW4X_V8HI vsx_store_v8hi {stvec}
+
+  void __builtin_vsx_stxvw4x_v16qi (vsc, signed long long, void *);
+    STXVW4X_V16QI vsx_store_v16qi {stvec}
+
+  const vull __builtin_vsx_udiv_2di (vull, vull);
+    UDIV_V2DI vsx_udiv_v2di {}
+
+  const vd __builtin_vsx_uns_doublee_v4si (vui);
+    UNS_DOUBLEE_V4SI unsdoubleev4si2 {}
+
+  const vd __builtin_vsx_uns_doubleh_v4si (vui);
+    UNS_DOUBLEH_V4SI unsdoublehv4si2 {}
+
+  const vd __builtin_vsx_uns_doublel_v4si (vui);
+    UNS_DOUBLEL_V4SI unsdoublelv4si2 {}
+
+  const vd __builtin_vsx_uns_doubleo_v4si (vui);
+    UNS_DOUBLEO_V4SI unsdoubleov4si2 {}
+
+  const vf __builtin_vsx_uns_floate_v2di (vull);
+    UNS_FLOATE_V2DI unsfloatev2di {}
+
+  const vf __builtin_vsx_uns_floato_v2di (vull);
+    UNS_FLOATO_V2DI unsfloatov2di {}
+
+  const signed __int128 __builtin_vsx_vec_ext_v1ti (vsq, signed int);
+    VEC_EXT_V1TI nothing {extract}
+
+  const double __builtin_vsx_vec_ext_v2df (vd, signed int);
+    VEC_EXT_V2DF nothing {extract}
+
+  const signed long long __builtin_vsx_vec_ext_v2di (vsll, signed int);
+    VEC_EXT_V2DI nothing {extract}
+
+  const vsq __builtin_vsx_vec_init_v1ti (signed __int128);
+    VEC_INIT_V1TI nothing {init}
+
+  const vd __builtin_vsx_vec_init_v2df (double, double);
+    VEC_INIT_V2DF nothing {init}
+
+  const vsll __builtin_vsx_vec_init_v2di (signed long long, signed long long);
+    VEC_INIT_V2DI nothing {init}
+
+  const vsq __builtin_vsx_vec_set_v1ti (vsq, signed __int128, const int<0,0>);
+    VEC_SET_V1TI nothing {set}
+
+  const vd __builtin_vsx_vec_set_v2df (vd, double, const int<1>);
+    VEC_SET_V2DF nothing {set}
+
+  const vsll __builtin_vsx_vec_set_v2di (vsll, signed long long, const int<1>);
+    VEC_SET_V2DI nothing {set}
+
+  const vsll __builtin_vsx_vsigned_v2df (vd);
+    VEC_VSIGNED_V2DF vsx_xvcvdpsxds {}
+
+  const vsi __builtin_vsx_vsigned_v4sf (vf);
+    VEC_VSIGNED_V4SF vsx_xvcvspsxws {}
+
+  const vsll __builtin_vsx_vsignede_v2df (vd);
+    VEC_VSIGNEDE_V2DF vsignede_v2df {}
+
+  const vsll __builtin_vsx_vsignedo_v2df (vd);
+    VEC_VSIGNEDO_V2DF vsignedo_v2df {}
+
+  const vull __builtin_vsx_vunsigned_v2df (vd);
+    VEC_VUNSIGNED_V2DF vsx_xvcvdpuxds {}
+
+  const vui __builtin_vsx_vunsigned_v4sf (vf);
+    VEC_VUNSIGNED_V4SF vsx_xvcvspuxws {}
+
+  const vull __builtin_vsx_vunsignede_v2df (vd);
+    VEC_VUNSIGNEDE_V2DF vunsignede_v2df {}
+
+  const vull __builtin_vsx_vunsignedo_v2df (vd);
+    VEC_VUNSIGNEDO_V2DF vunsignedo_v2df {}
+
+  const vf __builtin_vsx_xscvdpsp (vd);
+    XSCVDPSP vsx_xscvdpsp {}
+
+  const vd __builtin_vsx_xscvspdp (vf);
+    XSCVSPDP vsx_xscvspdp {}
+
+  const double __builtin_vsx_xsmaxdp (double, double);
+    XSMAXDP smaxdf3 {}
+
+  const double __builtin_vsx_xsmindp (double, double);
+    XSMINDP smindf3 {}
+
+  const vd __builtin_vsx_xsrdpi (vd);
+    XSRDPI vsx_xsrdpi {}
+
+  const vd __builtin_vsx_xsrdpic (vd);
+    XSRDPIC vsx_xsrdpic {}
+
+  const vd __builtin_vsx_xsrdpim (vd);
+    XSRDPIM floordf2 {}
+
+  const vd __builtin_vsx_xsrdpip (vd);
+    XSRDPIP ceildf2 {}
+
+  const vd __builtin_vsx_xsrdpiz (vd);
+    XSRDPIZ btruncdf2 {}
+
+  const unsigned int __builtin_vsx_xstdivdp_fe (vd, vd);
+    XSTDIVDP_FE vsx_tdivdf3_fe {}
+
+  const unsigned int __builtin_vsx_xstdivdp_fg (vd, vd);
+    XSTDIVDP_FG vsx_tdivdf3_fg {}
+
+  const unsigned int __builtin_vsx_xstsqrtdp_fe (vd);
+    XSTSQRTDP_FE vsx_tsqrtdf2_fe {}
+
+  const unsigned int __builtin_vsx_xstsqrtdp_fg (vd);
+    XSTSQRTDP_FG vsx_tsqrtdf2_fg {}
+
+  const vd __builtin_vsx_xvabsdp (vd);
+    XVABSDP absv2df2 {}
+
+  const vf __builtin_vsx_xvabssp (vf);
+    XVABSSP absv4sf2 {}
+
+  fpmath vd __builtin_vsx_xvadddp (vd, vd);
+    XVADDDP addv2df3 {}
+
+  fpmath vf __builtin_vsx_xvaddsp (vf, vf);
+    XVADDSP addv4sf3 {}
+
+  const vbll __builtin_vsx_xvcmpeqdp (vd, vd);
+    XVCMPEQDP vector_eqv2df {}
+
+; This predicate isn't used in the ALL or ANY interfaces; it appears
+; to return a vector rather than an integer as other predicates do.
+  const vull __builtin_vsx_xvcmpeqdp_p (vd, vd);
+    XVCMPEQDP_P vector_eq_v2df_p {pred}
+
+  const vbi __builtin_vsx_xvcmpeqsp (vf, vf);
+    XVCMPEQSP vector_eqv4sf {}
+
+; This predicate isn't used in the ALL or ANY interfaces; it appears
+; to return a vector rather than an integer as other predicates do.
+  const vui __builtin_vsx_xvcmpeqsp_p (vf, vf);
+    XVCMPEQSP_P vector_eq_v4sf_p {pred}
+
+  const vbll __builtin_vsx_xvcmpgedp (vd, vd);
+    XVCMPGEDP vector_gev2df {}
+
+; This predicate isn't used in the ALL or ANY interfaces; it appears
+; to return a vector rather than an integer as other predicates do.
+  const vull __builtin_vsx_xvcmpgedp_p (vd, vd);
+    XVCMPGEDP_P vector_ge_v2df_p {pred}
+
+  const vbi __builtin_vsx_xvcmpgesp (vf, vf);
+    XVCMPGESP vector_gev4sf {}
+
+; This predicate isn't used in the ALL or ANY interfaces; it appears
+; to return a vector rather than an integer as other predicates do.
+  const vui __builtin_vsx_xvcmpgesp_p (vf, vf);
+    XVCMPGESP_P vector_ge_v4sf_p {pred}
+
+  const vbll __builtin_vsx_xvcmpgtdp (vd, vd);
+    XVCMPGTDP vector_gtv2df {}
+
+; This predicate isn't used in the ALL or ANY interfaces; it appears
+; to return a vector rather than an integer as other predicates do.
+  const vull __builtin_vsx_xvcmpgtdp_p (vd, vd);
+    XVCMPGTDP_P vector_gt_v2df_p {pred}
+
+  const vbi __builtin_vsx_xvcmpgtsp (vf, vf);
+    XVCMPGTSP vector_gtv4sf {}
+
+; This predicate isn't used in the ALL or ANY interfaces; it appears
+; to return a vector rather than an integer as other predicates do.
+  const vui __builtin_vsx_xvcmpgtsp_p (vf, vf);
+    XVCMPGTSP_P vector_gt_v4sf_p {pred}
+
+  const vsll __builtin_vsx_xvcvdpsxds (vd);
+    XVCVDPSXDS vsx_fix_truncv2dfv2di2 {}
+
+  const vsll __builtin_vsx_xvcvdpsxds_scale (vd, const int);
+    XVCVDPSXDS_SCALE vsx_xvcvdpsxds_scale {}
+
+  const vsi __builtin_vsx_xvcvdpsxws (vd);
+    XVCVDPSXWS vsx_xvcvdpsxws {}
+
+  const vull __builtin_vsx_xvcvdpuxds (vd);
+    XVCVDPUXDS vsx_fixuns_truncv2dfv2di2 {}
+
+  const vull __builtin_vsx_xvcvdpuxds_scale (vd, const int);
+    XVCVDPUXDS_SCALE vsx_xvcvdpuxds_scale {}
+
+; Redundant with __builtin_vsx_xvcvdpuxds
+  const vull __builtin_vsx_xvcvdpuxds_uns (vd);
+    XVCVDPUXDS_UNS vsx_fixuns_truncv2dfv2di2 {}
+
+  const vui __builtin_vsx_xvcvdpuxws (vd);
+    XVCVDPUXWS vsx_xvcvdpuxws {}
+
+  const vsll __builtin_vsx_xvcvspsxds (vf);
+    XVCVSPSXDS vsx_xvcvspsxds {}
+
+  const vsi __builtin_vsx_xvcvspsxws (vf);
+    XVCVSPSXWS vsx_fix_truncv4sfv4si2 {}
+
+  const vull __builtin_vsx_xvcvspuxds (vf);
+    XVCVSPUXDS vsx_xvcvspuxds {}
+
+  const vui __builtin_vsx_xvcvspuxws (vf);
+    XVCVSPUXWS vsx_fixuns_truncv4sfv4si2 {}
+
+  const vd __builtin_vsx_xvcvsxddp (vsll);
+    XVCVSXDDP vsx_floatv2div2df2 {}
+
+  const vd __builtin_vsx_xvcvsxddp_scale (vsll, const int);
+    XVCVSXDDP_SCALE vsx_xvcvsxddp_scale {}
+
+  const vf __builtin_vsx_xvcvsxdsp (vsll);
+    XVCVSXDSP vsx_xvcvsxdsp {}
+
+  const vd __builtin_vsx_xvcvsxwdp (vsi);
+    XVCVSXWDP vsx_xvcvsxwdp {}
+
+; Need to pick one or the other here!!  ####
+  const vf __builtin_vsx_xvcvsxwsp (vsi);
+    XVCVSXWSP vsx_floatv4siv4sf2 {}
+  const vf __builtin_vsx_xvcvsxwsp (vsi);
+    XVCVSXWSP_V4SF vsx_xvcvsxwsp {}
+
+  const vd __builtin_vsx_xvcvuxddp (vull);
+    XVCVUXDDP vsx_floatunsv2div2df2 {}
+
+  const vd __builtin_vsx_xvcvuxddp_scale (vull, const int);
+    XVCVUXDDP_SCALE vsx_xvcvuxddp_scale {}
+
+; Redundant with __builtin_vsx_xvcvuxddp
+  const vd __builtin_vsx_xvcvuxddp_uns (vull);
+    XVCVUXDDP_UNS vsx_floatunsv2div2df2 {}
+
+  const vf __builtin_vsx_xvcvuxdsp (vull);
+    XVCVUXDSP vsx_xvcvuxdsp {}
+
+  const vd __builtin_vsx_xvcvuxwdp (vui);
+    XVCVUXWDP vsx_xvcvuxwdp {}
+
+; Need to pick one or the other here!! ####
+  const vf __builtin_vsx_xvcvuxwsp (vui);
+    XVCVUXWSP vsx_floatunsv4siv4sf2 {}
+  const vf __builtin_vsx_xvcvuxwsp (vui);
+    XVCVUXWSP_V4SF vsx_xvcvuxwsp {}
+
+  fpmath vd __builtin_vsx_xvdivdp (vd, vd);
+    XVDIVDP divv2df3 {}
+
+  fpmath vf __builtin_vsx_xvdivsp (vf, vf);
+    XVDIVSP divv4sf3 {}
+
+  const vd __builtin_vsx_xvmadddp (vd, vd, vd);
+    XVMADDDP fmav2df4 {}
+
+  const vf __builtin_vsx_xvmaddsp (vf, vf, vf);
+    XVMADDSP fmav4sf4 {}
+
+  const vd __builtin_vsx_xvmaxdp (vd, vd);
+    XVMAXDP smaxv2df3 {}
+
+  const vf __builtin_vsx_xvmaxsp (vf, vf);
+    XVMAXSP smaxv4sf3 {}
+
+  const vd __builtin_vsx_xvmindp (vd, vd);
+    XVMINDP sminv2df3 {}
+
+  const vf __builtin_vsx_xvminsp (vf, vf);
+    XVMINSP sminv4sf3 {}
+
+  const vd __builtin_vsx_xvmsubdp (vd, vd, vd);
+    XVMSUBDP fmsv2df4 {}
+
+  const vf __builtin_vsx_xvmsubsp (vf, vf, vf);
+    XVMSUBSP fmsv4sf4 {}
+
+  fpmath vd __builtin_vsx_xvmuldp (vd, vd);
+    XVMULDP mulv2df3 {}
+
+  fpmath vf __builtin_vsx_xvmulsp (vf, vf);
+    XVMULSP mulv4sf3 {}
+
+  const vd __builtin_vsx_xvnabsdp (vd);
+    XVNABSDP vsx_nabsv2df2 {}
+
+  const vf __builtin_vsx_xvnabssp (vf);
+    XVNABSSP vsx_nabsv4sf2 {}
+
+  const vd __builtin_vsx_xvnegdp (vd);
+    XVNEGDP negv2df2 {}
+
+  const vf __builtin_vsx_xvnegsp (vf);
+    XVNEGSP negv4sf2 {}
+
+  const vd __builtin_vsx_xvnmadddp (vd, vd, vd);
+    XVNMADDDP nfmav2df4 {}
+
+  const vf __builtin_vsx_xvnmaddsp (vf, vf, vf);
+    XVNMADDSP nfmav4sf4 {}
+
+  const vd __builtin_vsx_xvnmsubdp (vd, vd, vd);
+    XVNMSUBDP nfmsv2df4 {}
+
+  const vf __builtin_vsx_xvnmsubsp (vf, vf, vf);
+    XVNMSUBSP nfmsv4sf4 {}
+
+  const vd __builtin_vsx_xvrdpi (vd);
+    XVRDPI vsx_xvrdpi {}
+
+  const vd __builtin_vsx_xvrdpic (vd);
+    XVRDPIC vsx_xvrdpic {}
+
+  const vd __builtin_vsx_xvrdpim (vd);
+    XVRDPIM vsx_floorv2df2 {}
+
+  const vd __builtin_vsx_xvrdpip (vd);
+    XVRDPIP vsx_ceilv2df2 {}
+
+  const vd __builtin_vsx_xvrdpiz (vd);
+    XVRDPIZ vsx_btruncv2df2 {}
+
+  fpmath vd __builtin_vsx_xvrecipdivdp (vd, vd);
+    RECIP_V2DF recipv2df3 {}
+
+  fpmath vf __builtin_vsx_xvrecipdivsp (vf, vf);
+    RECIP_V4SF recipv4sf3 {}
+
+  const vd __builtin_vsx_xvredp (vd);
+    XVREDP vsx_frev2df2 {}
+
+  const vf __builtin_vsx_xvresp (vf);
+    XVRESP vsx_frev4sf2 {}
+
+  const vf __builtin_vsx_xvrspi (vf);
+    XVRSPI vsx_xvrspi {}
+
+  const vf __builtin_vsx_xvrspic (vf);
+    XVRSPIC vsx_xvrspic {}
+
+  const vf __builtin_vsx_xvrspim (vf);
+    XVRSPIM vsx_floorv4sf2 {}
+
+  const vf __builtin_vsx_xvrspip (vf);
+    XVRSPIP vsx_ceilv4sf2 {}
+
+  const vf __builtin_vsx_xvrspiz (vf);
+    XVRSPIZ vsx_btruncv4sf2 {}
+
+  const vd __builtin_vsx_xvrsqrtdp (vd);
+    RSQRT_2DF rsqrtv2df2 {}
+
+  const vf __builtin_vsx_xvrsqrtsp (vf);
+    RSQRT_4SF rsqrtv4sf2 {}
+
+  const vd __builtin_vsx_xvrsqrtedp (vd);
+    XVRSQRTEDP rsqrtev2df2 {}
+
+  const vf __builtin_vsx_xvrsqrtesp (vf);
+    XVRSQRTESP rsqrtev4sf2 {}
+
+  const vd __builtin_vsx_xvsqrtdp (vd);
+    XVSQRTDP sqrtv2df2 {}
+
+  const vf __builtin_vsx_xvsqrtsp (vf);
+    XVSQRTSP sqrtv4sf2 {}
+
+  fpmath vd __builtin_vsx_xvsubdp (vd, vd);
+    XVSUBDP subv2df3 {}
+
+  fpmath vf __builtin_vsx_xvsubsp (vf, vf);
+    XVSUBSP subv4sf3 {}
+
+  const unsigned int __builtin_vsx_xvtdivdp_fe (vd, vd);
+    XVTDIVDP_FE vsx_tdivv2df3_fe {}
+
+  const unsigned int __builtin_vsx_xvtdivdp_fg (vd, vd);
+    XVTDIVDP_FG vsx_tdivv2df3_fg {}
+
+  const unsigned int __builtin_vsx_xvtdivsp_fe (vf, vf);
+    XVTDIVSP_FE vsx_tdivv4sf3_fe {}
+
+  const unsigned int __builtin_vsx_xvtdivsp_fg (vf, vf);
+    XVTDIVSP_FG vsx_tdivv4sf3_fg {}
+
+  const unsigned int __builtin_vsx_xvtsqrtdp_fe (vd);
+    XVTSQRTDP_FE vsx_tsqrtv2df2_fe {}
+
+  const unsigned int __builtin_vsx_xvtsqrtdp_fg (vd);
+    XVTSQRTDP_FG vsx_tsqrtv2df2_fg {}
+
+  const unsigned int __builtin_vsx_xvtsqrtsp_fe (vf);
+    XVTSQRTSP_FE vsx_tsqrtv4sf2_fe {}
+
+  const unsigned int __builtin_vsx_xvtsqrtsp_fg (vf);
+    XVTSQRTSP_FG vsx_tsqrtv4sf2_fg {}
+
+  const vf __builtin_vsx_xxmrghw (vf, vf);
+    XXMRGHW_4SF vsx_xxmrghw_v4sf {}
+
+  const vsi __builtin_vsx_xxmrghw_4si (vsi, vsi);
+    XXMRGHW_4SI vsx_xxmrghw_v4si {}
+
+  const vf __builtin_vsx_xxmrglw (vf, vf);
+    XXMRGLW_4SF vsx_xxmrglw_v4sf {}
+
+  const vsi __builtin_vsx_xxmrglw_4si (vsi, vsi);
+    XXMRGLW_4SI vsx_xxmrglw_v4si {}
+
+  const vsc __builtin_vsx_xxpermdi_16qi (vsc, vsc, const int<1>);
+    XXPERMDI_16QI vsx_xxpermdi_v16qi {}
+
+  const vsq __builtin_vsx_xxpermdi_1ti (vsq, vsq, const int<1>);
+    XXPERMDI_1TI vsx_xxpermdi_v1ti {}
+
+  const vd __builtin_vsx_xxpermdi_2df (vd, vd, const int<1>);
+    XXPERMDI_2DF vsx_xxpermdi_v2df {}
+
+  const vsll __builtin_vsx_xxpermdi_2di (vsll, vsll, const int<1>);
+    XXPERMDI_2DI vsx_xxpermdi_v2di {}
+
+  const vf __builtin_vsx_xxpermdi_4sf (vf, vf, const int<1>);
+    XXPERMDI_4SF vsx_xxpermdi_v4sf {}
+
+  const vsi __builtin_vsx_xxpermdi_4si (vsi, vsi, const int<1>);
+    XXPERMDI_4SI vsx_xxpermdi_v4si {}
+
+  const vss __builtin_vsx_xxpermdi_8hi (vss, vss, const int<1>);
+    XXPERMDI_8HI vsx_xxpermdi_v8hi {}
+
+  const vsc __builtin_vsx_xxsel_16qi (vsc, vsc, vsc);
+    XXSEL_16QI vector_select_v16qi {}
+
+  const vuc __builtin_vsx_xxsel_16qi_uns (vuc, vuc, vuc);
+    XXSEL_16QI_UNS vector_select_v16qi_uns {}
+
+  const vsq __builtin_vsx_xxsel_1ti (vsq, vsq, vsq);
+    XXSEL_1TI vector_select_v1ti {}
+
+  const vuq __builtin_vsx_xxsel_1ti_uns (vuq, vuq, vuq);
+    XXSEL_1TI_UNS vector_select_v1ti_uns {}
+
+  const vd __builtin_vsx_xxsel_2df (vd, vd, vd);
+    XXSEL_2DF vector_select_v2df {}
+
+  const vsll __builtin_vsx_xxsel_2di (vsll, vsll, vsll);
+    XXSEL_2DI vector_select_v2di {}
+
+  const vull __builtin_vsx_xxsel_2di_uns (vull, vull, vull);
+    XXSEL_2DI_UNS vector_select_v2di_uns {}
+
+  const vf __builtin_vsx_xxsel_4sf (vf, vf, vf);
+    XXSEL_4SF vector_select_v4sf {}
+
+  const vsi __builtin_vsx_xxsel_4si (vsi, vsi, vsi);
+    XXSEL_4SI vector_select_v4si {}
+
+  const vui __builtin_vsx_xxsel_4si_uns (vui, vui, vui);
+    XXSEL_4SI_UNS vector_select_v4si_uns {}
+
+  const vss __builtin_vsx_xxsel_8hi (vss, vss, vss);
+    XXSEL_8HI vector_select_v8hi {}
+
+  const vus __builtin_vsx_xxsel_8hi_uns (vus, vus, vus);
+    XXSEL_8HI_UNS vector_select_v8hi_uns {}
+
+  const vsc __builtin_vsx_xxsldwi_16qi (vsc, vsc, const int<5>);
+    XXSLDWI_16QI vsx_xxsldwi_v16qi {}
+
+  const vd __builtin_vsx_xxsldwi_2df (vd, vd, const int<5>);
+    XXSLDWI_2DF vsx_xxsldwi_v2df {}
+
+  const vsll __builtin_vsx_xxsldwi_2di (vsll, vsll, const int<5>);
+    XXSLDWI_2DI vsx_xxsldwi_v2di {}
+
+  const vf __builtin_vsx_xxsldwi_4sf (vf, vf, const int<5>);
+    XXSLDWI_4SF vsx_xxsldwi_v4sf {}
+
+  const vsi __builtin_vsx_xxsldwi_4si (vsi, vsi, const int<5>);
+    XXSLDWI_4SI vsx_xxsldwi_v4si {}
+
+  const vss __builtin_vsx_xxsldwi_8hi (vss, vss, const int<5>);
+    XXSLDWI_8HI vsx_xxsldwi_v8hi {}
+
+  const vd __builtin_vsx_xxspltd_2df (vd, const int<1>);
+    XXSPLTD_V2DF vsx_xxspltd_v2df {}
+
+  const vsll __builtin_vsx_xxspltd_2di (vsll, const int<1>);
+    XXSPLTD_V2DI vsx_xxspltd_v2di {}
+
+