[05/10] Experiment with using optinfo for vectorization

Message ID 1527626483-4723-6-git-send-email-dmalcolm@redhat.com
State New
Headers show
Series
  • RFC: Prototype of compiler-assisted performance analysis
Related show

Commit Message

David Malcolm May 29, 2018, 8:41 p.m.
This patch is a mixture of hand-written changes and a lot
of search-and-replace, converting uses of dump_printf_loc
in the vectorization code to using optinfo.

...and probably a bunch of places that got touched by
the search-and-replace, but need some kind of further clean-up;
it's a prototype.

The use of VECT_OPTINFO_SCOPE means that we capture the
nested structure of what's calling what within the
vectorization code, and where the other messages fit.

gcc/ChangeLog:
	* Makefile.in (GTFILES): Add $(srcdir)/tree-vectorizer.h and
	$(srcdir)/tree-vectorizer.c.
	* gengtype.c (open_base_files): Add "tree-vectorizer.h".
	* tree-vect-data-refs.c: Port various code to use optinfo.  A lot
	of this involved simple search-and-replace.
	(dump_lower_bound): Convert to...
	(operator<<): ...this, for vec_lower_bound.
	* tree-vect-loop-manip.c: Likewise.
	* tree-vect-loop.c: Likewise.
	(emit_optinfo_at_vect_location): New function.
	* tree-vect-patterns.c: Likewise.
	* tree-vect-slp.c: Likewise.
	* tree-vect-stmts.c: Likewise.
	* tree-vectorizer.c: Likewise.
	* tree-vectorizer.h: Include "optinfo.h".
	(vect_optinfo_location): New decl.
	(emit_optinfo_at_vect_location): New decl.
	(OPTINFO_VECT): New macro.
	(OPTINFO_VECT_SUCCESS): New macro.
	(OPTINFO_VECT_FAILURE): New macro.
	(OPTINFO_VECT_NOTE): New macro.
	(VECT_OPTINFO_SCOPE): New macro.
---
 gcc/Makefile.in            |    2 +
 gcc/gengtype.c             |    2 +-
 gcc/tree-vect-data-refs.c  |  979 +++++++++++++++----------------
 gcc/tree-vect-loop-manip.c |   98 ++--
 gcc/tree-vect-loop.c       | 1390 +++++++++++++++++++++-----------------------
 gcc/tree-vect-patterns.c   |  100 ++--
 gcc/tree-vect-slp.c        |  507 ++++++++--------
 gcc/tree-vect-stmts.c      |  617 ++++++++++----------
 gcc/tree-vectorizer.c      |   42 +-
 gcc/tree-vectorizer.h      |   37 ++
 10 files changed, 1808 insertions(+), 1966 deletions(-)

-- 
1.8.5.3

Patch

diff --git a/gcc/Makefile.in b/gcc/Makefile.in
index 5ae5713..459db0f 100644
--- a/gcc/Makefile.in
+++ b/gcc/Makefile.in
@@ -2593,6 +2593,8 @@  GTFILES = $(CPP_ID_DATA_H) $(srcdir)/input.h $(srcdir)/coretypes.h \
   $(srcdir)/hsa-common.c \
   $(srcdir)/calls.c \
   $(srcdir)/optinfo.h \
+  $(srcdir)/tree-vectorizer.h \
+  $(srcdir)/tree-vectorizer.c \
   @all_gtfiles@
 
 # Compute the list of GT header files from the corresponding C sources,
diff --git a/gcc/gengtype.c b/gcc/gengtype.c
index 68455f0..06aef6c 100644
--- a/gcc/gengtype.c
+++ b/gcc/gengtype.c
@@ -1725,7 +1725,7 @@  open_base_files (void)
       "except.h", "output.h",  "cfgloop.h", "target.h", "lto-streamer.h",
       "target-globals.h", "ipa-ref.h", "cgraph.h", "symbol-summary.h",
       "ipa-prop.h", "ipa-fnsummary.h", "dwarf2out.h", "omp-offload.h",
-      "optinfo.h", NULL
+      "optinfo.h", "tree-vectorizer.h", NULL
     };
     const char *const *ifp;
     outf_p gtype_desc_c;
diff --git a/gcc/tree-vect-data-refs.c b/gcc/tree-vect-data-refs.c
index ebc56c0..b33213c 100644
--- a/gcc/tree-vect-data-refs.c
+++ b/gcc/tree-vect-data-refs.c
@@ -72,28 +72,29 @@  vect_lanes_optab_supported_p (const char *name, convert_optab optab,
       limit_p = !targetm.array_mode_supported_p (mode, count);
       if (!int_mode_for_size (bits, limit_p).exists (&array_mode))
 	{
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			     "no array mode for %s["
-			     HOST_WIDE_INT_PRINT_DEC "]\n",
-			     GET_MODE_NAME (mode), count);
+	  if (optinfo_enabled_p ())
+	    OPTINFO_VECT_FAILURE
+	      << optinfo_printf ("no array mode for %s["
+				 HOST_WIDE_INT_PRINT_DEC "]",
+				 GET_MODE_NAME (mode), count);
 	  return false;
 	}
     }
 
   if (convert_optab_handler (optab, array_mode, mode) == CODE_FOR_nothing)
     {
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-                         "cannot use %s<%s><%s>\n", name,
-                         GET_MODE_NAME (array_mode), GET_MODE_NAME (mode));
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_FAILURE
+	  << optinfo_printf ("cannot use %s<%s><%s>", name,
+			     GET_MODE_NAME (array_mode), GET_MODE_NAME (mode));
       return false;
     }
 
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location,
-                     "can use %s<%s><%s>\n", name, GET_MODE_NAME (array_mode),
-                     GET_MODE_NAME (mode));
+  if (optinfo_enabled_p ())
+    OPTINFO_VECT_NOTE
+      << optinfo_printf ("can use %s<%s><%s>", name,
+			 GET_MODE_NAME (array_mode),
+			 GET_MODE_NAME (mode));
 
   return true;
 }
@@ -181,11 +182,12 @@  vect_check_nonzero_value (loop_vec_info loop_vinfo, tree value)
     if (checks[i] == value)
       return;
 
-  if (dump_enabled_p ())
+  if (optinfo_enabled_p ())
     {
-      dump_printf_loc (MSG_NOTE, vect_location, "need run-time check that ");
-      dump_generic_expr (MSG_NOTE, TDF_SLIM, value);
-      dump_printf (MSG_NOTE, " is nonzero\n");
+      OPTINFO_VECT_NOTE
+	<< "need run-time check that "
+	<< slim (value)
+	<< " is nonzero";
     }
   LOOP_VINFO_CHECK_NONZERO (loop_vinfo).safe_push (value);
 }
@@ -338,32 +340,26 @@  vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
       if (STMT_VINFO_GATHER_SCATTER_P (stmtinfo_a)
 	  || STMT_VINFO_GATHER_SCATTER_P (stmtinfo_b))
 	{
-	  if (dump_enabled_p ())
+	  if (optinfo_enabled_p ())
 	    {
-	      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			       "versioning for alias not supported for: "
-			       "can't determine dependence between ");
-	      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
-				 DR_REF (dra));
-	      dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
-	      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
-				 DR_REF (drb));
-	      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
+	      OPTINFO_VECT_FAILURE
+		<< ("versioning for alias not supported for: "
+		    "can't determine dependence between ")
+		<< slim (DR_REF (dra))
+		<< " and "
+		<< slim (DR_REF (drb));
 	    }
 	  return true;
 	}
 
-      if (dump_enabled_p ())
+      if (optinfo_enabled_p ())
 	{
-	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			   "versioning for alias required: "
-			   "can't determine dependence between ");
-	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
-			     DR_REF (dra));
-	  dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
-	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
-			     DR_REF (drb));
-	  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
+	  OPTINFO_VECT_FAILURE
+	    << ("versioning for alias required: "
+		"can't determine dependence between ")
+	    << slim (DR_REF (dra))
+	    << " and "
+	    << slim (DR_REF (drb));
 	}
 
       /* Add to list of ddrs that need to be tested at run-time.  */
@@ -386,30 +382,26 @@  vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
       if (STMT_VINFO_GATHER_SCATTER_P (stmtinfo_a)
 	  || STMT_VINFO_GATHER_SCATTER_P (stmtinfo_b))
 	{
-	  if (dump_enabled_p ())
+	  if (optinfo_enabled_p ())
 	    {
-	      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			       "versioning for alias not supported for: "
-			       "bad dist vector for ");
-	      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
-				 DR_REF (dra));
-	      dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
-	      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
-				 DR_REF (drb));
-	      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
+	      OPTINFO_VECT_FAILURE
+		<< ("versioning for alias not supported for: "
+		    "bad dist vector for ")
+		<< slim (DR_REF (dra))
+		<< " and "
+		<< slim (DR_REF (drb));
 	    }
 	  return true;
 	}
 
-      if (dump_enabled_p ())
+      if (optinfo_enabled_p ())
         {
-          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-                           "versioning for alias required: "
-                           "bad dist vector for ");
-          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (dra));
-          dump_printf (MSG_MISSED_OPTIMIZATION,  " and ");
-          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (drb));
-          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
+          OPTINFO_VECT_FAILURE
+	    << ("versioning for alias required: "
+		"bad dist vector for ")
+	    << slim (DR_REF (dra))
+	    << " and "
+	    << slim (DR_REF (drb));
         }
       /* Add to list of ddrs that need to be tested at run-time.  */
       return !vect_mark_for_runtime_alias_test (ddr, loop_vinfo);
@@ -426,20 +418,19 @@  vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
     {
       int dist = dist_v[loop_depth];
 
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_NOTE, vect_location,
-                         "dependence distance  = %d.\n", dist);
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_NOTE
+	  << optinfo_printf ("dependence distance  = %d", dist);
 
       if (dist == 0)
 	{
-	  if (dump_enabled_p ())
+	  if (optinfo_enabled_p ())
 	    {
-	      dump_printf_loc (MSG_NOTE, vect_location,
-	                       "dependence distance == 0 between ");
-	      dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
-	      dump_printf (MSG_NOTE, " and ");
-	      dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
-	      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
+	      OPTINFO_VECT_NOTE
+		<< "dependence distance == 0 between "
+		<< slim (DR_REF (dra))
+		<< " and "
+		<< slim (DR_REF (drb));
 	    }
 
 	  /* When we perform grouped accesses and perform implicit CSE
@@ -462,9 +453,9 @@  vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
 	     where loads from the group interleave with the store.  */
 	  if (!vect_preserves_scalar_order_p (DR_STMT (dra), DR_STMT (drb)))
 	    {
-	      if (dump_enabled_p ())
-		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-				 "READ_WRITE dependence in interleaving.\n");
+	      if (optinfo_enabled_p ())
+		OPTINFO_VECT_FAILURE
+		  << "READ_WRITE dependence in interleaving";
 	      return true;
 	    }
 
@@ -475,9 +466,9 @@  vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
 		vect_check_nonzero_value (loop_vinfo, indicator);
 	      else if (integer_zerop (indicator))
 		{
-		  if (dump_enabled_p ())
-		    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-				 "access also has a zero step\n");
+		  if (optinfo_enabled_p ())
+		    OPTINFO_VECT_FAILURE
+		      << "access also has a zero step";
 		  return true;
 		}
 	    }
@@ -489,9 +480,9 @@  vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
 	  /* If DDR_REVERSED_P the order of the data-refs in DDR was
 	     reversed (to make distance vector positive), and the actual
 	     distance is negative.  */
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-	                     "dependence distance negative.\n");
+	  if (optinfo_enabled_p ())
+	    OPTINFO_VECT_FAILURE
+	      << "dependence distance negative";
 	  /* Record a negative dependence distance to later limit the
 	     amount of stmt copying / unrolling we can perform.
 	     Only need to handle read-after-write dependence.  */
@@ -508,31 +499,30 @@  vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
 	  /* The dependence distance requires reduction of the maximal
 	     vectorization factor.  */
 	  *max_vf = abs (dist);
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_NOTE, vect_location,
-	                     "adjusting maximal vectorization factor to %i\n",
-	                     *max_vf);
+	  if (optinfo_enabled_p ())
+	    OPTINFO_VECT_NOTE
+	      << optinfo_printf ("adjusting maximal vectorization factor to %i",
+				 *max_vf);
 	}
 
       if (abs_dist >= *max_vf)
 	{
 	  /* Dependence distance does not create dependence, as far as
 	     vectorization is concerned, in this case.  */
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_NOTE, vect_location,
-	                     "dependence distance >= VF.\n");
+	  if (optinfo_enabled_p ())
+	    OPTINFO_VECT_NOTE
+	      << optinfo_printf ("dependence distance >= VF");
 	  continue;
 	}
 
-      if (dump_enabled_p ())
+      if (optinfo_enabled_p ())
 	{
-	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-	               "not vectorized, possible dependence "
-	               "between data-refs ");
-	  dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
-	  dump_printf (MSG_NOTE,  " and ");
-	  dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
-	  dump_printf (MSG_NOTE,  "\n");
+	  OPTINFO_VECT_FAILURE
+	    << ("not vectorized, possible dependence "
+		"between data-refs ")
+	    << slim (DR_REF (dra))
+	    << " and "
+	    << slim (DR_REF (drb));
 	}
 
       return true;
@@ -554,9 +544,7 @@  vect_analyze_data_ref_dependences (loop_vec_info loop_vinfo,
   unsigned int i;
   struct data_dependence_relation *ddr;
 
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location,
-                     "=== vect_analyze_data_ref_dependences ===\n");
+  VECT_OPTINFO_SCOPE ("vect_analyze_data_ref_dependences");
 
   LOOP_VINFO_DDRS (loop_vinfo)
     .create (LOOP_VINFO_DATAREFS (loop_vinfo).length ()
@@ -619,24 +607,22 @@  vect_slp_analyze_data_ref_dependence (struct data_dependence_relation *ddr)
   /* Unknown data dependence.  */
   if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
     {
-      if  (dump_enabled_p ())
+      if  (optinfo_enabled_p ())
 	{
-	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			   "can't determine dependence between ");
-	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (dra));
-	  dump_printf (MSG_MISSED_OPTIMIZATION,  " and ");
-	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (drb));
-	  dump_printf (MSG_MISSED_OPTIMIZATION,  "\n");
+	  OPTINFO_VECT_FAILURE
+	    << "can't determine dependence between "
+	    << slim (DR_REF (dra))
+	    << " and "
+	    << slim (DR_REF (drb));
 	}
     }
-  else if (dump_enabled_p ())
+  else if (optinfo_enabled_p ())
     {
-      dump_printf_loc (MSG_NOTE, vect_location,
-		       "determined dependence between ");
-      dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
-      dump_printf (MSG_NOTE, " and ");
-      dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
-      dump_printf (MSG_NOTE,  "\n");
+      OPTINFO_VECT_NOTE
+	<< "determined dependence between "
+	<< slim (DR_REF (dra))
+	<< " and "
+	<< slim (DR_REF (drb));
     }
 
   return true;
@@ -724,9 +710,7 @@  vect_slp_analyze_node_dependences (slp_instance instance, slp_tree node,
 bool
 vect_slp_analyze_instance_dependence (slp_instance instance)
 {
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location,
-                     "=== vect_slp_analyze_instance_dependence ===\n");
+  VECT_OPTINFO_SCOPE ("vect_slp_analyze_instance_dependence");
 
   /* The stores of this instance are at the root of the SLP tree.  */
   slp_tree store = SLP_INSTANCE_TREE (instance);
@@ -784,19 +768,16 @@  vect_record_base_alignment (vec_info *vinfo, gimple *stmt,
   if (!existed || entry->base_alignment < drb->base_alignment)
     {
       entry = drb;
-      if (dump_enabled_p ())
+      if (optinfo_enabled_p ())
 	{
-	  dump_printf_loc (MSG_NOTE, vect_location,
-			   "recording new base alignment for ");
-	  dump_generic_expr (MSG_NOTE, TDF_SLIM, drb->base_address);
-	  dump_printf (MSG_NOTE, "\n");
-	  dump_printf_loc (MSG_NOTE, vect_location,
-			   "  alignment:    %d\n", drb->base_alignment);
-	  dump_printf_loc (MSG_NOTE, vect_location,
-			   "  misalignment: %d\n", drb->base_misalignment);
-	  dump_printf_loc (MSG_NOTE, vect_location,
-			   "  based on:     ");
-	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
+	  OPTINFO_VECT_NOTE
+	    << "recording new base alignment for "
+	    << slim (drb->base_address)
+	    << "\n"
+	    << optinfo_printf ("  alignment:    %d\n", drb->base_alignment)
+	    << optinfo_printf ("  misalignment: %d\n", drb->base_misalignment)
+	    << optinfo_printf ("  based on:     ")
+	    << stmt;
 	}
     }
 }
@@ -867,9 +848,9 @@  vect_compute_data_ref_alignment (struct data_reference *dr)
   tree ref = DR_REF (dr);
   tree vectype = STMT_VINFO_VECTYPE (stmt_info);
 
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location,
-                     "vect_compute_data_ref_alignment:\n");
+  if (optinfo_enabled_p ())
+    OPTINFO_VECT_NOTE
+      << "vect_compute_data_ref_alignment:";
 
   if (loop_vinfo)
     loop = LOOP_VINFO_LOOP (loop_vinfo);
@@ -902,13 +883,13 @@  vect_compute_data_ref_alignment (struct data_reference *dr)
       step_preserves_misalignment_p
 	= (DR_STEP_ALIGNMENT (dr) % vector_alignment) == 0;
 
-      if (dump_enabled_p ())
+      if (optinfo_enabled_p ())
 	{
 	  if (step_preserves_misalignment_p)
-	    dump_printf_loc (MSG_NOTE, vect_location,
+	    OPTINFO_VECT_NOTE << optinfo_printf (
 			     "inner step divides the vector alignment.\n");
 	  else
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+	    OPTINFO_VECT_FAILURE << (
 			     "inner step doesn't divide the vector"
 			     " alignment.\n");
 	}
@@ -924,8 +905,8 @@  vect_compute_data_ref_alignment (struct data_reference *dr)
       step_preserves_misalignment_p
 	= multiple_p (DR_STEP_ALIGNMENT (dr) * vf, vector_alignment);
 
-      if (!step_preserves_misalignment_p && dump_enabled_p ())
-	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+      if (!step_preserves_misalignment_p && optinfo_enabled_p ())
+	OPTINFO_VECT_FAILURE << (
 			 "step doesn't divide the vector alignment.\n");
     }
 
@@ -947,12 +928,11 @@  vect_compute_data_ref_alignment (struct data_reference *dr)
 	 negative when computing the starting misalignment below.  */
       || TREE_CODE (drb->step) != INTEGER_CST)
     {
-      if (dump_enabled_p ())
+      if (optinfo_enabled_p ())
 	{
-	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-	                   "Unknown alignment for access: ");
-	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, ref);
-	  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
+	  OPTINFO_VECT_FAILURE
+	    << "Unknown alignment for access: "
+	    << slim (ref);
 	}
       return true;
     }
@@ -965,12 +945,11 @@  vect_compute_data_ref_alignment (struct data_reference *dr)
 	  || !vect_can_force_dr_alignment_p (base,
 					     vector_alignment * BITS_PER_UNIT))
 	{
-	  if (dump_enabled_p ())
+	  if (optinfo_enabled_p ())
 	    {
-	      dump_printf_loc (MSG_NOTE, vect_location,
-	                       "can't force alignment of ref: ");
-	      dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
-	      dump_printf (MSG_NOTE, "\n");
+	      OPTINFO_VECT_NOTE
+		<< "can't force alignment of ref: "
+		<< slim (ref);
 	    }
 	  return true;
 	}
@@ -978,11 +957,11 @@  vect_compute_data_ref_alignment (struct data_reference *dr)
       /* Force the alignment of the decl.
 	 NOTE: This is the only change to the code we make during
 	 the analysis phase, before deciding to vectorize the loop.  */
-      if (dump_enabled_p ())
+      if (optinfo_enabled_p ())
         {
-          dump_printf_loc (MSG_NOTE, vect_location, "force alignment of ");
-          dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
-          dump_printf (MSG_NOTE, "\n");
+          OPTINFO_VECT_NOTE
+	    << "force alignment of "
+	    << slim (ref);
         }
 
       DR_VECT_AUX (dr)->base_decl = base;
@@ -1004,25 +983,19 @@  vect_compute_data_ref_alignment (struct data_reference *dr)
   if (!known_misalignment (misalignment, vector_alignment,
 			   &const_misalignment))
     {
-      if (dump_enabled_p ())
-	{
-	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			   "Non-constant misalignment for access: ");
-	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, ref);
-	  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
-	}
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_FAILURE
+	  << "Non-constant misalignment for access: "
+	  << slim (ref);
       return true;
     }
 
   SET_DR_MISALIGNMENT (dr, const_misalignment);
 
-  if (dump_enabled_p ())
-    {
-      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-                       "misalign = %d bytes of ref ", DR_MISALIGNMENT (dr));
-      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, ref);
-      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
-    }
+  if (optinfo_enabled_p ())
+    OPTINFO_VECT_FAILURE
+      << optinfo_printf ("misalign = %d bytes of ref ", DR_MISALIGNMENT (dr))
+      << slim (ref);
 
   return true;
 }
@@ -1085,9 +1058,9 @@  vect_update_misalignment_for_peel (struct data_reference *dr,
       return;
     }
 
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location, "Setting misalignment " \
-		     "to unknown (-1).\n");
+  if (optinfo_enabled_p ())
+    OPTINFO_VECT_NOTE
+      << "Setting misalignment to unknown (-1)";
   SET_DR_MISALIGNMENT (dr, DR_MISALIGNMENT_UNKNOWN);
 }
 
@@ -1103,26 +1076,20 @@  verify_data_ref_alignment (data_reference_p dr)
     = vect_supportable_dr_alignment (dr, false);
   if (!supportable_dr_alignment)
     {
-      if (dump_enabled_p ())
+      if (optinfo_enabled_p ())
 	{
+	  pending_optinfo info = OPTINFO_VECT_FAILURE;
 	  if (DR_IS_READ (dr))
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			     "not vectorized: unsupported unaligned load.");
+	    info << "not vectorized: unsupported unaligned load";
 	  else
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			     "not vectorized: unsupported unaligned "
-			     "store.");
-
-	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
-			     DR_REF (dr));
-	  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
+	    info << "not vectorized: unsupported unaligned store";
+	  info << slim (DR_REF (dr));
 	}
       return false;
     }
 
-  if (supportable_dr_alignment != dr_aligned && dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location,
-		     "Vectorizing an unaligned access.\n");
+  if (supportable_dr_alignment != dr_aligned && optinfo_enabled_p ())
+    OPTINFO_VECT_NOTE << "Vectorizing an unaligned access";
 
   return true;
 }
@@ -1216,18 +1183,17 @@  vector_alignment_reachable_p (struct data_reference *dr)
     {
       HOST_WIDE_INT elmsize =
 		int_cst_value (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
-      if (dump_enabled_p ())
+      if (optinfo_enabled_p ())
 	{
-	  dump_printf_loc (MSG_NOTE, vect_location,
-	                   "data size =" HOST_WIDE_INT_PRINT_DEC, elmsize);
-	  dump_printf (MSG_NOTE,
-	               ". misalignment = %d.\n", DR_MISALIGNMENT (dr));
+	  OPTINFO_VECT_NOTE
+	    << optinfo_printf ("data size =" HOST_WIDE_INT_PRINT_DEC, elmsize)
+	    << optinfo_printf (". misalignment = %d.\n", DR_MISALIGNMENT (dr));
 	}
       if (DR_MISALIGNMENT (dr) % elmsize)
 	{
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-	                     "data size does not divide the misalignment.\n");
+	  if (optinfo_enabled_p ())
+	    OPTINFO_VECT_FAILURE
+	      << ("data size does not divide the misalignment");
 	  return false;
 	}
     }
@@ -1236,10 +1202,10 @@  vector_alignment_reachable_p (struct data_reference *dr)
     {
       tree type = TREE_TYPE (DR_REF (dr));
       bool is_packed = not_size_aligned (DR_REF (dr));
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-	                 "Unknown misalignment, %snaturally aligned\n",
-			 is_packed ? "not " : "");
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_FAILURE
+	  << optinfo_printf ("Unknown misalignment, %snaturally aligned\n",
+			     is_packed ? "not " : "");
       return targetm.vectorize.vector_alignment_reachable (type, is_packed);
     }
 
@@ -1271,10 +1237,10 @@  vect_get_data_access_cost (struct data_reference *dr,
   else
     vect_get_store_cost (dr, ncopies, inside_cost, body_cost_vec);
 
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location,
-                     "vect_get_data_access_cost: inside_cost = %d, "
-                     "outside_cost = %d.\n", *inside_cost, *outside_cost);
+  if (optinfo_enabled_p ())
+    OPTINFO_VECT_NOTE
+      << optinfo_printf ("vect_get_data_access_cost: inside_cost = %d, "
+			 "outside_cost = %d.\n", *inside_cost, *outside_cost);
 }
 
 
@@ -1662,9 +1628,7 @@  vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
   unsigned int mis, same_align_drs_max = 0;
   hash_table<peel_info_hasher> peeling_htab (1);
 
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location,
-                     "=== vect_enhance_data_refs_alignment ===\n");
+  VECT_OPTINFO_SCOPE ("vect_enhance_data_refs_alignment");
 
   /* Reset data so we can safely be called multiple times.  */
   LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).truncate (0);
@@ -1828,8 +1792,8 @@  vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
         {
           if (!aligned_access_p (dr))
             {
-              if (dump_enabled_p ())
-                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+              if (optinfo_enabled_p ())
+                OPTINFO_VECT_FAILURE << (
                                  "vector alignment may not be reachable\n");
               break;
             }
@@ -2022,8 +1986,8 @@  vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
 	  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
 	    npeel /= GROUP_SIZE (stmt_info);
 
-          if (dump_enabled_p ())
-            dump_printf_loc (MSG_NOTE, vect_location,
+          if (optinfo_enabled_p ())
+            OPTINFO_VECT_NOTE << optinfo_printf (
                              "Try peeling by %d\n", npeel);
         }
 
@@ -2057,8 +2021,8 @@  vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
               if (max_peel > max_allowed_peel)
                 {
                   do_peeling = false;
-                  if (dump_enabled_p ())
-                    dump_printf_loc (MSG_NOTE, vect_location,
+                  if (optinfo_enabled_p ())
+                    OPTINFO_VECT_NOTE << optinfo_printf (
                         "Disable peeling, max peels reached: %d\n", max_peel);
                 }
             }
@@ -2107,11 +2071,11 @@  vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
             LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo)
 	      = DR_MISALIGNMENT (dr0);
 	  SET_DR_MISALIGNMENT (dr0, 0);
-	  if (dump_enabled_p ())
+	  if (optinfo_enabled_p ())
             {
-              dump_printf_loc (MSG_NOTE, vect_location,
+              OPTINFO_VECT_NOTE << optinfo_printf (
                                "Alignment of access forced using peeling.\n");
-              dump_printf_loc (MSG_NOTE, vect_location,
+              OPTINFO_VECT_NOTE << optinfo_printf (
                                "Peeling for alignment will be applied.\n");
             }
 
@@ -2232,13 +2196,13 @@  vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
           stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
           dr = STMT_VINFO_DATA_REF (stmt_info);
 	  SET_DR_MISALIGNMENT (dr, 0);
-	  if (dump_enabled_p ())
-            dump_printf_loc (MSG_NOTE, vect_location,
+	  if (optinfo_enabled_p ())
+            OPTINFO_VECT_NOTE << optinfo_printf (
                              "Alignment of access forced using versioning.\n");
         }
 
-      if (dump_enabled_p ())
-        dump_printf_loc (MSG_NOTE, vect_location,
+      if (optinfo_enabled_p ())
+        OPTINFO_VECT_NOTE << optinfo_printf (
                          "Versioning for alignment will be applied.\n");
 
       /* Peeling and versioning can't be done together at this time.  */
@@ -2300,14 +2264,13 @@  vect_find_same_alignment_drs (struct data_dependence_relation *ddr)
 
   STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_a).safe_push (drb);
   STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_b).safe_push (dra);
-  if (dump_enabled_p ())
+  if (optinfo_enabled_p ())
     {
-      dump_printf_loc (MSG_NOTE, vect_location,
-		       "accesses have the same alignment: ");
-      dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
-      dump_printf (MSG_NOTE,  " and ");
-      dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
-      dump_printf (MSG_NOTE, "\n");
+      OPTINFO_VECT_NOTE
+	<< "accesses have the same alignment: "
+	<< slim (DR_REF (dra))
+	<< " and "
+	<< slim (DR_REF (drb));
     }
 }
 
@@ -2320,9 +2283,7 @@  vect_find_same_alignment_drs (struct data_dependence_relation *ddr)
 bool
 vect_analyze_data_refs_alignment (loop_vec_info vinfo)
 {
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location,
-                     "=== vect_analyze_data_refs_alignment ===\n");
+  VECT_OPTINFO_SCOPE ("vect_analyze_data_refs_alignment");
 
   /* Mark groups of data references with same alignment using
      data dependence information.  */
@@ -2349,10 +2310,10 @@  vect_analyze_data_refs_alignment (loop_vec_info vinfo)
 	      && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
 	    continue;
 
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			     "not vectorized: can't calculate alignment "
-			     "for data ref.\n");
+	  if (optinfo_enabled_p ())
+	    OPTINFO_VECT_FAILURE
+	      << ("not vectorized: can't calculate alignment "
+		  "for data ref");
 
 	  return false;
 	}
@@ -2383,10 +2344,10 @@  vect_slp_analyze_and_verify_node_alignment (slp_tree node)
 	  && ! vect_compute_data_ref_alignment (first_dr))
       || ! verify_data_ref_alignment (dr))
     {
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			 "not vectorized: bad data alignment in basic "
-			 "block.\n");
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_FAILURE
+	  << ("not vectorized: bad data alignment in basic "
+	      "block");
       return false;
     }
 
@@ -2401,9 +2362,7 @@  vect_slp_analyze_and_verify_node_alignment (slp_tree node)
 bool
 vect_slp_analyze_and_verify_instance_alignment (slp_instance instance)
 {
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location,
-                     "=== vect_slp_analyze_and_verify_instance_alignment ===\n");
+  VECT_OPTINFO_SCOPE ("vect_slp_analyze_and_verify_instance_alignment");
 
   slp_tree node;
   unsigned i;
@@ -2455,15 +2414,13 @@  vect_analyze_group_access_1 (struct data_reference *dr)
 	 simply not include that gap.  */
       if ((dr_step % type_size) != 0)
 	{
-	  if (dump_enabled_p ())
+	  if (optinfo_enabled_p ())
 	    {
-	      dump_printf_loc (MSG_NOTE, vect_location,
-	                       "Step ");
-	      dump_generic_expr (MSG_NOTE, TDF_SLIM, step);
-	      dump_printf (MSG_NOTE,
-			   " is not a multiple of the element size for ");
-	      dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr));
-	      dump_printf (MSG_NOTE, "\n");
+	      OPTINFO_VECT_NOTE
+		<< "Step "
+		<< slim (step)
+		<< " is not a multiple of the element size for "
+		<< slim (DR_REF (dr));
 	    }
 	  return false;
 	}
@@ -2487,24 +2444,23 @@  vect_analyze_group_access_1 (struct data_reference *dr)
 	  GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = stmt;
 	  GROUP_SIZE (vinfo_for_stmt (stmt)) = groupsize;
 	  GROUP_GAP (stmt_info) = groupsize - 1;
-	  if (dump_enabled_p ())
+	  if (optinfo_enabled_p ())
 	    {
-	      dump_printf_loc (MSG_NOTE, vect_location,
-	                       "Detected single element interleaving ");
-	      dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr));
-	      dump_printf (MSG_NOTE, " step ");
-	      dump_generic_expr (MSG_NOTE, TDF_SLIM, step);
-	      dump_printf (MSG_NOTE, "\n");
+	      OPTINFO_VECT_NOTE
+		<< "Detected single element interleaving "
+		<< slim (DR_REF (dr))
+		<< " step "
+		<< slim (step);
 	    }
 
 	  return true;
 	}
 
-      if (dump_enabled_p ())
+      if (optinfo_enabled_p ())
         {
- 	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-	                   "not consecutive access ");
-	  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
+	  OPTINFO_VECT_FAILURE
+	    << "not consecutive access "
+	    << stmt;
         }
 
       if (bb_vinfo)
@@ -2514,7 +2470,8 @@  vect_analyze_group_access_1 (struct data_reference *dr)
           return true;
         }
 
-      dump_printf_loc (MSG_NOTE, vect_location, "using strided accesses\n");
+      OPTINFO_VECT_NOTE
+	<< "using strided accesses";
       STMT_VINFO_STRIDED_P (stmt_info) = true;
       return true;
     }
@@ -2542,15 +2499,15 @@  vect_analyze_group_access_1 (struct data_reference *dr)
             {
               if (DR_IS_WRITE (data_ref))
                 {
-                  if (dump_enabled_p ())
-                    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-                                     "Two store stmts share the same dr.\n");
+                  if (optinfo_enabled_p ())
+                    OPTINFO_VECT_FAILURE
+		      << "Two store stmts share the same dr";
                   return false;
                 }
 
-	      if (dump_enabled_p ())
-		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-				 "Two or more load stmts share the same dr.\n");
+	      if (optinfo_enabled_p ())
+		OPTINFO_VECT_FAILURE
+		  << "Two or more load stmts share the same dr";
 
               /* For load use the same data-ref load.  */
               GROUP_SAME_DR_STMT (vinfo_for_stmt (next)) = prev;
@@ -2576,9 +2533,9 @@  vect_analyze_group_access_1 (struct data_reference *dr)
 	      slp_impossible = true;
 	      if (DR_IS_WRITE (data_ref))
 		{
-                  if (dump_enabled_p ())
-                    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-                                     "interleaved store with gaps\n");
+                  if (optinfo_enabled_p ())
+                    OPTINFO_VECT_FAILURE
+		      << "interleaved store with gaps";
 		  return false;
 		}
 
@@ -2604,8 +2561,8 @@  vect_analyze_group_access_1 (struct data_reference *dr)
          inefficient way we have to cap earlier.  See PR78699 for example.  */
       if (groupsize > 4096)
 	{
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+	  if (optinfo_enabled_p ())
+	    OPTINFO_VECT_FAILURE << (
 			     "group is too large\n");
 	  return false;
 	}
@@ -2615,8 +2572,8 @@  vect_analyze_group_access_1 (struct data_reference *dr)
       if (groupsize != count
 	  && !DR_IS_READ (dr))
         {
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+	  if (optinfo_enabled_p ())
+	    OPTINFO_VECT_FAILURE << (
 			     "interleaved store with gaps\n");
 	  return false;
 	}
@@ -2628,9 +2585,9 @@  vect_analyze_group_access_1 (struct data_reference *dr)
       GROUP_GAP (vinfo_for_stmt (stmt)) = groupsize - last_accessed_element;
 
       GROUP_SIZE (vinfo_for_stmt (stmt)) = groupsize;
-      if (dump_enabled_p ())
+      if (optinfo_enabled_p ())
 	{
-	  dump_printf_loc (MSG_NOTE, vect_location,
+	  OPTINFO_VECT_NOTE << optinfo_printf (
 			   "Detected interleaving ");
 	  if (DR_IS_READ (dr))
 	    dump_printf (MSG_NOTE, "load ");
@@ -2640,7 +2597,7 @@  vect_analyze_group_access_1 (struct data_reference *dr)
 		       (unsigned)groupsize);
 	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
 	  if (GROUP_GAP (vinfo_for_stmt (stmt)) != 0)
-	    dump_printf_loc (MSG_NOTE, vect_location,
+	    OPTINFO_VECT_NOTE << optinfo_printf (
 			     "There is a gap of %u elements after the group\n",
 			     GROUP_GAP (vinfo_for_stmt (stmt)));
 	}
@@ -2707,8 +2664,8 @@  vect_analyze_data_ref_access (struct data_reference *dr)
 
   if (loop_vinfo && !step)
     {
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_FAILURE << (
 	                 "bad data-ref access in loop\n");
       return false;
     }
@@ -2724,8 +2681,8 @@  vect_analyze_data_ref_access (struct data_reference *dr)
 	 loop-carried dependencies between inner loop iterations.  */
       if (loop->safelen < 2)
 	{
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_NOTE, vect_location,
+	  if (optinfo_enabled_p ())
+	    OPTINFO_VECT_NOTE << optinfo_printf (
 			     "zero step in inner loop of nest\n");
 	  return false;
 	}
@@ -2741,8 +2698,8 @@  vect_analyze_data_ref_access (struct data_reference *dr)
       step = STMT_VINFO_DR_STEP (stmt_info);
       if (integer_zerop (step))
 	{
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_NOTE, vect_location,
+	  if (optinfo_enabled_p ())
+	    OPTINFO_VECT_NOTE << optinfo_printf (
 	                     "zero step in outer loop.\n");
 	  return DR_IS_READ (dr);
 	}
@@ -2764,8 +2721,8 @@  vect_analyze_data_ref_access (struct data_reference *dr)
 
   if (loop && nested_in_vect_loop_p (loop, stmt))
     {
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_NOTE, vect_location,
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_NOTE << optinfo_printf (
 	                 "grouped access in outer loop.\n");
       return false;
     }
@@ -2906,9 +2863,7 @@  vect_analyze_data_ref_accesses (vec_info *vinfo)
   vec<data_reference_p> datarefs = vinfo->datarefs;
   struct data_reference *dr;
 
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location,
-                     "=== vect_analyze_data_ref_accesses ===\n");
+  VECT_OPTINFO_SCOPE ("vect_analyze_data_ref_accesses");
 
   if (datarefs.is_empty ())
     return true;
@@ -3030,18 +2985,17 @@  vect_analyze_data_ref_accesses (vec_info *vinfo)
 		break;
 	    }
 
-	  if (dump_enabled_p ())
+	  if (optinfo_enabled_p ())
 	    {
-	      dump_printf_loc (MSG_NOTE, vect_location,
-			       "Detected interleaving ");
+	      pending_optinfo info = OPTINFO_VECT_NOTE
+		<< "Detected interleaving ";
 	      if (DR_IS_READ (dra))
-		dump_printf (MSG_NOTE, "load ");
+		info << "load ";
 	      else
-		dump_printf (MSG_NOTE, "store ");
-	      dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
-	      dump_printf (MSG_NOTE,  " and ");
-	      dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
-	      dump_printf (MSG_NOTE, "\n");
+		info << "store ";
+	      info << slim (DR_REF (dra));
+	      info << " and ";
+	      info << slim (DR_REF (drb));
 	    }
 
 	  /* Link the found element into the group list.  */
@@ -3060,8 +3014,8 @@  vect_analyze_data_ref_accesses (vec_info *vinfo)
     if (STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) 
         && !vect_analyze_data_ref_access (dr))
       {
-	if (dump_enabled_p ())
-	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+	if (optinfo_enabled_p ())
+	  OPTINFO_VECT_FAILURE << (
 	                   "not vectorized: complicated access pattern.\n");
 
         if (is_a <bb_vec_info> (vinfo))
@@ -3213,28 +3167,30 @@  dependence_distance_ge_vf (data_dependence_relation *ddr,
 	return false;
     }
 
-  if (dump_enabled_p ())
+  if (optinfo_enabled_p ())
     {
-      dump_printf_loc (MSG_NOTE, vect_location,
-		       "dependence distance between ");
-      dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (DDR_A (ddr)));
-      dump_printf (MSG_NOTE,  " and ");
-      dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (DDR_B (ddr)));
-      dump_printf (MSG_NOTE,  " is >= VF\n");
+      OPTINFO_VECT_NOTE
+	<< "dependence distance between "
+	<< slim (DR_REF (DDR_A (ddr)))
+	<< " and "
+	<< slim (DR_REF (DDR_B (ddr)))
+	<< " is >= VF";
     }
 
   return true;
 }
 
-/* Dump LOWER_BOUND using flags DUMP_KIND.  Dumps are known to be enabled.  */
+/* Add a description of LOWER_BOUND to INFO.  */
 
-static void
-dump_lower_bound (dump_flags_t dump_kind, const vec_lower_bound &lower_bound)
+static pending_optinfo &
+operator<< (pending_optinfo &info, const vec_lower_bound &lower_bound)
 {
-  dump_printf (dump_kind, "%s (", lower_bound.unsigned_p ? "unsigned" : "abs");
-  dump_generic_expr (dump_kind, TDF_SLIM, lower_bound.expr);
-  dump_printf (dump_kind, ") >= ");
-  dump_dec (dump_kind, lower_bound.min_value);
+  return info
+    << optinfo_printf ("%s (",
+		       lower_bound.unsigned_p ? "unsigned" : "abs")
+    << slim (lower_bound.expr)
+    << ") >= "
+    << lower_bound.min_value;
 }
 
 /* Record that the vectorized loop requires the vec_lower_bound described
@@ -3255,23 +3211,22 @@  vect_check_lower_bound (loop_vec_info loop_vinfo, tree expr, bool unsigned_p,
 	  {
 	    lower_bounds[i].unsigned_p = unsigned_p;
 	    lower_bounds[i].min_value = min_value;
-	    if (dump_enabled_p ())
+	    if (optinfo_enabled_p ())
 	      {
-		dump_printf_loc (MSG_NOTE, vect_location,
-				 "updating run-time check to ");
-		dump_lower_bound (MSG_NOTE, lower_bounds[i]);
-		dump_printf (MSG_NOTE, "\n");
+		OPTINFO_VECT_NOTE
+		  << "updating run-time check to "
+		  << lower_bounds[i];
 	      }
 	  }
 	return;
       }
 
   vec_lower_bound lower_bound (expr, unsigned_p, min_value);
-  if (dump_enabled_p ())
+  if (optinfo_enabled_p ())
     {
-      dump_printf_loc (MSG_NOTE, vect_location, "need a run-time check that ");
-      dump_lower_bound (MSG_NOTE, lower_bound);
-      dump_printf (MSG_NOTE, "\n");
+      OPTINFO_VECT_NOTE
+	<< "need a run-time check that "
+	<< lower_bound;
     }
   LOOP_VINFO_LOWER_BOUNDS (loop_vinfo).safe_push (lower_bound);
 }
@@ -3353,9 +3308,7 @@  vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo)
   unsigned int i;
   tree length_factor;
 
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location,
-                     "=== vect_prune_runtime_alias_test_list ===\n");
+  VECT_OPTINFO_SCOPE ("vect_prune_runtime_alias_test_list");
 
   /* Step values are irrelevant for aliasing if the number of vector
      iterations is equal to the number of scalar iterations (which can
@@ -3401,13 +3354,14 @@  vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo)
 	  vec_object_pair new_pair (DDR_OBJECT_A (ddr), DDR_OBJECT_B (ddr));
 	  if (!compared_objects.add (new_pair))
 	    {
-	      if (dump_enabled_p ())
+	      if (optinfo_enabled_p ())
 		{
-		  dump_printf_loc (MSG_NOTE, vect_location, "checking that ");
-		  dump_generic_expr (MSG_NOTE, TDF_SLIM, new_pair.first);
-		  dump_printf (MSG_NOTE, " and ");
-		  dump_generic_expr (MSG_NOTE, TDF_SLIM, new_pair.second);
-		  dump_printf (MSG_NOTE, " have different addresses\n");
+		  OPTINFO_VECT_NOTE
+		    << "checking that "
+		    << slim (new_pair.first)
+		    << " and "
+		    << slim (new_pair.second)
+		    << " have different addresses";
 		}
 	      LOOP_VINFO_CHECK_UNEQUAL_ADDRS (loop_vinfo).safe_push (new_pair);
 	    }
@@ -3426,14 +3380,14 @@  vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo)
 	  && (vect_preserves_scalar_order_p (stmt_a, stmt_b)
 	      || vectorizable_with_step_bound_p (dr_a, dr_b, &lower_bound)))
 	{
-	  if (dump_enabled_p ())
+	  if (optinfo_enabled_p ())
 	    {
-	      dump_printf_loc (MSG_NOTE, vect_location,
-			       "no need for alias check between ");
-	      dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr_a));
-	      dump_printf (MSG_NOTE, " and ");
-	      dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr_b));
-	      dump_printf (MSG_NOTE, " when VF is 1\n");
+	      OPTINFO_VECT_NOTE
+		<< "no need for alias check between "
+		<< slim (DR_REF (dr_a))
+		<< " and "
+		<< slim (DR_REF (dr_b))
+		<< " when VF is 1";
 	    }
 	  continue;
 	}
@@ -3449,25 +3403,21 @@  vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo)
 	      || vect_small_gap_p (loop_vinfo, dr_b, lower_bound)))
 	{
 	  bool unsigned_p = dr_known_forward_stride_p (dr_a);
-	  if (dump_enabled_p ())
+	  if (optinfo_enabled_p ())
 	    {
-	      dump_printf_loc (MSG_NOTE, vect_location, "no alias between ");
-	      dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr_a));
-	      dump_printf (MSG_NOTE, " and ");
-	      dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr_b));
-	      dump_printf (MSG_NOTE, " when the step ");
-	      dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_STEP (dr_a));
-	      dump_printf (MSG_NOTE, " is outside ");
+	      pending_optinfo info = OPTINFO_VECT_NOTE
+		<< "no alias between "
+		<< slim (DR_REF (dr_a))
+		<< " and "
+		<< slim (DR_REF (dr_b))
+		<< " when the step "
+		<< slim (DR_STEP (dr_a))
+		<< " is outside ";
 	      if (unsigned_p)
-		dump_printf (MSG_NOTE, "[0");
+		info << "[0";
 	      else
-		{
-		  dump_printf (MSG_NOTE, "(");
-		  dump_dec (MSG_NOTE, poly_int64 (-lower_bound));
-		}
-	      dump_printf (MSG_NOTE, ", ");
-	      dump_dec (MSG_NOTE, lower_bound);
-	      dump_printf (MSG_NOTE, ")\n");
+		info << "(" << poly_int64 (-lower_bound);
+	      info << ", " << lower_bound << ")";
 	    }
 	  vect_check_lower_bound (loop_vinfo, DR_STEP (dr_a), unsigned_p,
 				  lower_bound);
@@ -3525,17 +3475,17 @@  vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo)
 					     segment_length_b,
 					     access_size_a,
 					     access_size_b);
-	  if (res >= 0 && dump_enabled_p ())
+	  if (res >= 0 && optinfo_enabled_p ())
 	    {
-	      dump_printf_loc (MSG_NOTE, vect_location,
-			       "can tell at compile time that ");
-	      dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr_a));
-	      dump_printf (MSG_NOTE, " and ");
-	      dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr_b));
+	      pending_optinfo info = OPTINFO_VECT_NOTE
+		<< "can tell at compile time that "
+		<< slim (DR_REF (dr_a))
+		<< " and "
+		<< slim (DR_REF (dr_b));
 	      if (res == 0)
-		dump_printf (MSG_NOTE, " do not alias\n");
+		info << " do not alias";
 	      else
-		dump_printf (MSG_NOTE, " alias\n");
+		info << " alias";
 	    }
 
 	  if (res == 0)
@@ -3543,9 +3493,9 @@  vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo)
 
 	  if (res == 1)
 	    {
-	      if (dump_enabled_p ())
-		dump_printf_loc (MSG_NOTE, vect_location,
-				 "not vectorized: compilation time alias.\n");
+	      if (optinfo_enabled_p ())
+		OPTINFO_VECT_FAILURE
+		  << "not vectorized: compilation time alias";
 	      return false;
 	    }
 	}
@@ -3566,17 +3516,19 @@  vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo)
   unsigned int count = (comp_alias_ddrs.length ()
 			+ check_unequal_addrs.length ());
 
-  dump_printf_loc (MSG_NOTE, vect_location,
+  OPTINFO_VECT_NOTE << optinfo_printf (
 		   "improved number of alias checks from %d to %d\n",
 		   may_alias_ddrs.length (), count);
   if ((int) count > PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS))
     {
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			 "number of versioning for alias "
-			 "run-time tests exceeds %d "
-			 "(--param vect-max-version-for-alias-checks)\n",
-			 PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS));
+      if (optinfo_enabled_p ())
+	// FIXME: would be nice to have special highlighting for
+	// command-line options (e.g. params)
+	OPTINFO_VECT_FAILURE
+	  << optinfo_printf ("number of versioning for alias "
+			     "run-time tests exceeds %d "
+			     "(--param vect-max-version-for-alias-checks)",
+			     PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS));
       return false;
     }
 
@@ -3951,9 +3903,7 @@  vect_analyze_data_refs (vec_info *vinfo, poly_uint64 *min_vf)
   struct data_reference *dr;
   tree scalar_type;
 
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location,
-		     "=== vect_analyze_data_refs ===\n");
+  VECT_OPTINFO_SCOPE ("vect_analyze_data_refs");
 
   if (loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo))
     loop = LOOP_VINFO_LOOP (loop_vinfo);
@@ -3974,8 +3924,8 @@  vect_analyze_data_refs (vec_info *vinfo, poly_uint64 *min_vf)
 again:
       if (!dr || !DR_REF (dr))
         {
-          if (dump_enabled_p ())
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+          if (optinfo_enabled_p ())
+	    OPTINFO_VECT_FAILURE << (
 	                     "not vectorized: unhandled data-ref\n");
           return false;
         }
@@ -4094,12 +4044,12 @@  again:
 
 	  if (gatherscatter == SG_NONE && !simd_lane_access)
 	    {
-	      if (dump_enabled_p ())
+	      if (optinfo_enabled_p ())
 		{
-		  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-                                   "not vectorized: data ref analysis "
-                                   "failed ");
-		  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
+		  OPTINFO_VECT_FAILURE
+		    << ("not vectorized: data ref analysis "
+			"failed ")
+		    << stmt;
 		}
 
 	      if (is_a <bb_vec_info> (vinfo))
@@ -4111,8 +4061,8 @@  again:
 
       if (TREE_CODE (DR_BASE_ADDRESS (dr)) == INTEGER_CST)
         {
-          if (dump_enabled_p ())
-            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+          if (optinfo_enabled_p ())
+            OPTINFO_VECT_FAILURE << (
                              "not vectorized: base addr of dr is a "
                              "constant\n");
 
@@ -4126,11 +4076,11 @@  again:
 
       if (TREE_THIS_VOLATILE (DR_REF (dr)))
         {
-          if (dump_enabled_p ())
+          if (optinfo_enabled_p ())
             {
-              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-                               "not vectorized: volatile type ");
-              dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
+              OPTINFO_VECT_FAILURE
+		<< "not vectorized: volatile type "
+		<< stmt;
             }
 
           if (is_a <bb_vec_info> (vinfo))
@@ -4141,12 +4091,11 @@  again:
 
       if (stmt_can_throw_internal (stmt))
         {
-          if (dump_enabled_p ())
+          if (optinfo_enabled_p ())
             {
-              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-                               "not vectorized: statement can throw an "
-                               "exception ");
-              dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
+              OPTINFO_VECT_FAILURE
+		<< "not vectorized: statement can throw an exception "
+		<< stmt;
             }
 
           if (is_a <bb_vec_info> (vinfo))
@@ -4160,12 +4109,12 @@  again:
       if (TREE_CODE (DR_REF (dr)) == COMPONENT_REF
 	  && DECL_BIT_FIELD (TREE_OPERAND (DR_REF (dr), 1)))
 	{
-          if (dump_enabled_p ())
+          if (optinfo_enabled_p ())
             {
-              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-                               "not vectorized: statement is bitfield "
-                               "access ");
-              dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
+              OPTINFO_VECT_FAILURE
+		<< ("not vectorized: statement is bitfield "
+		    "access ")
+		<< stmt;
             }
 
           if (is_a <bb_vec_info> (vinfo))
@@ -4185,11 +4134,11 @@  again:
 	      || (gimple_call_internal_fn (stmt) != IFN_MASK_LOAD
 		  && gimple_call_internal_fn (stmt) != IFN_MASK_STORE)))
 	{
-	  if (dump_enabled_p ())
+	  if (optinfo_enabled_p ())
 	    {
-	      dump_printf_loc (MSG_MISSED_OPTIMIZATION,  vect_location,
-	                       "not vectorized: dr in a call ");
-	      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
+              OPTINFO_VECT_FAILURE
+		<< "not vectorized: dr in a call "
+		<< stmt;
 	    }
 
 	  if (is_a <bb_vec_info> (vinfo))
@@ -4219,12 +4168,11 @@  again:
 	  tree init_addr = fold_build_pointer_plus (base, init_offset);
 	  tree init_ref = build_fold_indirect_ref (init_addr);
 
-	  if (dump_enabled_p ())
+	  if (optinfo_enabled_p ())
 	    {
-	      dump_printf_loc (MSG_NOTE, vect_location,
-                               "analyze in outer loop: ");
-	      dump_generic_expr (MSG_NOTE, TDF_SLIM, init_ref);
-	      dump_printf (MSG_NOTE, "\n");
+	      OPTINFO_VECT_NOTE
+		<< "analyze in outer loop: "
+		<< slim (init_ref);
 	    }
 
 	  if (!dr_analyze_innermost (&STMT_VINFO_DR_WRT_VEC_LOOP (stmt_info),
@@ -4232,41 +4180,36 @@  again:
 	    /* dr_analyze_innermost already explained the failure.  */
 	    return false;
 
-          if (dump_enabled_p ())
+          if (optinfo_enabled_p ())
 	    {
-	      dump_printf_loc (MSG_NOTE, vect_location,
-                               "\touter base_address: ");
-	      dump_generic_expr (MSG_NOTE, TDF_SLIM,
-                                 STMT_VINFO_DR_BASE_ADDRESS (stmt_info));
-	      dump_printf (MSG_NOTE, "\n\touter offset from base address: ");
-	      dump_generic_expr (MSG_NOTE, TDF_SLIM,
-                                 STMT_VINFO_DR_OFFSET (stmt_info));
-	      dump_printf (MSG_NOTE,
-                           "\n\touter constant offset from base address: ");
-	      dump_generic_expr (MSG_NOTE, TDF_SLIM,
-                                 STMT_VINFO_DR_INIT (stmt_info));
-	      dump_printf (MSG_NOTE, "\n\touter step: ");
-	      dump_generic_expr (MSG_NOTE, TDF_SLIM,
-                                 STMT_VINFO_DR_STEP (stmt_info));
-	      dump_printf (MSG_NOTE, "\n\touter base alignment: %d\n",
-			   STMT_VINFO_DR_BASE_ALIGNMENT (stmt_info));
-	      dump_printf (MSG_NOTE, "\n\touter base misalignment: %d\n",
-			   STMT_VINFO_DR_BASE_MISALIGNMENT (stmt_info));
-	      dump_printf (MSG_NOTE, "\n\touter offset alignment: %d\n",
-			   STMT_VINFO_DR_OFFSET_ALIGNMENT (stmt_info));
-	      dump_printf (MSG_NOTE, "\n\touter step alignment: %d\n",
-			   STMT_VINFO_DR_STEP_ALIGNMENT (stmt_info));
+	      OPTINFO_VECT_NOTE
+		<< "\touter base_address: "
+		<< slim (STMT_VINFO_DR_BASE_ADDRESS (stmt_info))
+		<< "\n\touter offset from base address: "
+		<< slim (STMT_VINFO_DR_OFFSET (stmt_info))
+		<< "\n\touter constant offset from base address: "
+		<< slim (STMT_VINFO_DR_INIT (stmt_info))
+		<< "\n\touter step: "
+		<< slim (STMT_VINFO_DR_STEP (stmt_info))
+		<< optinfo_printf ("\n\touter base alignment: %d",
+				   STMT_VINFO_DR_BASE_ALIGNMENT (stmt_info))
+		<< optinfo_printf ("\n\touter base misalignment: %d",
+				   STMT_VINFO_DR_BASE_MISALIGNMENT (stmt_info))
+		<< optinfo_printf ("\n\touter offset alignment: %d",
+				   STMT_VINFO_DR_OFFSET_ALIGNMENT (stmt_info))
+		<< optinfo_printf ("\n\touter step alignment: %d",
+				   STMT_VINFO_DR_STEP_ALIGNMENT (stmt_info));
 	    }
 	}
 
       if (STMT_VINFO_DATA_REF (stmt_info))
         {
-          if (dump_enabled_p ())
+          if (optinfo_enabled_p ())
             {
-              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-                               "not vectorized: more than one data ref "
-                               "in stmt: ");
-              dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
+              OPTINFO_VECT_FAILURE
+		<< ("not vectorized: more than one data ref "
+		    "in stmt: ")
+		<< stmt;
             }
 
           if (is_a <bb_vec_info> (vinfo))
@@ -4289,12 +4232,12 @@  again:
 	  && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (dr), 0))
 	  && DECL_NONALIASED (TREE_OPERAND (DR_BASE_ADDRESS (dr), 0)))
 	{
-          if (dump_enabled_p ())
+          if (optinfo_enabled_p ())
             {
-              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-                               "not vectorized: base object not addressable "
-			       "for stmt: ");
-              dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
+              OPTINFO_VECT_FAILURE
+		<< ("not vectorized: base object not addressable "
+		    "for stmt: ")
+		<< stmt;
             }
           if (is_a <bb_vec_info> (vinfo))
 	    {
@@ -4312,15 +4255,13 @@  again:
 	= get_vectype_for_scalar_type (scalar_type);
       if (!STMT_VINFO_VECTYPE (stmt_info))
         {
-          if (dump_enabled_p ())
+          if (optinfo_enabled_p ())
             {
-              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-                               "not vectorized: no vectype for stmt: ");
-              dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
-              dump_printf (MSG_MISSED_OPTIMIZATION, " scalar_type: ");
-              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_DETAILS,
-                                 scalar_type);
-              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
+              OPTINFO_VECT_FAILURE
+		<< "not vectorized: no vectype for stmt: "
+		<< stmt
+		<< " scalar_type: "
+		<< details (scalar_type);
             }
 
           if (is_a <bb_vec_info> (vinfo))
@@ -4341,14 +4282,12 @@  again:
         }
       else
 	{
-	  if (dump_enabled_p ())
+	  if (optinfo_enabled_p ())
 	    {
-	      dump_printf_loc (MSG_NOTE, vect_location,
-			       "got vectype for stmt: ");
-	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
-	      dump_generic_expr (MSG_NOTE, TDF_SLIM,
-				 STMT_VINFO_VECTYPE (stmt_info));
-	      dump_printf (MSG_NOTE, "\n");
+	      OPTINFO_VECT_NOTE
+		<< "got vectype for stmt: "
+		<< stmt
+		<< slim (STMT_VINFO_VECTYPE (stmt_info));
 	    }
 	}
 
@@ -4366,15 +4305,15 @@  again:
 	    {
 	      STMT_VINFO_DATA_REF (stmt_info) = NULL;
 	      free_data_ref (dr);
-	      if (dump_enabled_p ())
+	      if (optinfo_enabled_p ())
 		{
-		  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-				   (gatherscatter == GATHER) ?
-				   "not vectorized: not suitable for gather "
-				   "load " :
-				   "not vectorized: not suitable for scatter "
-				   "store ");
-		  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
+		  OPTINFO_VECT_FAILURE
+		    << ((gatherscatter == GATHER) ?
+			"not vectorized: not suitable for gather "
+			"load " :
+			"not vectorized: not suitable for scatter "
+			"store ")
+		    << stmt;
 		}
 	      return false;
 	    }
@@ -4389,12 +4328,12 @@  again:
 	{
 	  if (nested_in_vect_loop_p (loop, stmt))
 	    {
-	      if (dump_enabled_p ())
+	      if (optinfo_enabled_p ())
 		{
-		  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, 
-                                   "not vectorized: not suitable for strided "
-                                   "load ");
-		  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
+		  OPTINFO_VECT_FAILURE
+		    << ("not vectorized: not suitable for strided "
+			"load ")
+		    << stmt;
 		}
 	      return false;
 	    }
@@ -4621,11 +4560,11 @@  vect_create_addr_base_for_vector_ref (gimple *stmt,
 	mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (addr_base));
     }
 
-  if (dump_enabled_p ())
+  if (optinfo_enabled_p ())
     {
-      dump_printf_loc (MSG_NOTE, vect_location, "created ");
-      dump_generic_expr (MSG_NOTE, TDF_SLIM, addr_base);
-      dump_printf (MSG_NOTE, "\n");
+      OPTINFO_VECT_NOTE
+	<< "created "
+	<< slim (addr_base);
     }
 
   return addr_base;
@@ -4748,23 +4687,22 @@  vect_create_data_ref_ptr (gimple *stmt, tree aggr_type, struct loop *at_loop,
      in LOOP.  */
   base_name = get_name (DR_BASE_ADDRESS (dr));
 
-  if (dump_enabled_p ())
+  if (optinfo_enabled_p ())
     {
       tree dr_base_type = TREE_TYPE (DR_BASE_OBJECT (dr));
-      dump_printf_loc (MSG_NOTE, vect_location,
-                       "create %s-pointer variable to type: ",
-		       get_tree_code_name (TREE_CODE (aggr_type)));
-      dump_generic_expr (MSG_NOTE, TDF_SLIM, aggr_type);
+      pending_optinfo info = OPTINFO_VECT_NOTE
+	<< optinfo_printf ("create %s-pointer variable to type: ",
+			   get_tree_code_name (TREE_CODE (aggr_type)))
+	<< slim (aggr_type);
       if (TREE_CODE (dr_base_type) == ARRAY_TYPE)
-        dump_printf (MSG_NOTE, "  vectorizing an array ref: ");
+	info << "  vectorizing an array ref: ";
       else if (TREE_CODE (dr_base_type) == VECTOR_TYPE)
-        dump_printf (MSG_NOTE, "  vectorizing a vector ref: ");
+	info << "  vectorizing a vector ref: ";
       else if (TREE_CODE (dr_base_type) == RECORD_TYPE)
-        dump_printf (MSG_NOTE, "  vectorizing a record based array ref: ");
+	info << "  vectorizing a record based array ref: ";
       else
-        dump_printf (MSG_NOTE, "  vectorizing a pointer ref: ");
-      dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_BASE_OBJECT (dr));
-      dump_printf (MSG_NOTE, "\n");
+	info << "  vectorizing a pointer ref: ";
+      info << slim (DR_BASE_OBJECT (dr));
     }
 
   /* (1) Create the new aggregate-pointer variable.
@@ -5078,10 +5016,10 @@  vect_grouped_store_supported (tree vectype, unsigned HOST_WIDE_INT count)
      be a power of two.  */
   if (count != 3 && exact_log2 (count) == -1)
     {
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			 "the size of the group of accesses"
-			 " is not a power of 2 or not eqaul to 3\n");
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_FAILURE
+	  << ("the size of the group of accesses"
+	      " is not a power of 2 or not equal to 3");
       return false;
     }
 
@@ -5097,10 +5035,10 @@  vect_grouped_store_supported (tree vectype, unsigned HOST_WIDE_INT count)
 	  unsigned int nelt;
 	  if (!GET_MODE_NUNITS (mode).is_constant (&nelt))
 	    {
-	      if (dump_enabled_p ())
-		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-				 "cannot handle groups of 3 stores for"
-				 " variable-length vectors\n");
+	      if (optinfo_enabled_p ())
+		OPTINFO_VECT_FAILURE
+		  << ("cannot handle groups of 3 stores for"
+		      " variable-length vectors");
 	      return false;
 	    }
 
@@ -5124,9 +5062,9 @@  vect_grouped_store_supported (tree vectype, unsigned HOST_WIDE_INT count)
 	      indices.new_vector (sel, 2, nelt);
 	      if (!can_vec_perm_const_p (mode, indices))
 		{
-		  if (dump_enabled_p ())
-		    dump_printf (MSG_MISSED_OPTIMIZATION,
-				 "permutation op not supported by target.\n");
+		  if (optinfo_enabled_p ())
+		    OPTINFO_VECT_FAILURE
+		      << "permutation op not supported by target";
 		  return false;
 		}
 
@@ -5142,9 +5080,9 @@  vect_grouped_store_supported (tree vectype, unsigned HOST_WIDE_INT count)
 	      indices.new_vector (sel, 2, nelt);
 	      if (!can_vec_perm_const_p (mode, indices))
 		{
-		  if (dump_enabled_p ())
-		    dump_printf (MSG_MISSED_OPTIMIZATION,
-				 "permutation op not supported by target.\n");
+		  if (optinfo_enabled_p ())
+		    OPTINFO_VECT_FAILURE
+		      << "permutation op not supported by target";
 		  return false;
 		}
 	    }
@@ -5176,9 +5114,9 @@  vect_grouped_store_supported (tree vectype, unsigned HOST_WIDE_INT count)
 	}
     }
 
-  if (dump_enabled_p ())
-    dump_printf (MSG_MISSED_OPTIMIZATION,
-		 "permutaion op not supported by target.\n");
+  if (optinfo_enabled_p ())
+    OPTINFO_VECT_FAILURE
+      << "permutation op not supported by target";
   return false;
 }
 
@@ -5689,8 +5627,8 @@  vect_grouped_load_supported (tree vectype, bool single_element_p,
      see PR65518).  */
   if (single_element_p && maybe_gt (count, TYPE_VECTOR_SUBPARTS (vectype)))
     {
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_FAILURE << (
 			 "single-element interleaving not supported "
 			 "for not adjacent vector loads\n");
       return false;
@@ -5700,8 +5638,8 @@  vect_grouped_load_supported (tree vectype, bool single_element_p,
      be a power of two.  */
   if (count != 3 && exact_log2 (count) == -1)
     {
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_FAILURE << (
 			 "the size of the group of accesses"
 			 " is not a power of 2 or not equal to 3\n");
       return false;
@@ -5716,8 +5654,8 @@  vect_grouped_load_supported (tree vectype, bool single_element_p,
 	  unsigned int nelt;
 	  if (!GET_MODE_NUNITS (mode).is_constant (&nelt))
 	    {
-	      if (dump_enabled_p ())
-		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+	      if (optinfo_enabled_p ())
+		OPTINFO_VECT_FAILURE << (
 				 "cannot handle groups of 3 loads for"
 				 " variable-length vectors\n");
 	      return false;
@@ -5737,8 +5675,8 @@  vect_grouped_load_supported (tree vectype, bool single_element_p,
 	      indices.new_vector (sel, 2, nelt);
 	      if (!can_vec_perm_const_p (mode, indices))
 		{
-		  if (dump_enabled_p ())
-		    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+		  if (optinfo_enabled_p ())
+		    OPTINFO_VECT_FAILURE << (
 				     "shuffle of 3 loads is not supported by"
 				     " target\n");
 		  return false;
@@ -5751,8 +5689,8 @@  vect_grouped_load_supported (tree vectype, bool single_element_p,
 	      indices.new_vector (sel, 2, nelt);
 	      if (!can_vec_perm_const_p (mode, indices))
 		{
-		  if (dump_enabled_p ())
-		    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+		  if (optinfo_enabled_p ())
+		    OPTINFO_VECT_FAILURE << (
 				     "shuffle of 3 loads is not supported by"
 				     " target\n");
 		  return false;
@@ -5783,8 +5721,8 @@  vect_grouped_load_supported (tree vectype, bool single_element_p,
         }
     }
 
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+  if (optinfo_enabled_p ())
+    OPTINFO_VECT_FAILURE << (
 		     "extract even/odd not supported by target\n");
   return false;
 }
@@ -6126,8 +6064,8 @@  vect_shift_permute_load_chain (vec<tree> dr_chain,
       vec_perm_indices indices (sel, 2, nelt);
       if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
 	{
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+	  if (optinfo_enabled_p ())
+	    OPTINFO_VECT_FAILURE << (
 			     "shuffle of 2 fields structure is not \
 			      supported by target\n");
 	  return false;
@@ -6141,8 +6079,8 @@  vect_shift_permute_load_chain (vec<tree> dr_chain,
       indices.new_vector (sel, 2, nelt);
       if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
 	{
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+	  if (optinfo_enabled_p ())
+	    OPTINFO_VECT_FAILURE << (
 			     "shuffle of 2 fields structure is not \
 			      supported by target\n");
 	  return false;
@@ -6156,8 +6094,8 @@  vect_shift_permute_load_chain (vec<tree> dr_chain,
       indices.new_vector (sel, 2, nelt);
       if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
 	{
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+	  if (optinfo_enabled_p ())
+	    OPTINFO_VECT_FAILURE << (
 			     "shift permutation is not supported by target\n");
 	  return false;
 	}
@@ -6172,8 +6110,8 @@  vect_shift_permute_load_chain (vec<tree> dr_chain,
       indices.new_vector (sel, 2, nelt);
       if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
 	{
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+	  if (optinfo_enabled_p ())
+	    OPTINFO_VECT_FAILURE << (
 			     "select is not supported by target\n");
 	  return false;
 	}
@@ -6236,10 +6174,9 @@  vect_shift_permute_load_chain (vec<tree> dr_chain,
       vec_perm_indices indices (sel, 2, nelt);
       if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
 	{
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			     "shuffle of 3 fields structure is not \
-			      supported by target\n");
+	  if (optinfo_enabled_p ())
+	    OPTINFO_VECT_FAILURE
+	      << "shuffle of 3 fields structure is not supported by target";
 	  return false;
 	}
       perm3_mask = vect_gen_perm_mask_checked (vectype, indices);
@@ -6251,9 +6188,9 @@  vect_shift_permute_load_chain (vec<tree> dr_chain,
       indices.new_vector (sel, 2, nelt);
       if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
 	{
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			     "shift permutation is not supported by target\n");
+	  if (optinfo_enabled_p ())
+	    OPTINFO_VECT_FAILURE
+	      << "shift permutation is not supported by target";
 	  return false;
 	}
       shift1_mask = vect_gen_perm_mask_checked (vectype, indices);
@@ -6265,9 +6202,9 @@  vect_shift_permute_load_chain (vec<tree> dr_chain,
       indices.new_vector (sel, 2, nelt);
       if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
 	{
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			     "shift permutation is not supported by target\n");
+	  if (optinfo_enabled_p ())
+	    OPTINFO_VECT_FAILURE
+	      << "shift permutation is not supported by target";
 	  return false;
 	}
       shift2_mask = vect_gen_perm_mask_checked (vectype, indices);
@@ -6279,9 +6216,9 @@  vect_shift_permute_load_chain (vec<tree> dr_chain,
       indices.new_vector (sel, 2, nelt);
       if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
 	{
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			     "shift permutation is not supported by target\n");
+	  if (optinfo_enabled_p ())
+	    OPTINFO_VECT_FAILURE
+	      << "shift permutation is not supported by target";
 	  return false;
 	}
       shift3_mask = vect_gen_perm_mask_checked (vectype, indices);
@@ -6293,9 +6230,9 @@  vect_shift_permute_load_chain (vec<tree> dr_chain,
       indices.new_vector (sel, 2, nelt);
       if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
 	{
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			     "shift permutation is not supported by target\n");
+	  if (optinfo_enabled_p ())
+	    OPTINFO_VECT_FAILURE
+	      << "shift permutation is not supported by target";
 	  return false;
 	}
       shift4_mask = vect_gen_perm_mask_checked (vectype, indices);
diff --git a/gcc/tree-vect-loop-manip.c b/gcc/tree-vect-loop-manip.c
index e82c1fe..fb4b71a 100644
--- a/gcc/tree-vect-loop-manip.c
+++ b/gcc/tree-vect-loop-manip.c
@@ -938,10 +938,11 @@  vect_set_loop_condition (struct loop *loop, loop_vec_info loop_vinfo,
   gsi_remove (&loop_cond_gsi, true);
   free_stmt_vec_info (orig_cond);
 
-  if (dump_enabled_p ())
+  if (optinfo_enabled_p ())
     {
-      dump_printf_loc (MSG_NOTE, vect_location, "New loop exit condition: ");
-      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, cond_stmt, 0);
+      OPTINFO_VECT_NOTE
+	<< "New loop exit condition: "
+	<< cond_stmt;
     }
 }
 
@@ -1370,17 +1371,16 @@  vect_can_advance_ivs_p (loop_vec_info loop_vinfo)
 
   /* Analyze phi functions of the loop header.  */
 
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location, "vect_can_advance_ivs_p:\n");
+  if (optinfo_enabled_p ())
+    OPTINFO_VECT_NOTE << "vect_can_advance_ivs_p:";
   for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
     {
       tree evolution_part;
 
       gphi *phi = gsi.phi ();
-      if (dump_enabled_p ())
+      if (optinfo_enabled_p ())
 	{
-          dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: ");
-          dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
+	  OPTINFO_VECT_NOTE << "Analyze phi: " << phi;
 	}
 
       /* Skip virtual phi's. The data dependences that are associated with
@@ -1389,9 +1389,8 @@  vect_can_advance_ivs_p (loop_vec_info loop_vinfo)
 	 Skip reduction phis.  */
       if (!iv_phi_p (phi))
 	{
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_NOTE, vect_location,
-			     "reduc or virtual phi. skip.\n");
+	  if (optinfo_enabled_p ())
+	    OPTINFO_VECT_NOTE << "reduc or virtual phi. skip.";
 	  continue;
 	}
 
@@ -1401,9 +1400,8 @@  vect_can_advance_ivs_p (loop_vec_info loop_vinfo)
 	= STMT_VINFO_LOOP_PHI_EVOLUTION_PART (vinfo_for_stmt (phi));
       if (evolution_part == NULL_TREE)
         {
-	  if (dump_enabled_p ())
-	    dump_printf (MSG_MISSED_OPTIMIZATION,
-			 "No access function or evolution.\n");
+	  if (optinfo_enabled_p ())
+	    OPTINFO_VECT_FAILURE << "No access function or evolution";
 	  return false;
         }
 
@@ -1412,9 +1410,8 @@  vect_can_advance_ivs_p (loop_vec_info loop_vinfo)
 
       if (!expr_invariant_in_loop_p (loop, evolution_part))
 	{
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			     "evolution not invariant in loop.\n");
+	  if (optinfo_enabled_p ())
+	    OPTINFO_VECT_FAILURE << "evolution not invariant in loop";
 	  return false;
 	}
 
@@ -1423,9 +1420,8 @@  vect_can_advance_ivs_p (loop_vec_info loop_vinfo)
 
       if (tree_is_chrec (evolution_part))
 	{
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			     "evolution is chrec.\n");
+	  if (optinfo_enabled_p ())
+	    OPTINFO_VECT_FAILURE << "evolution is chrec";
 	  return false;
 	}
     }
@@ -1500,19 +1496,18 @@  vect_update_ivs_after_vectorizer (loop_vec_info loop_vinfo,
 
       gphi *phi = gsi.phi ();
       gphi *phi1 = gsi1.phi ();
-      if (dump_enabled_p ())
+      if (optinfo_enabled_p ())
 	{
-	  dump_printf_loc (MSG_NOTE, vect_location,
-			   "vect_update_ivs_after_vectorizer: phi: ");
-	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
+	  OPTINFO_VECT_NOTE
+	    << "vect_update_ivs_after_vectorizer: phi: "
+	    << phi;
 	}
 
       /* Skip reduction and virtual phis.  */
       if (!iv_phi_p (phi))
 	{
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_NOTE, vect_location,
-			     "reduc or virtual phi. skip.\n");
+	  if (optinfo_enabled_p ())
+	    OPTINFO_VECT_NOTE << "reduc or virtual phi. skip.";
 	  continue;
 	}
 
@@ -1640,9 +1635,9 @@  vect_gen_prolog_loop_niters (loop_vec_info loop_vinfo,
     {
       int npeel = LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
 
-      if (dump_enabled_p ())
-        dump_printf_loc (MSG_NOTE, vect_location,
-                         "known peeling = %d.\n", npeel);
+      if (optinfo_enabled_p ())
+        OPTINFO_VECT_NOTE
+	  << optinfo_printf ("known peeling = %d", npeel);
 
       iters = build_int_cst (niters_type, npeel);
       *bound = LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
@@ -1671,12 +1666,11 @@  vect_gen_prolog_loop_niters (loop_vec_info loop_vinfo,
       *bound = align_in_elems - 1;
     }
 
-  if (dump_enabled_p ())
+  if (optinfo_enabled_p ())
     {
-      dump_printf_loc (MSG_NOTE, vect_location,
-                       "niters for prolog loop: ");
-      dump_generic_expr (MSG_NOTE, TDF_SLIM, iters);
-      dump_printf (MSG_NOTE, "\n");
+      OPTINFO_VECT_NOTE
+	<< "niters for prolog loop: "
+	<< slim (iters);
     }
 
   var = create_tmp_var (niters_type, "prolog_loop_niters");
@@ -1733,9 +1727,7 @@  vect_update_inits_of_drs (loop_vec_info loop_vinfo, tree niters,
   vec<data_reference_p> datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
   struct data_reference *dr;
 
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location,
-		     "=== vect_update_inits_of_dr ===\n");
+  VECT_OPTINFO_SCOPE ("vect_update_inits_of_dr");
 
   /* Adjust niters to sizetype and insert stmts on loop preheader edge.  */
   if (!types_compatible_p (sizetype, TREE_TYPE (niters)))
@@ -1793,12 +1785,11 @@  vect_prepare_for_masked_peels (loop_vec_info loop_vinfo)
 	}
     }
 
-  if (dump_enabled_p ())
+  if (optinfo_enabled_p ())
     {
-      dump_printf_loc (MSG_NOTE, vect_location,
-		       "misalignment for fully-masked loop: ");
-      dump_generic_expr (MSG_NOTE, TDF_SLIM, misalign_in_elems);
-      dump_printf (MSG_NOTE, "\n");
+      OPTINFO_VECT_NOTE
+	<< "misalignment for fully-masked loop: "
+	<< slim (misalign_in_elems);
     }
 
   LOOP_VINFO_MASK_SKIP_NITERS (loop_vinfo) = misalign_in_elems;
@@ -2932,10 +2923,10 @@  vect_create_cond_for_alias_checks (loop_vec_info loop_vinfo, tree * cond_expr)
 
   create_runtime_alias_checks (LOOP_VINFO_LOOP (loop_vinfo),
 			       &comp_alias_ddrs, cond_expr);
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location,
-		     "created %u versioning for alias checks.\n",
-		     comp_alias_ddrs.length ());
+  if (optinfo_enabled_p ())
+    OPTINFO_VECT_NOTE
+      << optinfo_printf ("created %u versioning for alias checks",
+			 comp_alias_ddrs.length ());
 }
 
 
@@ -3070,17 +3061,16 @@  vect_loop_versioning (loop_vec_info loop_vinfo,
       loop_constraint_set (loop, LOOP_C_INFINITE);
     }
 
-  if (LOCATION_LOCUS (vect_location) != UNKNOWN_LOCATION
-      && dump_enabled_p ())
+  if (optinfo_enabled_p ())
     {
       if (version_alias)
-        dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
-                         "loop versioned for vectorization because of "
-			 "possible aliasing\n");
+	OPTINFO_VECT_SUCCESS
+	  << ("loop versioned for vectorization because of "
+	      "possible aliasing");
       if (version_align)
-        dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
-                         "loop versioned for vectorization to enhance "
-			 "alignment\n");
+        OPTINFO_VECT_SUCCESS
+	  << ("loop versioned for vectorization to enhance "
+	      "alignment");
 
     }
   free_original_copy_tables ();
diff --git a/gcc/tree-vect-loop.c b/gcc/tree-vect-loop.c
index 4ce721ed..f5a3afc 100644
--- a/gcc/tree-vect-loop.c
+++ b/gcc/tree-vect-loop.c
@@ -54,6 +54,7 @@  along with GCC; see the file COPYING3.  If not see
 #include "tree-vector-builder.h"
 #include "vec-perm-indices.h"
 #include "tree-eh.h"
+#include "optinfo.h"
 
 /* Loop Vectorization Pass.
 
@@ -155,6 +156,15 @@  along with GCC; see the file COPYING3.  If not see
 
 static void vect_estimate_min_profitable_iters (loop_vec_info, int *, int *);
 
+/* Generate a pending_optinfo at the current vect_optinfo_location.  */
+
+pending_optinfo
+emit_optinfo_at_vect_location (const optinfo_impl_location &impl_location,
+			       enum optinfo_kind kind)
+{
+  return pending_optinfo (impl_location, kind, vect_optinfo_location, false);
+}
+
 /* Function vect_determine_vectorization_factor
 
    Determine the vectorization factor (VF).  VF is the number of data elements
@@ -200,9 +210,7 @@  vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
   bool bool_result;
   auto_vec<stmt_vec_info> mask_producers;
 
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location,
-                     "=== vect_determine_vectorization_factor ===\n");
+  VECT_OPTINFO_SCOPE ("vect_determine_vectorization_factor");
 
   for (i = 0; i < nbbs; i++)
     {
@@ -213,11 +221,10 @@  vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
 	{
 	  phi = si.phi ();
 	  stmt_info = vinfo_for_stmt (phi);
-	  if (dump_enabled_p ())
-	    {
-	      dump_printf_loc (MSG_NOTE, vect_location, "==> examining phi: ");
-	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
-	    }
+	  if (optinfo_enabled_p ())
+	    OPTINFO_VECT_NOTE
+	      << "==> examining phi: "
+	      << phi;
 
 	  gcc_assert (stmt_info);
 
@@ -227,42 +234,36 @@  vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
 	      gcc_assert (!STMT_VINFO_VECTYPE (stmt_info));
               scalar_type = TREE_TYPE (PHI_RESULT (phi));
 
-	      if (dump_enabled_p ())
+	      if (optinfo_enabled_p ())
 		{
-		  dump_printf_loc (MSG_NOTE, vect_location,
-                                   "get vectype for scalar type:  ");
-		  dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
-                  dump_printf (MSG_NOTE, "\n");
+		  OPTINFO_VECT_NOTE
+		    << "get vectype for scalar type:  "
+		    << slim (scalar_type);
 		}
 
 	      vectype = get_vectype_for_scalar_type (scalar_type);
 	      if (!vectype)
 		{
-		  if (dump_enabled_p ())
+		  if (optinfo_enabled_p ())
 		    {
-		      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-                                       "not vectorized: unsupported "
-                                       "data-type ");
-		      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
-                                         scalar_type);
-                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
+		      OPTINFO_VECT_FAILURE
+			<< "not vectorized: unsupported data-type "
+			<< slim (scalar_type);
 		    }
 		  return false;
 		}
 	      STMT_VINFO_VECTYPE (stmt_info) = vectype;
 
-	      if (dump_enabled_p ())
-		{
-		  dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
-		  dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
-                  dump_printf (MSG_NOTE, "\n");
-		}
+	      if (optinfo_enabled_p ())
+		OPTINFO_VECT_NOTE
+		  << "vectype: "
+		  << slim (vectype);
 
-	      if (dump_enabled_p ())
+	      if (optinfo_enabled_p ())
 		{
-		  dump_printf_loc (MSG_NOTE, vect_location, "nunits = ");
-		  dump_dec (MSG_NOTE, TYPE_VECTOR_SUBPARTS (vectype));
-		  dump_printf (MSG_NOTE, "\n");
+		  OPTINFO_VECT_NOTE
+		    << "nunits = "
+		    << TYPE_VECTOR_SUBPARTS (vectype);
 		}
 
 	      vect_update_max_nunits (&vectorization_factor, vectype);
@@ -281,11 +282,11 @@  vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
 
           stmt_info = vinfo_for_stmt (stmt);
 
-	  if (dump_enabled_p ())
+	  if (optinfo_enabled_p ())
 	    {
-	      dump_printf_loc (MSG_NOTE, vect_location,
-                               "==> examining statement: ");
-	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
+	      OPTINFO_VECT_NOTE
+		<< "==> examining statement: "
+		<< stmt;
 	    }
 
 	  gcc_assert (stmt_info);
@@ -302,17 +303,18 @@  vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
                 {
                   stmt = pattern_stmt;
                   stmt_info = vinfo_for_stmt (pattern_stmt);
-                  if (dump_enabled_p ())
+                  if (optinfo_enabled_p ())
                     {
-                      dump_printf_loc (MSG_NOTE, vect_location,
-                                       "==> examining pattern statement: ");
-                      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
+                      OPTINFO_VECT_NOTE
+			<< "==> examining pattern statement: "
+			<< stmt;
                     }
                 }
               else
 	        {
-	          if (dump_enabled_p ())
-	            dump_printf_loc (MSG_NOTE, vect_location, "skip.\n");
+	          if (optinfo_enabled_p ())
+	            OPTINFO_VECT_NOTE
+		      << "skip";
                   gsi_next (&si);
 	          continue;
                 }
@@ -351,12 +353,11 @@  vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
 
 		  if (!gsi_end_p (pattern_def_si))
 		    {
-		      if (dump_enabled_p ())
+		      if (optinfo_enabled_p ())
 			{
-			  dump_printf_loc (MSG_NOTE, vect_location,
-                                           "==> examining pattern def stmt: ");
-			  dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
-                                            pattern_def_stmt, 0);
+			  OPTINFO_VECT_NOTE
+			    << "==> examining pattern def stmt: "
+			    << pattern_def_stmt;
 			}
 
 		      stmt = pattern_def_stmt;
@@ -391,23 +392,20 @@  vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
 		    }
 		  continue;
 		}
-	      if (dump_enabled_p ())
+	      if (optinfo_enabled_p ())
 		{
-	          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-                                   "not vectorized: irregular stmt.");
-		  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION,  TDF_SLIM, stmt,
-                                    0);
+	          OPTINFO_VECT_FAILURE
+		    << "not vectorized: irregular stmt: " << stmt;
 		}
 	      return false;
 	    }
 
 	  if (VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))))
 	    {
-	      if (dump_enabled_p ())
+	      if (optinfo_enabled_p ())
 	        {
-	          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-                                   "not vectorized: vector stmt in loop:");
-	          dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
+	          OPTINFO_VECT_FAILURE
+		    << "not vectorized: vector stmt in loop: " << stmt;
 	        }
 	      return false;
 	    }
@@ -461,24 +459,20 @@  vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
 		    }
 		}
 
-	      if (dump_enabled_p ())
+	      if (optinfo_enabled_p ())
 		{
-		  dump_printf_loc (MSG_NOTE, vect_location,
-                                   "get vectype for scalar type:  ");
-		  dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
-                  dump_printf (MSG_NOTE, "\n");
+		  OPTINFO_VECT_NOTE
+		    << "get vectype for scalar type:  "
+		    << slim (scalar_type);
 		}
 	      vectype = get_vectype_for_scalar_type (scalar_type);
 	      if (!vectype)
 		{
-		  if (dump_enabled_p ())
+		  if (optinfo_enabled_p ())
 		    {
-		      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-                                       "not vectorized: unsupported "
-                                       "data-type ");
-		      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
-                                         scalar_type);
-                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
+		      OPTINFO_VECT_FAILURE
+			<< "not vectorized: unsupported data-type "
+			<< slim (scalar_type);
 		    }
 		  return false;
 		}
@@ -486,11 +480,11 @@  vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
 	      if (!bool_result)
 		STMT_VINFO_VECTYPE (stmt_info) = vectype;
 
-	      if (dump_enabled_p ())
+	      if (optinfo_enabled_p ())
 		{
-		  dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
-		  dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
-                  dump_printf (MSG_NOTE, "\n");
+		  OPTINFO_VECT_NOTE
+		    << "vectype: "
+		    << slim (vectype);
 		}
             }
 
@@ -506,24 +500,21 @@  vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
 	      if (!bool_result)
 		scalar_type = vect_get_smallest_scalar_type (stmt, &dummy,
 							     &dummy);
-	      if (dump_enabled_p ())
+	      if (optinfo_enabled_p ())
 		{
-		  dump_printf_loc (MSG_NOTE, vect_location,
-				   "get vectype for scalar type:  ");
-		  dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
-		  dump_printf (MSG_NOTE, "\n");
+		  OPTINFO_VECT_NOTE
+		    << "get vectype for scalar type:  "
+		    << slim (scalar_type);
 		}
 	      vf_vectype = get_vectype_for_scalar_type (scalar_type);
 	    }
 	  if (!vf_vectype)
 	    {
-	      if (dump_enabled_p ())
+	      if (optinfo_enabled_p ())
 		{
-		  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-                                   "not vectorized: unsupported data-type ");
-		  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
-                                     scalar_type);
-                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
+		  OPTINFO_VECT_FAILURE
+		    << "not vectorized: unsupported data-type "
+		    << slim (scalar_type);
 		}
 	      return false;
 	    }
@@ -531,33 +522,29 @@  vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
 	  if (maybe_ne (GET_MODE_SIZE (TYPE_MODE (vectype)),
 			GET_MODE_SIZE (TYPE_MODE (vf_vectype))))
 	    {
-	      if (dump_enabled_p ())
+	      if (optinfo_enabled_p ())
 		{
-		  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-                                   "not vectorized: different sized vector "
-                                   "types in statement, ");
-		  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
-                                     vectype);
-		  dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
-		  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
-                                     vf_vectype);
-                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
+		  OPTINFO_VECT_FAILURE
+		    << ("not vectorized: different sized vector "
+			"types in statement, ")
+		    << slim (vectype)
+		    << " and "
+		    << slim (vf_vectype);
 		}
 	      return false;
 	    }
 
-	  if (dump_enabled_p ())
+	  if (optinfo_enabled_p ())
 	    {
-	      dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
-	      dump_generic_expr (MSG_NOTE, TDF_SLIM, vf_vectype);
-              dump_printf (MSG_NOTE, "\n");
+	      OPTINFO_VECT_NOTE
+		<< "vectype: " << slim (vf_vectype);
 	    }
 
-	  if (dump_enabled_p ())
+	  if (optinfo_enabled_p ())
 	    {
-	      dump_printf_loc (MSG_NOTE, vect_location, "nunits = ");
-	      dump_dec (MSG_NOTE, TYPE_VECTOR_SUBPARTS (vf_vectype));
-	      dump_printf (MSG_NOTE, "\n");
+	      OPTINFO_VECT_NOTE
+		<< "nunits = "
+		<< TYPE_VECTOR_SUBPARTS (vf_vectype);
 	    }
 
 	  vect_update_max_nunits (&vectorization_factor, vf_vectype);
@@ -571,18 +558,18 @@  vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
     }
 
   /* TODO: Analyze cost. Decide if worth while to vectorize.  */
-  if (dump_enabled_p ())
+  if (optinfo_enabled_p ())
     {
-      dump_printf_loc (MSG_NOTE, vect_location, "vectorization factor = ");
-      dump_dec (MSG_NOTE, vectorization_factor);
-      dump_printf (MSG_NOTE, "\n");
+      OPTINFO_VECT_NOTE
+	<< "vectorization factor = "
+	<< vectorization_factor;
     }
 
   if (known_le (vectorization_factor, 1U))
     {
-      if (dump_enabled_p ())
-        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-                         "not vectorized: unsupported data-type\n");
+      if (optinfo_enabled_p ())
+        OPTINFO_VECT_FAILURE
+	  << "not vectorized: unsupported data-type";
       return false;
     }
   LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;
@@ -603,9 +590,9 @@  vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
 
 	  if (!mask_type)
 	    {
-	      if (dump_enabled_p ())
-		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-				 "not vectorized: unsupported mask\n");
+	      if (optinfo_enabled_p ())
+		OPTINFO_VECT_FAILURE
+		  << "not vectorized: unsupported mask";
 	      return false;
 	    }
 	}
@@ -621,13 +608,12 @@  vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
 	      if (!vect_is_simple_use (rhs, mask_producers[i]->vinfo,
 				       &def_stmt, &dt, &vectype))
 		{
-		  if (dump_enabled_p ())
+		  if (optinfo_enabled_p ())
 		    {
-		      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-				       "not vectorized: can't compute mask type "
-				       "for statement, ");
-		      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION,  TDF_SLIM, stmt,
-					0);
+		      OPTINFO_VECT_FAILURE
+			<< ("not vectorized: can't compute mask type "
+			    "for statement, ")
+			<< stmt;
 		    }
 		  return false;
 		}
@@ -643,34 +629,28 @@  vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
 	      else if (maybe_ne (TYPE_VECTOR_SUBPARTS (mask_type),
 				 TYPE_VECTOR_SUBPARTS (vectype)))
 		{
-		  if (dump_enabled_p ())
+		  if (optinfo_enabled_p ())
 		    {
-		      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-				       "not vectorized: different sized masks "
-				       "types in statement, ");
-		      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
-					 mask_type);
-		      dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
-		      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
-					 vectype);
-		      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
+		      OPTINFO_VECT_FAILURE
+			<< ("not vectorized: different sized masks "
+			    "types in statement, ")
+			<< slim (mask_type)
+			<< " and "
+			<< slim (vectype);
 		    }
 		  return false;
 		}
 	      else if (VECTOR_BOOLEAN_TYPE_P (mask_type)
 		       != VECTOR_BOOLEAN_TYPE_P (vectype))
 		{
-		  if (dump_enabled_p ())
+		  if (optinfo_enabled_p ())
 		    {
-		      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-				       "not vectorized: mixed mask and "
-				       "nonmask vector types in statement, ");
-		      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
-					 mask_type);
-		      dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
-		      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
-					 vectype);
-		      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
+		      OPTINFO_VECT_FAILURE
+			<< ("not vectorized: mixed mask and "
+			    "nonmask vector types in statement, ")
+			<< slim (mask_type)
+			<< " and "
+			<< slim (vectype);
 		    }
 		  return false;
 		}
@@ -690,13 +670,12 @@  vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
 	 if-conversion.  */
       if (!mask_type)
 	{
-	  if (dump_enabled_p ())
+	  if (optinfo_enabled_p ())
 	    {
-	      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			       "not vectorized: can't compute mask type "
-			       "for statement, ");
-	      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION,  TDF_SLIM, stmt,
-				0);
+	      OPTINFO_VECT_FAILURE
+		<< ("not vectorized: can't compute mask type "
+		    "for statement, ")
+		<< stmt;
 	    }
 	  return false;
 	}
@@ -735,13 +714,11 @@  vect_is_simple_iv_evolution (unsigned loop_nb, tree access_fn, tree * init,
   step_expr = evolution_part;
   init_expr = unshare_expr (initial_condition_in_loop_num (access_fn, loop_nb));
 
-  if (dump_enabled_p ())
+  if (optinfo_enabled_p ())
     {
-      dump_printf_loc (MSG_NOTE, vect_location, "step: ");
-      dump_generic_expr (MSG_NOTE, TDF_SLIM, step_expr);
-      dump_printf (MSG_NOTE, ",  init: ");
-      dump_generic_expr (MSG_NOTE, TDF_SLIM, init_expr);
-      dump_printf (MSG_NOTE, "\n");
+      OPTINFO_VECT_NOTE
+	<< "step: " << slim (step_expr)
+	<< ",  init: " << slim (init_expr);
     }
 
   *init = init_expr;
@@ -757,9 +734,9 @@  vect_is_simple_iv_evolution (unsigned loop_nb, tree access_fn, tree * init,
       && (TREE_CODE (step_expr) != REAL_CST
 	  || !flag_associative_math))
     {
-      if (dump_enabled_p ())
-        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-                         "step unknown.\n");
+      if (optinfo_enabled_p ())
+        OPTINFO_VECT_FAILURE
+	  << "step unknown";
       return false;
     }
 
@@ -782,9 +759,7 @@  vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
   gphi_iterator gsi;
   bool double_reduc;
 
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location,
-                     "=== vect_analyze_scalar_cycles ===\n");
+  VECT_OPTINFO_SCOPE ("vect_analyze_scalar_cycles");
 
   /* First - identify all inductions.  Reduction detection assumes that all the
      inductions have been identified, therefore, this order must not be
@@ -796,10 +771,10 @@  vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
       tree def = PHI_RESULT (phi);
       stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi);
 
-      if (dump_enabled_p ())
+      if (optinfo_enabled_p ())
 	{
-	  dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: ");
-	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
+	  OPTINFO_VECT_NOTE
+	    << "Analyze phi: " << phi;
 	}
 
       /* Skip virtual phi's.  The data dependences that are associated with
@@ -814,12 +789,10 @@  vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
       if (access_fn)
 	{
 	  STRIP_NOPS (access_fn);
-	  if (dump_enabled_p ())
+	  if (optinfo_enabled_p ())
 	    {
-	      dump_printf_loc (MSG_NOTE, vect_location,
-                               "Access function of PHI: ");
-	      dump_generic_expr (MSG_NOTE, TDF_SLIM, access_fn);
-              dump_printf (MSG_NOTE, "\n");
+	      OPTINFO_VECT_NOTE
+		<< "Access function of PHI: " << slim (access_fn);
 	    }
 	  STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo)
 	    = initial_condition_in_loop_num (access_fn, loop->num);
@@ -840,8 +813,9 @@  vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
 		  != NULL_TREE);
       gcc_assert (STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo) != NULL_TREE);
 
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_NOTE, vect_location, "Detected induction.\n");
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_NOTE
+	  << "Detected induction";
       STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_induction_def;
     }
 
@@ -854,10 +828,10 @@  vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
       stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi);
       gimple *reduc_stmt;
 
-      if (dump_enabled_p ())
+      if (optinfo_enabled_p ())
         {
-          dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: ");
-          dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
+          OPTINFO_VECT_NOTE
+	    << "Analyze phi: " << phi;
         }
 
       gcc_assert (!virtual_operand_p (def)
@@ -869,9 +843,9 @@  vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
         {
           if (double_reduc)
             {
-              if (dump_enabled_p ())
-                dump_printf_loc (MSG_NOTE, vect_location,
-				 "Detected double reduction.\n");
+              if (optinfo_enabled_p ())
+                OPTINFO_VECT_NOTE
+		  << "Detected double reduction";
 
               STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_double_reduction_def;
               STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
@@ -881,9 +855,9 @@  vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
             {
               if (loop != LOOP_VINFO_LOOP (loop_vinfo))
                 {
-                  if (dump_enabled_p ())
-                    dump_printf_loc (MSG_NOTE, vect_location,
-				     "Detected vectorizable nested cycle.\n");
+                  if (optinfo_enabled_p ())
+                    OPTINFO_VECT_NOTE
+		      << "Detected vectorizable nested cycle";
 
                   STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_nested_cycle;
                   STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
@@ -891,9 +865,9 @@  vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
                 }
               else
                 {
-                  if (dump_enabled_p ())
-                    dump_printf_loc (MSG_NOTE, vect_location,
-				     "Detected reduction.\n");
+                  if (optinfo_enabled_p ())
+                    OPTINFO_VECT_NOTE
+		      << "Detected reduction";
 
                   STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_reduction_def;
                   STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
@@ -907,9 +881,9 @@  vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
             }
         }
       else
-        if (dump_enabled_p ())
-          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			   "Unknown def-use cycle pattern.\n");
+        if (optinfo_enabled_p ())
+          OPTINFO_VECT_FAILURE
+	    << "Unknown def-use cycle pattern";
     }
 }
 
@@ -1029,9 +1003,8 @@  vect_get_loop_niters (struct loop *loop, tree *assumptions,
   *assumptions = boolean_true_node;
   *number_of_iterationsm1 = chrec_dont_know;
   *number_of_iterations = chrec_dont_know;
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location,
-		     "=== get_loop_niters ===\n");
+
+  VECT_OPTINFO_SCOPE ("get_loop_niters");
 
   if (!exit)
     return cond;
@@ -1470,9 +1443,7 @@  vect_analyze_loop_form_1 (struct loop *loop, gcond **loop_cond,
 			  tree *assumptions, tree *number_of_iterationsm1,
 			  tree *number_of_iterations, gcond **inner_loop_cond)
 {
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location,
-		     "=== vect_analyze_loop_form ===\n");
+  VECT_OPTINFO_SCOPE ("vect_analyze_loop_form");
 
   /* Different restrictions apply when we are considering an inner-most loop,
      vs. an outer (nested) loop.
@@ -1494,17 +1465,17 @@  vect_analyze_loop_form_1 (struct loop *loop, gcond **loop_cond,
 
       if (loop->num_nodes != 2)
         {
-          if (dump_enabled_p ())
-            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			     "not vectorized: control flow in loop.\n");
+          if (optinfo_enabled_p ())
+            OPTINFO_VECT_FAILURE
+	      << "not vectorized: control flow in loop";
           return false;
         }
 
       if (empty_block_p (loop->header))
 	{
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			     "not vectorized: empty loop.\n");
+	  if (optinfo_enabled_p ())
+	    OPTINFO_VECT_FAILURE
+	      << "not vectorized: empty loop";
 	  return false;
 	}
     }
@@ -1532,17 +1503,17 @@  vect_analyze_loop_form_1 (struct loop *loop, gcond **loop_cond,
 
       if ((loop->inner)->inner || (loop->inner)->next)
 	{
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			     "not vectorized: multiple nested loops.\n");
+	  if (optinfo_enabled_p ())
+	    OPTINFO_VECT_FAILURE
+	      << "not vectorized: multiple nested loops";
 	  return false;
 	}
 
       if (loop->num_nodes != 5)
         {
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			     "not vectorized: control flow in loop.\n");
+	  if (optinfo_enabled_p ())
+	    OPTINFO_VECT_FAILURE
+	      << "not vectorized: control flow in loop";
 	  return false;
         }
 
@@ -1551,9 +1522,9 @@  vect_analyze_loop_form_1 (struct loop *loop, gcond **loop_cond,
 	  || !single_exit (innerloop)
 	  || single_exit (innerloop)->dest != EDGE_PRED (loop->latch, 0)->src)
 	{
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			     "not vectorized: unsupported outerloop form.\n");
+	  if (optinfo_enabled_p ())
+	    OPTINFO_VECT_FAILURE
+	      << "not vectorized: unsupported outerloop form";
 	  return false;
 	}
 
@@ -1566,37 +1537,36 @@  vect_analyze_loop_form_1 (struct loop *loop, gcond **loop_cond,
 	     loop.  */
 	  || !integer_onep (inner_assumptions))
 	{
-	  if (dump_enabled_p ())
-            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			     "not vectorized: Bad inner loop.\n");
+	  if (optinfo_enabled_p ())
+            OPTINFO_VECT_FAILURE
+	      << "not vectorized: bad inner loop";
 	  return false;
 	}
 
       if (!expr_invariant_in_loop_p (loop, inner_niter))
 	{
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			     "not vectorized: inner-loop count not"
-                             " invariant.\n");
+	  if (optinfo_enabled_p ())
+	    OPTINFO_VECT_FAILURE
+	      << "not vectorized: inner-loop count not invariant";
 	  return false;
 	}
 
-      if (dump_enabled_p ())
-        dump_printf_loc (MSG_NOTE, vect_location,
-			 "Considering outer-loop vectorization.\n");
+      if (optinfo_enabled_p ())
+        OPTINFO_VECT_NOTE
+	  << "Considering outer-loop vectorization";
     }
 
   if (!single_exit (loop)
       || EDGE_COUNT (loop->header->preds) != 2)
     {
-      if (dump_enabled_p ())
+      if (optinfo_enabled_p ())
         {
           if (!single_exit (loop))
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			     "not vectorized: multiple exits.\n");
+	    OPTINFO_VECT_FAILURE
+	      << "not vectorized: multiple exits";
           else if (EDGE_COUNT (loop->header->preds) != 2)
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			     "not vectorized: too many incoming edges.\n");
+	    OPTINFO_VECT_FAILURE
+	      << "not vectorized: too many incoming edges";
         }
       return false;
     }
@@ -1608,9 +1578,9 @@  vect_analyze_loop_form_1 (struct loop *loop, gcond **loop_cond,
   if (!empty_block_p (loop->latch)
       || !gimple_seq_empty_p (phi_nodes (loop->latch)))
     {
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			 "not vectorized: latch block not empty.\n");
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_FAILURE
+	  << "not vectorized: latch block not empty";
       return false;
     }
 
@@ -1618,9 +1588,9 @@  vect_analyze_loop_form_1 (struct loop *loop, gcond **loop_cond,
   edge e = single_exit (loop);
   if (e->flags & EDGE_ABNORMAL)
     {
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			 "not vectorized: abnormal loop exit edge.\n");
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_FAILURE
+	  << "not vectorized: abnormal loop exit edge";
       return false;
     }
 
@@ -1628,9 +1598,9 @@  vect_analyze_loop_form_1 (struct loop *loop, gcond **loop_cond,
 				     number_of_iterationsm1);
   if (!*loop_cond)
     {
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			 "not vectorized: complicated exit condition.\n");
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_FAILURE
+	  << "not vectorized: complicated exit condition";
       return false;
     }
 
@@ -1638,18 +1608,17 @@  vect_analyze_loop_form_1 (struct loop *loop, gcond **loop_cond,
       || !*number_of_iterations
       || chrec_contains_undetermined (*number_of_iterations))
     {
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			 "not vectorized: number of iterations cannot be "
-			 "computed.\n");
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_FAILURE
+	  << "not vectorized: number of iterations cannot be computed";
       return false;
     }
 
   if (integer_zerop (*number_of_iterations))
     {
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			 "not vectorized: number of iterations = 0.\n");
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_FAILURE
+	  << "not vectorized: number of iterations = 0";
       return false;
     }
 
@@ -1688,15 +1657,10 @@  vect_analyze_loop_form (struct loop *loop)
     }
 
   if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
-    {
-      if (dump_enabled_p ())
-        {
-          dump_printf_loc (MSG_NOTE, vect_location,
-			   "Symbolic number of iterations is ");
-	  dump_generic_expr (MSG_NOTE, TDF_DETAILS, number_of_iterations);
-          dump_printf (MSG_NOTE, "\n");
-        }
-    }
+    if (optinfo_enabled_p ())
+      OPTINFO_VECT_NOTE
+	<< "symbolic number of iterations is "
+	<< details (number_of_iterations);
 
   STMT_VINFO_TYPE (vinfo_for_stmt (loop_cond)) = loop_exit_ctrl_vec_info_type;
   if (inner_loop_cond)
@@ -1722,9 +1686,7 @@  vect_update_vf_for_slp (loop_vec_info loop_vinfo)
   poly_uint64 vectorization_factor;
   int i;
 
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location,
-		     "=== vect_update_vf_for_slp ===\n");
+  VECT_OPTINFO_SCOPE ("vect_update_vf_for_slp");
 
   vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
   gcc_assert (known_ne (vectorization_factor, 0U));
@@ -1759,14 +1721,14 @@  vect_update_vf_for_slp (loop_vec_info loop_vinfo)
 
   if (only_slp_in_loop)
     {
-      dump_printf_loc (MSG_NOTE, vect_location,
-		       "Loop contains only SLP stmts\n");
+      OPTINFO_VECT_NOTE
+	<< "Loop contains only SLP stmts";
       vectorization_factor = LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo);
     }
   else
     {
-      dump_printf_loc (MSG_NOTE, vect_location,
-		       "Loop contains SLP and non-SLP stmts\n");
+      OPTINFO_VECT_NOTE
+	<< "Loop contains SLP and non-SLP stmts";
       /* Both the vectorization factor and unroll factor have the form
 	 current_vector_size * X for some rational X, so they must have
 	 a common multiple.  */
@@ -1776,12 +1738,11 @@  vect_update_vf_for_slp (loop_vec_info loop_vinfo)
     }
 
   LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;
-  if (dump_enabled_p ())
+  if (optinfo_enabled_p ())
     {
-      dump_printf_loc (MSG_NOTE, vect_location,
-		       "Updating vectorization factor to ");
-      dump_dec (MSG_NOTE, vectorization_factor);
-      dump_printf (MSG_NOTE, ".\n");
+      OPTINFO_VECT_NOTE
+	<< "Updating vectorization factor to "
+	<< vectorization_factor;
     }
 }
 
@@ -1827,9 +1788,7 @@  vect_analyze_loop_operations (loop_vec_info loop_vinfo)
   bool need_to_vectorize = false;
   bool ok;
 
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location,
-		     "=== vect_analyze_loop_operations ===\n");
+  VECT_OPTINFO_SCOPE ("vect_analyze_loop_operations");
 
   for (i = 0; i < nbbs; i++)
     {
@@ -1842,10 +1801,11 @@  vect_analyze_loop_operations (loop_vec_info loop_vinfo)
           ok = true;
 
           stmt_info = vinfo_for_stmt (phi);
-          if (dump_enabled_p ())
+          if (optinfo_enabled_p ())
             {
-              dump_printf_loc (MSG_NOTE, vect_location, "examining phi: ");
-              dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
+              OPTINFO_VECT_NOTE
+		<< "examining phi: "
+		<< phi;
             }
 	  if (virtual_operand_p (gimple_phi_result (phi)))
 	    continue;
@@ -1861,10 +1821,9 @@  vect_analyze_loop_operations (loop_vec_info loop_vinfo)
               if (STMT_VINFO_LIVE_P (stmt_info)
 		  && !vect_active_double_reduction_p (stmt_info))
                 {
-                  if (dump_enabled_p ())
-		    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-				     "Unsupported loop-closed phi in "
-				     "outer-loop.\n");
+                  if (optinfo_enabled_p ())
+		    OPTINFO_VECT_FAILURE
+		      << "Unsupported loop-closed phi in outer-loop";
                   return false;
                 }
 
@@ -1905,9 +1864,9 @@  vect_analyze_loop_operations (loop_vec_info loop_vinfo)
               && STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def)
             {
               /* A scalar-dependence cycle that we don't support.  */
-              if (dump_enabled_p ())
-		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-				 "not vectorized: scalar dependence cycle.\n");
+              if (optinfo_enabled_p ())
+		OPTINFO_VECT_FAILURE
+		  << "not vectorized: scalar dependence cycle";
               return false;
             }
 
@@ -1931,12 +1890,11 @@  vect_analyze_loop_operations (loop_vec_info loop_vinfo)
 
           if (!ok)
             {
-              if (dump_enabled_p ())
+              if (optinfo_enabled_p ())
                 {
-		  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-				   "not vectorized: relevant phi not "
-				   "supported: ");
-                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, phi, 0);
+		  OPTINFO_VECT_FAILURE
+		    << "not vectorized: relevant phi not supported: "
+		    << phi;
                 }
 	      return false;
             }
@@ -1959,13 +1917,12 @@  vect_analyze_loop_operations (loop_vec_info loop_vinfo)
      touching this loop.  */
   if (!need_to_vectorize)
     {
-      if (dump_enabled_p ())
-        dump_printf_loc (MSG_NOTE, vect_location,
-			 "All the computation can be taken out of the loop.\n");
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			 "not vectorized: redundant loop. no profit to "
-			 "vectorize.\n");
+      if (optinfo_enabled_p ())
+        OPTINFO_VECT_NOTE
+	  << "All the computation can be taken out of the loop";
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_FAILURE
+	  << "not vectorized: redundant loop. no profit to vectorize";
       return false;
     }
 
@@ -1996,10 +1953,10 @@  vect_analyze_loop_costing (loop_vec_info loop_vinfo)
       if (max_niter != -1
 	  && (unsigned HOST_WIDE_INT) max_niter < assumed_vf)
 	{
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			     "not vectorized: iteration count smaller than "
-			     "vectorization factor.\n");
+	  if (optinfo_enabled_p ())
+	    OPTINFO_VECT_FAILURE
+	      << ("not vectorized: iteration count smaller than "
+		  "vectorization factor");
 	  return 0;
 	}
     }
@@ -2010,13 +1967,12 @@  vect_analyze_loop_costing (loop_vec_info loop_vinfo)
 
   if (min_profitable_iters < 0)
     {
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			 "not vectorized: vectorization not profitable.\n");
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			 "not vectorized: vector version will never be "
-			 "profitable.\n");
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_FAILURE
+	  << "not vectorized: vectorization not profitable";
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_FAILURE
+	  << "not vectorized: vector version will never be profitable";
       return -1;
     }
 
@@ -2033,14 +1989,14 @@  vect_analyze_loop_costing (loop_vec_info loop_vinfo)
   if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
       && LOOP_VINFO_INT_NITERS (loop_vinfo) < th)
     {
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			 "not vectorized: vectorization not profitable.\n");
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_NOTE, vect_location,
-			 "not vectorized: iteration count smaller than user "
-			 "specified loop bound parameter or minimum profitable "
-			 "iterations (whichever is more conservative).\n");
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_FAILURE
+	  << "not vectorized: vectorization not profitable";
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_NOTE
+	  << ("not vectorized: iteration count smaller than user "
+	      "specified loop bound parameter or minimum profitable "
+	      "iterations (whichever is more conservative)");
       return 0;
     }
 
@@ -2051,16 +2007,15 @@  vect_analyze_loop_costing (loop_vec_info loop_vinfo)
       && ((unsigned HOST_WIDE_INT) estimated_niter
 	  < MAX (th, (unsigned) min_profitable_estimate)))
     {
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			 "not vectorized: estimated iteration count too "
-			 "small.\n");
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_NOTE, vect_location,
-			 "not vectorized: estimated iteration count smaller "
-			 "than specified loop bound parameter or minimum "
-			 "profitable iterations (whichever is more "
-			 "conservative).\n");
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_FAILURE
+	  << "not vectorized: estimated iteration count too small";
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_NOTE
+	  << ("not vectorized: estimated iteration count smaller "
+	      "than specified loop bound parameter or minimum "
+	      "profitable iterations (whichever is more "
+	      "conservative)");
       return -1;
     }
 
@@ -2093,11 +2048,11 @@  vect_analyze_loop_2 (loop_vec_info loop_vinfo, bool &fatal)
   loop_p loop = LOOP_VINFO_LOOP (loop_vinfo);
   if (!find_loop_nest (loop, &LOOP_VINFO_LOOP_NEST (loop_vinfo)))
     {
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			 "not vectorized: loop nest containing two "
-			 "or more consecutive inner loops cannot be "
-			 "vectorized\n");
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_FAILURE
+	  << ("not vectorized: loop nest containing two "
+	      "or more consecutive inner loops cannot be "
+	      "vectorized");
       return false;
     }
 
@@ -2142,11 +2097,11 @@  vect_analyze_loop_2 (loop_vec_info loop_vinfo, bool &fatal)
 		      }
 		  }
 	      }
-	    if (dump_enabled_p ())
-	      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			       "not vectorized: loop contains function "
-			       "calls or data references that cannot "
-			       "be analyzed\n");
+	    if (optinfo_enabled_p ())
+	      OPTINFO_VECT_FAILURE
+		<< ("not vectorized: loop contains function "
+		    "calls or data references that cannot "
+		    "be analyzed");
 	    return false;
 	  }
       }
@@ -2157,9 +2112,9 @@  vect_analyze_loop_2 (loop_vec_info loop_vinfo, bool &fatal)
   ok = vect_analyze_data_refs (loop_vinfo, &min_vf);
   if (!ok)
     {
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			 "bad data references.\n");
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_FAILURE
+	  << "bad data references";
       return false;
     }
 
@@ -2177,9 +2132,9 @@  vect_analyze_loop_2 (loop_vec_info loop_vinfo, bool &fatal)
   ok = vect_analyze_data_ref_accesses (loop_vinfo);
   if (!ok)
     {
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			 "bad data access.\n");
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_FAILURE
+	  << "bad data access";
       return false;
     }
 
@@ -2188,9 +2143,9 @@  vect_analyze_loop_2 (loop_vec_info loop_vinfo, bool &fatal)
   ok = vect_mark_stmts_to_be_vectorized (loop_vinfo);
   if (!ok)
     {
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			 "unexpected pattern.\n");
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_FAILURE
+	  << "unexpected pattern";
       return false;
     }
 
@@ -2207,9 +2162,9 @@  vect_analyze_loop_2 (loop_vec_info loop_vinfo, bool &fatal)
       || (max_vf != MAX_VECTORIZATION_FACTOR
 	  && maybe_lt (max_vf, min_vf)))
     {
-      if (dump_enabled_p ())
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			     "bad data dependence.\n");
+      if (optinfo_enabled_p ())
+	    OPTINFO_VECT_FAILURE
+	      << "bad data dependence";
       return false;
     }
   LOOP_VINFO_MAX_VECT_FACTOR (loop_vinfo) = max_vf;
@@ -2217,17 +2172,17 @@  vect_analyze_loop_2 (loop_vec_info loop_vinfo, bool &fatal)
   ok = vect_determine_vectorization_factor (loop_vinfo);
   if (!ok)
     {
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			 "can't determine vectorization factor.\n");
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_FAILURE
+	  << "can't determine vectorization factor";
       return false;
     }
   if (max_vf != MAX_VECTORIZATION_FACTOR
       && maybe_lt (max_vf, LOOP_VINFO_VECT_FACTOR (loop_vinfo)))
     {
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			 "bad data dependence.\n");
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_FAILURE
+	  << "bad data dependence";
       return false;
     }
 
@@ -2266,13 +2221,13 @@  start_over:
   poly_uint64 vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
   gcc_assert (known_ne (vectorization_factor, 0U));
 
-  if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) && dump_enabled_p ())
+  if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) && optinfo_enabled_p ())
     {
-      dump_printf_loc (MSG_NOTE, vect_location,
-		       "vectorization_factor = ");
-      dump_dec (MSG_NOTE, vectorization_factor);
-      dump_printf (MSG_NOTE, ", niters = " HOST_WIDE_INT_PRINT_DEC "\n",
-		   LOOP_VINFO_INT_NITERS (loop_vinfo));
+      OPTINFO_VECT_NOTE
+	<< "vectorization_factor = "
+	<< vectorization_factor
+	<< optinfo_printf (", niters = " HOST_WIDE_INT_PRINT_DEC,
+			   LOOP_VINFO_INT_NITERS (loop_vinfo));
     }
 
   HOST_WIDE_INT max_niter
@@ -2284,9 +2239,9 @@  start_over:
   ok = vect_analyze_data_refs_alignment (loop_vinfo);
   if (!ok)
     {
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			 "bad data alignment.\n");
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_FAILURE
+	  << "bad data alignment";
       return false;
     }
 
@@ -2306,9 +2261,9 @@  start_over:
     ok = vect_enhance_data_refs_alignment (loop_vinfo);
     if (!ok)
       {
-	if (dump_enabled_p ())
-	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			   "bad data alignment.\n");
+	if (optinfo_enabled_p ())
+	  OPTINFO_VECT_FAILURE
+	    << "bad data alignment";
         return false;
       }
     }
@@ -2329,9 +2284,9 @@  start_over:
   ok = vect_analyze_loop_operations (loop_vinfo);
   if (!ok)
     {
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			 "bad operation or unsupported loop bound.\n");
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_FAILURE
+	  << "bad operation or unsupported loop bound";
       return false;
     }
 
@@ -2340,14 +2295,14 @@  start_over:
   LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
     = (LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo)
        && vect_verify_full_masking (loop_vinfo));
-  if (dump_enabled_p ())
+  if (optinfo_enabled_p ())
     {
       if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
-	dump_printf_loc (MSG_NOTE, vect_location,
-			 "using a fully-masked loop.\n");
+	OPTINFO_VECT_NOTE
+	  << "using a fully-masked loop";
       else
-	dump_printf_loc (MSG_NOTE, vect_location,
-			 "not using a fully-masked loop.\n");
+	OPTINFO_VECT_NOTE
+	  << "not using a fully-masked loop";
     }
 
   /* If epilog loop is required because of data accesses with gaps,
@@ -2362,10 +2317,10 @@  start_over:
 
       if (known_lt (wi::to_widest (scalar_niters), vf))
 	{
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_NOTE, vect_location,
-			     "loop has no enough iterations to support"
-			     " peeling for gaps.\n");
+	  if (optinfo_enabled_p ())
+	    OPTINFO_VECT_NOTE
+	      << ("loop has not enough iterations to support"
+		  " peeling for gaps");
 	  return false;
 	}
     }
@@ -2376,9 +2331,9 @@  start_over:
     goto again;
   if (!res)
     {
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			 "Loop costings not worthwhile.\n");
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_FAILURE
+	  << "Loop costings not worthwhile";
       return false;
     }
 
@@ -2414,17 +2369,17 @@  start_over:
   if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
       || LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo))
     {
-      if (dump_enabled_p ())
-        dump_printf_loc (MSG_NOTE, vect_location, "epilog loop required\n");
+      if (optinfo_enabled_p ())
+        OPTINFO_VECT_NOTE
+	  << "epilog loop required";
       if (!vect_can_advance_ivs_p (loop_vinfo)
 	  || !slpeel_can_duplicate_loop_p (LOOP_VINFO_LOOP (loop_vinfo),
 					   single_exit (LOOP_VINFO_LOOP
 							 (loop_vinfo))))
         {
-          if (dump_enabled_p ())
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			     "not vectorized: can't create required "
-			     "epilog loop\n");
+          if (optinfo_enabled_p ())
+	    OPTINFO_VECT_FAILURE
+	      << "not vectorized: can't create required epilog loop";
           goto again;
         }
     }
@@ -2509,9 +2464,9 @@  again:
 	}
     }
 
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location,
-		     "re-trying with SLP disabled\n");
+  if (optinfo_enabled_p ())
+    OPTINFO_VECT_NOTE
+      << "re-trying with SLP disabled";
 
   /* Roll back state appropriately.  No SLP this time.  */
   slp = false;
@@ -2587,17 +2542,15 @@  vect_analyze_loop (struct loop *loop, loop_vec_info orig_loop_vinfo)
   targetm.vectorize.autovectorize_vector_sizes (&vector_sizes);
   unsigned int next_size = 0;
 
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location,
-		     "===== analyze_loop_nest =====\n");
+  VECT_OPTINFO_SCOPE ("analyze_loop_nest");
 
   if (loop_outer (loop)
       && loop_vec_info_for_loop (loop_outer (loop))
       && LOOP_VINFO_VECTORIZABLE_P (loop_vec_info_for_loop (loop_outer (loop))))
     {
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_NOTE, vect_location,
-			 "outer-loop already vectorized.\n");
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_NOTE
+	  << "outer-loop already vectorized";
       return NULL;
     }
 
@@ -2608,9 +2561,9 @@  vect_analyze_loop (struct loop *loop, loop_vec_info orig_loop_vinfo)
       loop_vinfo = vect_analyze_loop_form (loop);
       if (!loop_vinfo)
 	{
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			     "bad loop form.\n");
+	  if (optinfo_enabled_p ())
+	    OPTINFO_VECT_FAILURE
+	      << "bad loop form";
 	  return NULL;
 	}
 
@@ -2642,13 +2595,11 @@  vect_analyze_loop (struct loop *loop, loop_vec_info orig_loop_vinfo)
 
       /* Try the next biggest vector size.  */
       current_vector_size = vector_sizes[next_size++];
-      if (dump_enabled_p ())
+      if (optinfo_enabled_p ())
 	{
-	  dump_printf_loc (MSG_NOTE, vect_location,
-			   "***** Re-trying analysis with "
-			   "vector size ");
-	  dump_dec (MSG_NOTE, current_vector_size);
-	  dump_printf (MSG_NOTE, "\n");
+	  OPTINFO_VECT_NOTE
+	    << "***** Re-trying analysis with vector size "
+	    << current_vector_size;
 	}
     }
 }
@@ -2775,10 +2726,10 @@  neutral_op_for_slp_reduction (slp_tree slp_node, tree_code code,
    STMT is printed with a message MSG. */
 
 static void
-report_vect_op (dump_flags_t msg_type, gimple *stmt, const char *msg)
+report_vect_op (enum optinfo_kind kind, gimple *stmt, const char *msg)
 {
-  dump_printf_loc (msg_type, vect_location, "%s", msg);
-  dump_gimple_stmt (msg_type, TDF_SLIM, stmt, 0);
+  OPTINFO_VECT (kind)
+    << msg << stmt;
 }
 
 
@@ -2945,10 +2896,10 @@  vect_is_slp_reduction (loop_vec_info loop_info, gimple *phi,
                                   == vect_internal_def
                       && !is_loop_header_bb_p (gimple_bb (def_stmt)))))
   	    {
-	      if (dump_enabled_p ())
+	      if (optinfo_enabled_p ())
 		{
-		  dump_printf_loc (MSG_NOTE, vect_location, "swapping oprnds: ");
-		  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, next_stmt, 0);
+		  OPTINFO_VECT_NOTE
+		    << "swapping oprnds: " << next_stmt;
 		}
 
 	      swap_ssa_operands (next_stmt,
@@ -3074,17 +3025,15 @@  pop:
 	}
     }
   while (1);
-  if (dump_file && (dump_flags & TDF_DETAILS))
+  // FIXME: the removed code was gated on (dump_file && (dump_flags & TDF_DETAILS)); optinfo_enabled_p () does not yet honor TDF_DETAILS
+  if (optinfo_enabled_p ())
     {
-      dump_printf_loc (MSG_NOTE, loc, "reduction path: ");
+      pending_optinfo info = OPTINFO_NOTE (loop) // FIXME: should this anchor on the location "loc" (as the old dump_printf_loc did) rather than on "loop"?
+	<< "reduction path: ";
       unsigned i;
       std::pair<ssa_op_iter, use_operand_p> *x;
       FOR_EACH_VEC_ELT (path, i, x)
-	{
-	  dump_generic_expr (MSG_NOTE, TDF_SLIM, USE_FROM_PTR (x->second));
-	  dump_printf (MSG_NOTE, " ");
-	}
-      dump_printf (MSG_NOTE, "\n");
+	info << slim (USE_FROM_PTR (x->second)) << " ";
     }
 
   /* Check whether the reduction path detected is valid.  */
@@ -3201,9 +3150,9 @@  vect_is_simple_reduction (loop_vec_info loop_info, gimple *phi,
 
       if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
         {
-          if (dump_enabled_p ())
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			     "intermediate value used outside loop.\n");
+          if (optinfo_enabled_p ())
+	    OPTINFO_VECT_FAILURE
+	      << "intermediate value used outside loop";
 
           return NULL;
         }
@@ -3211,9 +3160,9 @@  vect_is_simple_reduction (loop_vec_info loop_info, gimple *phi,
       nloop_uses++;
       if (nloop_uses > 1)
         {
-          if (dump_enabled_p ())
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			     "reduction value used in loop.\n");
+          if (optinfo_enabled_p ())
+	    OPTINFO_VECT_FAILURE
+	      << "reduction value used in loop";
           return NULL;
         }
 
@@ -3224,13 +3173,10 @@  vect_is_simple_reduction (loop_vec_info loop_info, gimple *phi,
   tree loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
   if (TREE_CODE (loop_arg) != SSA_NAME)
     {
-      if (dump_enabled_p ())
-	{
-	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			   "reduction: not ssa_name: ");
-	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, loop_arg);
-          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
-	}
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_FAILURE
+	  << "reduction: not ssa_name: "
+	  << slim (loop_arg);
       return NULL;
     }
 
@@ -3247,11 +3193,11 @@  vect_is_simple_reduction (loop_vec_info loop_info, gimple *phi,
     }
   else
     {
-      if (dump_enabled_p ())
+      if (optinfo_enabled_p ())
 	{
-	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			   "reduction: unhandled reduction operation: ");
-	  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, def_stmt, 0);
+	  OPTINFO_VECT_FAILURE
+	    << "reduction: unhandled reduction operation: "
+	    << def_stmt;
 	}
       return NULL;
     }
@@ -3273,9 +3219,9 @@  vect_is_simple_reduction (loop_vec_info loop_info, gimple *phi,
 	lcphis.safe_push (as_a <gphi *> (use_stmt));
       if (nloop_uses > 1)
 	{
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			     "reduction used in loop.\n");
+	  if (optinfo_enabled_p ())
+	    OPTINFO_VECT_FAILURE
+	      << "reduction used in loop";
 	  return NULL;
 	}
     }
@@ -3289,9 +3235,9 @@  vect_is_simple_reduction (loop_vec_info loop_info, gimple *phi,
       if (gimple_phi_num_args (def_stmt) != 1
           || TREE_CODE (op1) != SSA_NAME)
         {
-          if (dump_enabled_p ())
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			     "unsupported phi node definition.\n");
+          if (optinfo_enabled_p ())
+	    OPTINFO_VECT_FAILURE
+	      << "unsupported phi node definition";
 
           return NULL;
         }
@@ -3304,8 +3250,8 @@  vect_is_simple_reduction (loop_vec_info loop_info, gimple *phi,
           && is_gimple_assign (def1)
 	  && flow_bb_inside_loop_p (loop->inner, gimple_bb (phi_use_stmt)))
         {
-          if (dump_enabled_p ())
-            report_vect_op (MSG_NOTE, def_stmt,
+          if (optinfo_enabled_p ())
+            report_vect_op (OPTINFO_KIND_NOTE, def_stmt,
 			    "detected double reduction: ");
 
           *double_reduc = true;
@@ -3358,8 +3304,8 @@  vect_is_simple_reduction (loop_vec_info loop_info, gimple *phi,
         }
       if (op3 == phi_name || op4 == phi_name)
 	{
-	  if (dump_enabled_p ())
-	    report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
+	  if (optinfo_enabled_p ())
+	    report_vect_op (OPTINFO_KIND_FAILURE, def_stmt,
 			    "reduction: condition depends on previous"
 			    " iteration: ");
 	  return NULL;
@@ -3370,8 +3316,8 @@  vect_is_simple_reduction (loop_vec_info loop_info, gimple *phi,
     }
   else if (!commutative_tree_code (code) || !associative_tree_code (code))
     {
-      if (dump_enabled_p ())
-	report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
+      if (optinfo_enabled_p ())
+	report_vect_op (OPTINFO_KIND_FAILURE, def_stmt,
 			"reduction: not commutative/associative: ");
       return NULL;
     }
@@ -3382,16 +3328,16 @@  vect_is_simple_reduction (loop_vec_info loop_info, gimple *phi,
     }
   else
     {
-      if (dump_enabled_p ())
-	report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
+      if (optinfo_enabled_p ())
+	report_vect_op (OPTINFO_KIND_FAILURE, def_stmt,
 			"reduction: not handled operation: ");
       return NULL;
     }
 
   if (TREE_CODE (op1) != SSA_NAME && TREE_CODE (op2) != SSA_NAME)
     {
-      if (dump_enabled_p ())
-	report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
+      if (optinfo_enabled_p ())
+	report_vect_op (OPTINFO_KIND_FAILURE, def_stmt,
 			"reduction: both uses not ssa_names: ");
 
       return NULL;
@@ -3407,31 +3353,21 @@  vect_is_simple_reduction (loop_vec_info loop_info, gimple *phi,
       || (op4 && TREE_CODE (op4) == SSA_NAME
           && !types_compatible_p (type, TREE_TYPE (op4))))
     {
-      if (dump_enabled_p ())
+      if (optinfo_enabled_p ())
         {
-          dump_printf_loc (MSG_NOTE, vect_location,
-			   "reduction: multiple types: operation type: ");
-          dump_generic_expr (MSG_NOTE, TDF_SLIM, type);
-          dump_printf (MSG_NOTE, ", operands types: ");
-          dump_generic_expr (MSG_NOTE, TDF_SLIM,
-			     TREE_TYPE (op1));
-          dump_printf (MSG_NOTE, ",");
-          dump_generic_expr (MSG_NOTE, TDF_SLIM,
-			     TREE_TYPE (op2));
+	  pending_optinfo info
+	    = OPTINFO_VECT_NOTE
+	        << "reduction: multiple types: operation type: "
+		<< slim (type)
+		<< ", operands types: "
+		<< slim (TREE_TYPE (op1))
+		<< ","
+		<< slim (TREE_TYPE (op2));
           if (op3)
-            {
-              dump_printf (MSG_NOTE, ",");
-              dump_generic_expr (MSG_NOTE, TDF_SLIM,
-				 TREE_TYPE (op3));
-            }
+	    info << "," << slim (TREE_TYPE (op3));
 
           if (op4)
-            {
-              dump_printf (MSG_NOTE, ",");
-              dump_generic_expr (MSG_NOTE, TDF_SLIM,
-				 TREE_TYPE (op4));
-            }
-          dump_printf (MSG_NOTE, "\n");
+	    info << "," << slim (TREE_TYPE (op4));
         }
 
       return NULL;
@@ -3463,8 +3399,8 @@  vect_is_simple_reduction (loop_vec_info loop_info, gimple *phi,
   if (code != COND_EXPR
       && ((!def1 || gimple_nop_p (def1)) && (!def2 || gimple_nop_p (def2))))
     {
-      if (dump_enabled_p ())
-	report_vect_op (MSG_NOTE, def_stmt, "reduction: no defs for operands: ");
+      if (optinfo_enabled_p ())
+	report_vect_op (OPTINFO_KIND_NOTE, def_stmt, "reduction: no defs for operands: ");
       return NULL;
     }
 
@@ -3486,8 +3422,8 @@  vect_is_simple_reduction (loop_vec_info loop_info, gimple *phi,
                           == vect_internal_def
  	              && !is_loop_header_bb_p (gimple_bb (def1)))))))
     {
-      if (dump_enabled_p ())
-	report_vect_op (MSG_NOTE, def_stmt, "detected reduction: ");
+      if (optinfo_enabled_p ())
+	report_vect_op (OPTINFO_KIND_NOTE, def_stmt, "detected reduction: ");
       return def_stmt;
     }
 
@@ -3531,8 +3467,8 @@  vect_is_simple_reduction (loop_vec_info loop_info, gimple *phi,
 		}
 	      else
 		{
-		  if (dump_enabled_p ())
-		    report_vect_op (MSG_NOTE, def_stmt,
+		  if (optinfo_enabled_p ())
+		    report_vect_op (OPTINFO_KIND_NOTE, def_stmt,
 				    "detected reduction: cannot swap operands "
 				    "for cond_expr");
 		  return NULL;
@@ -3542,8 +3478,8 @@  vect_is_simple_reduction (loop_vec_info loop_info, gimple *phi,
 	    swap_ssa_operands (def_stmt, gimple_assign_rhs1_ptr (def_stmt),
 			       gimple_assign_rhs2_ptr (def_stmt));
 
-	  if (dump_enabled_p ())
-	    report_vect_op (MSG_NOTE, def_stmt,
+	  if (optinfo_enabled_p ())
+	    report_vect_op (OPTINFO_KIND_NOTE, def_stmt,
 			    "detected reduction: need to swap operands: ");
 
 	  if (CONSTANT_CLASS_P (gimple_assign_rhs1 (def_stmt)))
@@ -3551,8 +3487,8 @@  vect_is_simple_reduction (loop_vec_info loop_info, gimple *phi,
         }
       else
         {
-          if (dump_enabled_p ())
-            report_vect_op (MSG_NOTE, def_stmt, "detected reduction: ");
+          if (optinfo_enabled_p ())
+            report_vect_op (OPTINFO_KIND_NOTE, def_stmt, "detected reduction: ");
         }
 
       return def_stmt;
@@ -3564,8 +3500,8 @@  vect_is_simple_reduction (loop_vec_info loop_info, gimple *phi,
       && orig_code != MINUS_EXPR
       && vect_is_slp_reduction (loop_info, phi, def_stmt))
     {
-      if (dump_enabled_p ())
-        report_vect_op (MSG_NOTE, def_stmt,
+      if (optinfo_enabled_p ())
+        report_vect_op (OPTINFO_KIND_NOTE, def_stmt,
 			"reduction: detected reduction chain: ");
 
       return def_stmt;
@@ -3586,9 +3522,9 @@  vect_is_simple_reduction (loop_vec_info loop_info, gimple *phi,
 			    code))
     return def_stmt;
 
-  if (dump_enabled_p ())
+  if (optinfo_enabled_p ())
     {
-      report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
+      report_vect_op (OPTINFO_KIND_FAILURE, def_stmt,
 		      "reduction: unknown pattern: ");
     }
 
@@ -3634,10 +3570,10 @@  vect_get_known_peeling_cost (loop_vec_info loop_vinfo, int peel_iters_prologue,
   if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
     {
       *peel_iters_epilogue = assumed_vf / 2;
-      if (dump_enabled_p ())
-        dump_printf_loc (MSG_NOTE, vect_location,
-			 "cost model: epilogue peel iters set to vf/2 "
-			 "because loop iterations are unknown .\n");
+      if (optinfo_enabled_p ())
+        OPTINFO_VECT_NOTE
+	  << ("cost model: epilogue peel iters set to vf/2 "
+	      "because loop iterations are unknown");
 
       /* If peeled iterations are known but number of scalar loop
          iterations are unknown, count a taken branch per peeled loop.  */
@@ -3720,7 +3656,8 @@  vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo,
   /* Cost model disabled.  */
   if (unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
     {
-      dump_printf_loc (MSG_NOTE, vect_location, "cost model disabled.\n");
+      OPTINFO_VECT_NOTE
+	<< "cost model disabled";
       *ret_min_profitable_niters = 0;
       *ret_min_profitable_estimate = 0;
       return;
@@ -3980,25 +3917,28 @@  vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo,
 
   vec_outside_cost = (int)(vec_prologue_cost + vec_epilogue_cost);
   
-  if (dump_enabled_p ())
-    {
-      dump_printf_loc (MSG_NOTE, vect_location, "Cost model analysis: \n");
-      dump_printf (MSG_NOTE, "  Vector inside of loop cost: %d\n",
-                   vec_inside_cost);
-      dump_printf (MSG_NOTE, "  Vector prologue cost: %d\n",
-                   vec_prologue_cost);
-      dump_printf (MSG_NOTE, "  Vector epilogue cost: %d\n",
-                   vec_epilogue_cost);
-      dump_printf (MSG_NOTE, "  Scalar iteration cost: %d\n",
-                   scalar_single_iter_cost);
-      dump_printf (MSG_NOTE, "  Scalar outside cost: %d\n",
-                   scalar_outside_cost);
-      dump_printf (MSG_NOTE, "  Vector outside cost: %d\n",
-                   vec_outside_cost);
-      dump_printf (MSG_NOTE, "  prologue iterations: %d\n",
-                   peel_iters_prologue);
-      dump_printf (MSG_NOTE, "  epilogue iterations: %d\n",
-                   peel_iters_epilogue);
+  if (optinfo_enabled_p ())
+    {
+      /* FIXME: do we want to do something special for such a table of
+	 data?  */
+      OPTINFO_VECT_NOTE
+	<< "Cost model analysis:\n"
+	<< optinfo_printf ("  Vector inside of loop cost: %d\n",
+			   vec_inside_cost)
+	<< optinfo_printf ("  Vector prologue cost: %d\n",
+			   vec_prologue_cost)
+	<< optinfo_printf ("  Vector epilogue cost: %d\n",
+			   vec_epilogue_cost)
+	<< optinfo_printf ("  Scalar iteration cost: %d\n",
+			   scalar_single_iter_cost)
+	<< optinfo_printf ("  Scalar outside cost: %d\n",
+			   scalar_outside_cost)
+	<< optinfo_printf ("  Vector outside cost: %d\n",
+			   vec_outside_cost)
+	<< optinfo_printf ("  prologue iterations: %d\n",
+			   peel_iters_prologue)
+	<< optinfo_printf ("  epilogue iterations: %d",
+			   peel_iters_epilogue);
     }
 
   /* Calculate number of iterations required to make the vector version
@@ -4038,13 +3978,12 @@  vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo,
 	warning_at (vect_location, OPT_Wopenmp_simd, "vectorization "
 		    "did not happen for a simd loop");
 
-      if (dump_enabled_p ())
-        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			 "cost model: the vector iteration cost = %d "
-			 "divided by the scalar iteration cost = %d "
-			 "is greater or equal to the vectorization factor = %d"
-                         ".\n",
-			 vec_inside_cost, scalar_single_iter_cost, assumed_vf);
+      if (optinfo_enabled_p ())
+        OPTINFO_VECT_FAILURE
+	  << optinfo_printf ("cost model: the vector iteration cost = %d "
+			     "divided by the scalar iteration cost = %d "
+			     "is greater or equal to the vectorization factor = %d",
+			     vec_inside_cost, scalar_single_iter_cost, assumed_vf);
       *ret_min_profitable_niters = -1;
       *ret_min_profitable_estimate = -1;
       return;
@@ -4059,10 +3998,10 @@  vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo,
     /* We want the vectorized loop to execute at least once.  */
     min_profitable_iters = assumed_vf + peel_iters_prologue;
 
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location,
-                     "  Runtime profitability threshold = %d\n",
-                     min_profitable_iters);
+  if (optinfo_enabled_p ())
+    OPTINFO_VECT_NOTE
+      << optinfo_printf ("  Runtime profitability threshold = %d",
+			 min_profitable_iters);
 
   *ret_min_profitable_niters = min_profitable_iters;
 
@@ -4085,10 +4024,10 @@  vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo,
 				   - vec_inside_cost);
     }
   min_profitable_estimate = MAX (min_profitable_estimate, min_profitable_iters);
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location,
-		     "  Static estimate profitability threshold = %d\n",
-		     min_profitable_estimate);
+  if (optinfo_enabled_p ())
+    OPTINFO_VECT_NOTE
+      << optinfo_printf ("  Static estimate profitability threshold = %d",
+			 min_profitable_estimate);
 
   *ret_min_profitable_estimate = min_profitable_estimate;
 }
@@ -4307,7 +4246,7 @@  vect_model_reduction_cost (stmt_vec_info stmt_info, internal_fn reduc_fn,
 	}
     }
 
-  if (dump_enabled_p ())
+  if (optinfo_enabled_p ())
     dump_printf (MSG_NOTE, 
                  "vect_model_reduction_cost: inside_cost = %d, "
                  "prologue_cost = %d, epilogue_cost = %d .\n", inside_cost,
@@ -4337,10 +4276,11 @@  vect_model_induction_cost (stmt_vec_info stmt_info, int ncopies)
   prologue_cost = add_stmt_cost (target_cost_data, 2, scalar_to_vec,
 				 stmt_info, 0, vect_prologue);
 
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location,
-                     "vect_model_induction_cost: inside_cost = %d, "
-                     "prologue_cost = %d .\n", inside_cost, prologue_cost);
+  if (optinfo_enabled_p ())
+    OPTINFO_VECT_NOTE
+      << optinfo_printf ("vect_model_induction_cost: inside_cost = %d, "
+			 "prologue_cost = %d",
+			 inside_cost, prologue_cost);
 }
 
 
@@ -4930,12 +4870,11 @@  vect_create_epilog_for_reduction (vec<tree> vect_defs, gimple *stmt,
           add_phi_arg (as_a <gphi *> (phi), def, loop_latch_edge (loop),
 		       UNKNOWN_LOCATION);
 
-          if (dump_enabled_p ())
+          if (optinfo_enabled_p ())
             {
-              dump_printf_loc (MSG_NOTE, vect_location,
-			       "transform reduction: created def-use cycle: ");
-              dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
-              dump_gimple_stmt (MSG_NOTE, TDF_SLIM, SSA_NAME_DEF_STMT (def), 0);
+              OPTINFO_VECT_NOTE
+		<< "transform reduction: created def-use cycle: "
+		<< phi << SSA_NAME_DEF_STMT (def);
             }
         }
     }
@@ -5419,9 +5358,9 @@  vect_create_epilog_for_reduction (vec<tree> vect_defs, gimple *stmt,
       /* Case 1:  Create:
          v_out2 = reduc_expr <v_out1>  */
 
-      if (dump_enabled_p ())
-        dump_printf_loc (MSG_NOTE, vect_location,
-			 "Reduce using direct vector reduction.\n");
+      if (optinfo_enabled_p ())
+        OPTINFO_VECT_NOTE
+	  << "Reduce using direct vector reduction";
 
       vec_elem_type = TREE_TYPE (TREE_TYPE (new_phi_result));
       if (!useless_type_conversion_p (scalar_type, vec_elem_type))
@@ -5690,9 +5629,9 @@  vect_create_epilog_for_reduction (vec<tree> vect_defs, gimple *stmt,
 
           tree rhs;
 
-          if (dump_enabled_p ())
-            dump_printf_loc (MSG_NOTE, vect_location,
-			     "Reduce using vector shifts\n");
+          if (optinfo_enabled_p ())
+            OPTINFO_VECT_NOTE
+	      << "Reduce using vector shifts";
 
 	  mode1 = TYPE_MODE (vectype1);
           vec_dest = vect_create_destination_var (scalar_dest, vectype1);
@@ -5719,9 +5658,9 @@  vect_create_epilog_for_reduction (vec<tree> vect_defs, gimple *stmt,
 	  /* 2.4  Extract the final scalar result.  Create:
 	     s_out3 = extract_field <v_out2, bitpos>  */
 
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_NOTE, vect_location,
-			     "extract scalar result\n");
+	  if (optinfo_enabled_p ())
+	    OPTINFO_VECT_NOTE
+	      << "extract scalar result";
 
 	  rhs = build3 (BIT_FIELD_REF, scalar_type, new_temp,
 			bitsize, bitsize_zero_node);
@@ -5743,9 +5682,9 @@  vect_create_epilog_for_reduction (vec<tree> vect_defs, gimple *stmt,
                  Create:  s = op <s, s'>  // For non SLP cases
                }  */
 
-          if (dump_enabled_p ())
-            dump_printf_loc (MSG_NOTE, vect_location,
-			     "Reduce using scalar code.\n");
+          if (optinfo_enabled_p ())
+            OPTINFO_VECT_NOTE
+	      << "Reduce using scalar code";
 
 	  int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype1));
 	  int element_bitsize = tree_to_uhwi (bitsize);
@@ -6066,11 +6005,11 @@  vect_finalize_reduction:
                                UNKNOWN_LOCATION);
                   add_phi_arg (vect_phi, PHI_RESULT (inner_phi),
                                loop_latch_edge (outer_loop), UNKNOWN_LOCATION);
-                  if (dump_enabled_p ())
+                  if (optinfo_enabled_p ())
                     {
-                      dump_printf_loc (MSG_NOTE, vect_location,
-				       "created double reduction phi node: ");
-                      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vect_phi, 0);
+                      OPTINFO_VECT_NOTE
+			<< "created double reduction phi node: "
+			<< vect_phi;
                     }
 
                   vect_phi_res = PHI_RESULT (vect_phi);
@@ -6749,9 +6688,9 @@  vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
     {
       if (STMT_VINFO_REDUC_TYPE (stmt_info) == FOLD_LEFT_REDUCTION)
 	{
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			     "in-order reduction chain without SLP.\n");
+	  if (optinfo_enabled_p ())
+	    OPTINFO_VECT_FAILURE
+	      << "in-order reduction chain without SLP";
 	  return false;
 	}
 
@@ -6803,9 +6742,9 @@  vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
 	 as a reduction operation.  */
       if (reduc_index == -1)
 	{
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			     "conditional reduction chains not supported\n");
+	  if (optinfo_enabled_p ())
+	    OPTINFO_VECT_FAILURE
+	      << "conditional reduction chains not supported";
 	  return false;
 	}
 
@@ -6828,10 +6767,9 @@  vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
       else if (direct_internal_fn_supported_p (IFN_FOLD_EXTRACT_LAST,
 					       vectype_in, OPTIMIZE_FOR_SPEED))
 	{
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			     "optimizing condition reduction with"
-			     " FOLD_EXTRACT_LAST.\n");
+	  if (optinfo_enabled_p ())
+	    OPTINFO_VECT_FAILURE
+	      << "optimizing condition reduction with FOLD_EXTRACT_LAST";
 	  STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) = EXTRACT_LAST_REDUCTION;
 	}
       else if (cond_reduc_dt == vect_induction_def)
@@ -6869,10 +6807,9 @@  vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
 	    }
 	  if (cond_reduc_val)
 	    {
-	      if (dump_enabled_p ())
-		dump_printf_loc (MSG_NOTE, vect_location,
-				 "condition expression based on "
-				 "integer induction.\n");
+	      if (optinfo_enabled_p ())
+		OPTINFO_VECT_NOTE
+		  << "condition expression based on integer induction";
 	      STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
 		= INTEGER_INDUC_COND_REDUCTION;
 	    }
@@ -6895,10 +6832,10 @@  vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
 				    cond_initial_val, cond_reduc_val);
 	      if (e && (integer_onep (e) || integer_zerop (e)))
 		{
-		  if (dump_enabled_p ())
-		    dump_printf_loc (MSG_NOTE, vect_location,
-				     "condition expression based on "
-				     "compile time constant.\n");
+		  if (optinfo_enabled_p ())
+		    OPTINFO_VECT_NOTE
+		      << ("condition expression based on "
+			  "compile time constant");
 		  /* Record reduction code at analysis stage.  */
 		  STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info)
 		    = integer_onep (e) ? MAX_EXPR : MIN_EXPR;
@@ -6938,9 +6875,9 @@  vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
       if (!vec_stmt && !vectorizable_condition (stmt, gsi, NULL,
 						ops[reduc_index], 0, NULL))
         {
-          if (dump_enabled_p ())
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			     "unsupported condition in reduction\n");
+          if (optinfo_enabled_p ())
+	    OPTINFO_VECT_FAILURE
+	      << "unsupported condition in reduction";
 	  return false;
         }
     }
@@ -6953,9 +6890,9 @@  vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
 	{
 	  /* Shifts and rotates are only supported by vectorizable_shifts,
 	     not vectorizable_reduction.  */
-          if (dump_enabled_p ())
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			     "unsupported shift or rotation.\n");
+          if (optinfo_enabled_p ())
+	    OPTINFO_VECT_FAILURE
+	      << "unsupported shift or rotation";
 	  return false;
 	}
 
@@ -6963,23 +6900,23 @@  vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
       optab = optab_for_tree_code (code, vectype_in, optab_default);
       if (!optab)
         {
-          if (dump_enabled_p ())
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			     "no optab.\n");
+          if (optinfo_enabled_p ())
+	    OPTINFO_VECT_FAILURE
+	      << "no optab";
 
           return false;
         }
 
       if (optab_handler (optab, vec_mode) == CODE_FOR_nothing)
         {
-          if (dump_enabled_p ())
+          if (optinfo_enabled_p ())
             dump_printf (MSG_NOTE, "op not supported by target.\n");
 
 	  if (maybe_ne (GET_MODE_SIZE (vec_mode), UNITS_PER_WORD)
 	      || !vect_worthwhile_without_simd_p (loop_vinfo, code))
             return false;
 
-          if (dump_enabled_p ())
+          if (optinfo_enabled_p ())
   	    dump_printf (MSG_NOTE, "proceeding using word mode.\n");
         }
 
@@ -6987,9 +6924,9 @@  vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
       if (!VECTOR_MODE_P (TYPE_MODE (vectype_in))
 	  && !vect_worthwhile_without_simd_p (loop_vinfo, code))
         {
-          if (dump_enabled_p ())
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			     "not worthwhile without SIMD support.\n");
+          if (optinfo_enabled_p ())
+	    OPTINFO_VECT_FAILURE
+	      << "not worthwhile without SIMD support";
 
           return false;
         }
@@ -7093,9 +7030,9 @@  vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
 	      && !direct_internal_fn_supported_p (reduc_fn, vectype_out,
 						  OPTIMIZE_FOR_SPEED))
 	    {
-	      if (dump_enabled_p ())
-		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-				 "reduc op not supported by target.\n");
+	      if (optinfo_enabled_p ())
+		OPTINFO_VECT_FAILURE
+		  << "reduc op not supported by target";
 
 	      reduc_fn = IFN_LAST;
 	    }
@@ -7104,9 +7041,9 @@  vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
 	{
 	  if (!nested_cycle || double_reduc)
 	    {
-	      if (dump_enabled_p ())
-		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-				 "no reduc code for scalar code.\n");
+	      if (optinfo_enabled_p ())
+		OPTINFO_VECT_FAILURE
+		  << "no reduc code for scalar code";
 
 	      return false;
 	    }
@@ -7129,20 +7066,20 @@  vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
       && reduc_fn == IFN_LAST
       && !nunits_out.is_constant ())
     {
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			 "missing target support for reduction on"
-			 " variable-length vectors.\n");
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_FAILURE
+	  << ("missing target support for reduction on"
+	      " variable-length vectors");
       return false;
     }
 
   if ((double_reduc || reduction_type != TREE_CODE_REDUCTION)
       && ncopies > 1)
     {
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			 "multiple types in double reduction or condition "
-			 "reduction.\n");
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_FAILURE
+	  << ("multiple types in double reduction or condition "
+	      "reduction");
       return false;
     }
 
@@ -7169,9 +7106,9 @@  vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
 		 l += a[j];
 
 	 which is a reassociation of the original operation.  */
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			 "in-order double reduction not supported.\n");
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_FAILURE
+	  << "in-order double reduction not supported";
 
       return false;
     }
@@ -7182,9 +7119,9 @@  vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
     {
       /* We cannot use in-order reductions in this case because there is
 	 an implicit reassociation of the operations involved.  */
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			 "in-order unchained SLP reductions not supported.\n");
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_FAILURE
+	  << "in-order unchained SLP reductions not supported";
       return false;
     }
 
@@ -7197,11 +7134,11 @@  vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
       && !direct_internal_fn_supported_p (IFN_VEC_SHL_INSERT,
 					  vectype_out, OPTIMIZE_FOR_SPEED))
     {
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			 "reduction on variable-length vectors requires"
-			 " target support for a vector-shift-and-insert"
-			 " operation.\n");
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_FAILURE
+	  << ("reduction on variable-length vectors requires"
+	      " target support for a vector-shift-and-insert"
+	      " operation");
       return false;
     }
 
@@ -7220,11 +7157,11 @@  vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
       if (!neutral_op
 	  && !can_duplicate_and_interleave_p (group_size, elt_mode))
 	{
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			     "unsupported form of SLP reduction for"
-			     " variable-length vectors: cannot build"
-			     " initial vector.\n");
+	  if (optinfo_enabled_p ())
+	    OPTINFO_VECT_FAILURE
+	      << ("unsupported form of SLP reduction for"
+		  " variable-length vectors: cannot build"
+		  " initial vector");
 	  return false;
 	}
       /* The epilogue code relies on the number of elements being a multiple
@@ -7232,11 +7169,11 @@  vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
 	 up the the initial vector does too.  */
       if (!multiple_p (nunits_out, group_size))
 	{
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			     "unsupported form of SLP reduction for"
-			     " variable-length vectors: the vector size"
-			     " is not a multiple of the number of results.\n");
+	  if (optinfo_enabled_p ())
+	    OPTINFO_VECT_FAILURE
+	      << ("unsupported form of SLP reduction for"
+		  " variable-length vectors: the vector size"
+		  " is not a multiple of the number of results");
 	  return false;
 	}
     }
@@ -7253,9 +7190,9 @@  vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
         ops[1] = fold_convert (TREE_TYPE (ops[0]), ops[1]);
       else
         {
-          if (dump_enabled_p ())
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			     "invalid types in dot-prod\n");
+          if (optinfo_enabled_p ())
+	    OPTINFO_VECT_FAILURE
+	      << "invalid types in dot-prod";
 
           return false;
         }
@@ -7267,10 +7204,9 @@  vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
 
       if (! max_loop_iterations (loop, &ni))
 	{
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_NOTE, vect_location,
-			     "loop count not known, cannot create cond "
-			     "reduction.\n");
+	  if (optinfo_enabled_p ())
+	    OPTINFO_VECT_NOTE
+	      << "loop count not known, cannot create cond reduction";
 	  return false;
 	}
       /* Convert backedges to iterations.  */
@@ -7282,9 +7218,9 @@  vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
       tree max_index = TYPE_MAX_VALUE (cr_index_scalar_type);
       if (wi::geu_p (ni, wi::to_widest (max_index)))
 	{
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_NOTE, vect_location,
-			     "loop size is greater than data size.\n");
+	  if (optinfo_enabled_p ())
+	    OPTINFO_VECT_NOTE
+	      << "loop size is greater than data size";
 	  return false;
 	}
     }
@@ -7340,10 +7276,10 @@  vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
 	  || code == WIDEN_SUM_EXPR
 	  || code == SAD_EXPR))
     {
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			 "multi def-use cycle not possible for lane-reducing "
-			 "reduction operation\n");
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_FAILURE
+	  << ("multi def-use cycle not possible for lane-reducing "
+	      "reduction operation");
       return false;
     }
 
@@ -7366,36 +7302,37 @@  vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
 		  || !direct_internal_fn_supported_p (cond_fn, vectype_in,
 						      OPTIMIZE_FOR_SPEED)))
 	    {
-	      if (dump_enabled_p ())
-		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-				 "can't use a fully-masked loop because no"
-				 " conditional operation is available.\n");
+	      if (optinfo_enabled_p ())
+		OPTINFO_VECT_FAILURE
+		  << ("can't use a fully-masked loop because no"
+		      " conditional operation is available");
 	      LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
 	    }
 	  else if (reduc_index == -1)
 	    {
-	      if (dump_enabled_p ())
-		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-				 "can't use a fully-masked loop for chained"
-				 " reductions.\n");
+	      if (optinfo_enabled_p ())
+		OPTINFO_VECT_FAILURE
+		  << ("can't use a fully-masked loop for chained"
+		      " reductions");
 	      LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
 	    }
 	  else
 	    vect_record_loop_mask (loop_vinfo, masks, ncopies * vec_num,
 				   vectype_in);
 	}
-      if (dump_enabled_p ()
+      if (optinfo_enabled_p ()
 	  && reduction_type == FOLD_LEFT_REDUCTION)
-	dump_printf_loc (MSG_NOTE, vect_location,
-			 "using an in-order (fold-left) reduction.\n");
+	OPTINFO_VECT_NOTE
+	  << "using an in-order (fold-left) reduction";
       STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
       return true;
     }
 
   /* Transform.  */
 
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location, "transform reduction.\n");
+  if (optinfo_enabled_p ())
+    OPTINFO_VECT_NOTE
+      << "transform reduction";
 
   /* FORNOW: Multiple types are not supported for condition.  */
   if (code == COND_EXPR)
@@ -7689,9 +7626,9 @@  vectorizable_induction (gimple *phi,
 
       if (ncopies > 1)
 	{
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			     "multiple types in nested loop.\n");
+	  if (optinfo_enabled_p ())
+	    OPTINFO_VECT_FAILURE
+	      << "multiple types in nested loop";
 	  return false;
 	}
 
@@ -7720,10 +7657,10 @@  vectorizable_induction (gimple *phi,
 	  if (!(STMT_VINFO_RELEVANT_P (exit_phi_vinfo)
 		&& !STMT_VINFO_LIVE_P (exit_phi_vinfo)))
 	    {
-	      if (dump_enabled_p ())
-		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-				 "inner-loop induction only used outside "
-				 "of the outer vectorized loop.\n");
+	      if (optinfo_enabled_p ())
+		OPTINFO_VECT_FAILURE
+		  << ("inner-loop induction only used outside "
+		      "of the outer vectorized loop");
 	      return false;
 	    }
 	}
@@ -7738,19 +7675,17 @@  vectorizable_induction (gimple *phi,
   if (slp_node && !nunits.is_constant ())
     {
       /* The current SLP code creates the initial value element-by-element.  */
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			 "SLP induction not supported for variable-length"
-			 " vectors.\n");
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_FAILURE
+	  << ("SLP induction not supported for variable-length"
+	      " vectors.\n");
       return false;
     }
 
   if (!vec_stmt) /* transformation not required.  */
     {
       STMT_VINFO_TYPE (stmt_info) = induc_vec_info_type;
-      if (dump_enabled_p ())
-        dump_printf_loc (MSG_NOTE, vect_location,
-                         "=== vectorizable_induction ===\n");
+      VECT_OPTINFO_SCOPE ("vectorizable_induction");
       vect_model_induction_cost (stmt_info, ncopies);
       return true;
     }
@@ -7762,8 +7697,9 @@  vectorizable_induction (gimple *phi,
      evolution S, for a vector of 4 units, we want to compute:
      [X, X + S, X + 2*S, X + 3*S].  */
 
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location, "transform induction phi.\n");
+  if (optinfo_enabled_p ())
+    OPTINFO_VECT_NOTE
+      << "transform induction phi";
 
   latch_e = loop_latch_edge (iv_loop);
   loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
@@ -8154,23 +8090,20 @@  vectorizable_induction (gimple *phi,
 		      && !STMT_VINFO_LIVE_P (stmt_vinfo));
 
 	  STMT_VINFO_VEC_STMT (stmt_vinfo) = new_stmt;
-	  if (dump_enabled_p ())
+	  if (optinfo_enabled_p ())
 	    {
-	      dump_printf_loc (MSG_NOTE, vect_location,
-			       "vector of inductions after inner-loop:");
-	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
+	      OPTINFO_VECT_NOTE
+		<< "vector of inductions after inner-loop:" << new_stmt;
 	    }
 	}
     }
 
 
-  if (dump_enabled_p ())
+  if (optinfo_enabled_p ())
     {
-      dump_printf_loc (MSG_NOTE, vect_location,
-		       "transform induction: created def-use cycle: ");
-      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, induction_phi, 0);
-      dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
-			SSA_NAME_DEF_STMT (vec_def), 0);
+      OPTINFO_VECT_NOTE
+	<< "transform induction: created def-use cycle: "
+	<< induction_phi << SSA_NAME_DEF_STMT (vec_def);
     }
 
   return true;
@@ -8215,10 +8148,9 @@  vectorizable_live_operation (gimple *stmt,
   if (!STMT_VINFO_RELEVANT_P (stmt_info))
     {
       gcc_assert (is_simple_and_all_uses_invariant (stmt, loop_vinfo));
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_NOTE, vect_location,
-			 "statement is simple and uses invariant.  Leaving in "
-			 "place.\n");
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_NOTE
+	  << "statement is simple and uses invariant.  Leaving in place";
       return true;
     }
 
@@ -8243,10 +8175,10 @@  vectorizable_live_operation (gimple *stmt,
 	 that vector we need.  */
       if (!can_div_trunc_p (pos, nunits, &vec_entry, &vec_index))
 	{
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			     "Cannot determine which vector holds the"
-			     " final result.\n");
+	  if (optinfo_enabled_p ())
+	    OPTINFO_VECT_FAILURE
+	      << ("Cannot determine which vector holds the"
+		  " final result");
 	  return false;
 	}
     }
@@ -8259,27 +8191,27 @@  vectorizable_live_operation (gimple *stmt,
 	  if (!direct_internal_fn_supported_p (IFN_EXTRACT_LAST, vectype,
 					       OPTIMIZE_FOR_SPEED))
 	    {
-	      if (dump_enabled_p ())
-		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-				 "can't use a fully-masked loop because "
-				 "the target doesn't support extract last "
-				 "reduction.\n");
+	      if (optinfo_enabled_p ())
+		OPTINFO_VECT_FAILURE
+		  << ("can't use a fully-masked loop because "
+		      "the target doesn't support extract last "
+		      "reduction");
 	      LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
 	    }
 	  else if (slp_node)
 	    {
-	      if (dump_enabled_p ())
-		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-				 "can't use a fully-masked loop because an "
-				 "SLP statement is live after the loop.\n");
+	      if (optinfo_enabled_p ())
+		OPTINFO_VECT_FAILURE
+		  << ("can't use a fully-masked loop because an "
+		      "SLP statement is live after the loop");
 	      LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
 	    }
 	  else if (ncopies > 1)
 	    {
-	      if (dump_enabled_p ())
-		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-				 "can't use a fully-masked loop because"
-				 " ncopies is greater than 1.\n");
+	      if (optinfo_enabled_p ())
+		OPTINFO_VECT_FAILURE
+		  << ("can't use a fully-masked loop because"
+		      " ncopies is greater than 1");
 	      LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
 	    }
 	  else
@@ -8424,9 +8356,9 @@  vect_loop_kill_debug_uses (struct loop *loop, gimple *stmt)
 	    {
 	      if (gimple_debug_bind_p (ustmt))
 		{
-		  if (dump_enabled_p ())
-		    dump_printf_loc (MSG_NOTE, vect_location,
-                                     "killing debug use\n");
+		  if (optinfo_enabled_p ())
+		    OPTINFO_VECT_NOTE
+		      << "killing debug use";
 
 		  gimple_debug_bind_reset_value (ustmt);
 		  update_stmt (ustmt);
@@ -8625,8 +8557,7 @@  vect_transform_loop (loop_vec_info loop_vinfo)
   bool check_profitability = false;
   unsigned int th;
 
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location, "=== vec_transform_loop ===\n");
+  VECT_OPTINFO_SCOPE ("vec_transform_loop");
 
   /* Use the more conservative vectorization threshold.  If the number
      of iterations is constant assume the cost check has been performed
@@ -8637,10 +8568,10 @@  vect_transform_loop (loop_vec_info loop_vinfo)
   if (th >= vect_vf_for_cost (loop_vinfo)
       && !LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
     {
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_NOTE, vect_location,
-			 "Profitability threshold is %d loop iterations.\n",
-                         th);
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_NOTE
+	  << optinfo_printf ("Profitability threshold is %d loop iterations",
+			     th);
       check_profitability = true;
     }
 
@@ -8650,7 +8581,7 @@  vect_transform_loop (loop_vec_info loop_vinfo)
   if (! single_pred_p (e->dest))
     {
       split_loop_exit_edge (e);
-      if (dump_enabled_p ())
+      if (optinfo_enabled_p ())
 	dump_printf (MSG_NOTE, "split exit edge\n");
     }
 
@@ -8684,7 +8615,7 @@  vect_transform_loop (loop_vec_info loop_vinfo)
       if (! single_pred_p (e->dest))
 	{
 	  split_loop_exit_edge (e);
-	  if (dump_enabled_p ())
-	    dump_printf (MSG_NOTE, "split exit edge of scalar loop\n");
+	  if (optinfo_enabled_p ())
+	    OPTINFO_VECT_NOTE << "split exit edge of scalar loop";
 	}
     }
@@ -8739,11 +8670,10 @@  vect_transform_loop (loop_vec_info loop_vinfo)
 	   gsi_next (&si))
         {
 	  gphi *phi = si.phi ();
-	  if (dump_enabled_p ())
+	  if (optinfo_enabled_p ())
 	    {
-	      dump_printf_loc (MSG_NOTE, vect_location,
-                               "------>vectorizing phi: ");
-	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
+	      OPTINFO_VECT_NOTE
+		<< "------>vectorizing phi: " << phi;
 	    }
 	  stmt_info = vinfo_for_stmt (phi);
 	  if (!stmt_info)
@@ -8759,16 +8689,18 @@  vect_transform_loop (loop_vec_info loop_vinfo)
 	  if (STMT_VINFO_VECTYPE (stmt_info)
 	      && (maybe_ne
 		  (TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info)), vf))
-	      && dump_enabled_p ())
-	    dump_printf_loc (MSG_NOTE, vect_location, "multiple-types.\n");
+	      && optinfo_enabled_p ())
+	    OPTINFO_VECT_NOTE
+	      << "multiple-types";
 
 	  if ((STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def
 	       || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def
 	       || STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle)
 	      && ! PURE_SLP_STMT (stmt_info))
 	    {
-	      if (dump_enabled_p ())
-		dump_printf_loc (MSG_NOTE, vect_location, "transform phi.\n");
+	      if (optinfo_enabled_p ())
+		OPTINFO_VECT_NOTE
+		  << "transform phi";
 	      vect_transform_stmt (phi, NULL, NULL, NULL, NULL);
 	    }
 	}
@@ -8794,11 +8726,10 @@  vect_transform_loop (loop_vec_info loop_vinfo)
 		}
 	    }
 
-	  if (dump_enabled_p ())
+	  if (optinfo_enabled_p ())
 	    {
-	      dump_printf_loc (MSG_NOTE, vect_location,
-			       "------>vectorizing statement: ");
-	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
+	      OPTINFO_VECT_NOTE
+		<< "------>vectorizing statement: " << stmt;
 	    }
 
 	  stmt_info = vinfo_for_stmt (stmt);
@@ -8866,13 +8797,11 @@  vect_transform_loop (loop_vec_info loop_vinfo)
 
 		  if (!gsi_end_p (pattern_def_si))
 		    {
-		      if (dump_enabled_p ())
+		      if (optinfo_enabled_p ())
 			{
-			  dump_printf_loc (MSG_NOTE, vect_location,
-					   "==> vectorizing pattern def "
-					   "stmt: ");
-			  dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
-					    pattern_def_stmt, 0);
+			  OPTINFO_VECT_NOTE
+			    << "==> vectorizing pattern def stmt: "
+			    << pattern_def_stmt;
 			}
 
 		      stmt = pattern_def_stmt;
@@ -8894,10 +8823,11 @@  vect_transform_loop (loop_vec_info loop_vinfo)
 		= TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
 	      if (!STMT_SLP_TYPE (stmt_info)
 		  && maybe_ne (nunits, vf)
-		  && dump_enabled_p ())
+		  && optinfo_enabled_p ())
 		  /* For SLP VF is set according to unrolling factor, and not
 		     to vector size, hence for SLP this print is not valid.  */
-		dump_printf_loc (MSG_NOTE, vect_location, "multiple-types.\n");
+		OPTINFO_VECT_NOTE
+		  << "multiple-types";
 	    }
 
 	  /* SLP. Schedule all the SLP instances when the first SLP stmt is
@@ -8908,9 +8838,7 @@  vect_transform_loop (loop_vec_info loop_vinfo)
 		{
 		  slp_scheduled = true;
 
-		  if (dump_enabled_p ())
-		    dump_printf_loc (MSG_NOTE, vect_location,
-				     "=== scheduling SLP instances ===\n");
+		  VECT_OPTINFO_SCOPE ("scheduling SLP instances");
 
 		  vect_schedule_slp (loop_vinfo);
 		}
@@ -8928,8 +8856,9 @@  vect_transform_loop (loop_vec_info loop_vinfo)
 	    }
 
 	  /* -------- vectorize statement ------------ */
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_NOTE, vect_location, "transform statement.\n");
+	  if (optinfo_enabled_p ())
+	    OPTINFO_VECT_NOTE
+	      << "transform statement";
 
 	  grouped_store = false;
 	  is_store = vect_transform_stmt (stmt, &si, &grouped_store, NULL, NULL);
@@ -9040,23 +8969,23 @@  vect_transform_loop (loop_vec_info loop_vinfo)
 	 : wi::udiv_floor (loop->nb_iterations_estimate + bias_for_assumed,
 			   assumed_vf) - 1);
 
-  if (dump_enabled_p ())
+  if (optinfo_enabled_p ())
     {
+      /* FIXME: I've converted these from "notes" to "successes".  */
       if (!LOOP_VINFO_EPILOGUE_P (loop_vinfo))
 	{
-	  dump_printf_loc (MSG_NOTE, vect_location,
-			   "LOOP VECTORIZED\n");
+	  OPTINFO_VECT_SUCCESS
+	    << "LOOP VECTORIZED";
 	  if (loop->inner)
-	    dump_printf_loc (MSG_NOTE, vect_location,
-			     "OUTER LOOP VECTORIZED\n");
-	  dump_printf (MSG_NOTE, "\n");
+	    OPTINFO_VECT_SUCCESS
+	      << "OUTER LOOP VECTORIZED";
 	}
       else
 	{
-	  dump_printf_loc (MSG_NOTE, vect_location,
-			   "LOOP EPILOGUE VECTORIZED (VS=");
-	  dump_dec (MSG_NOTE, current_vector_size);
-	  dump_printf (MSG_NOTE, ")\n");
+	  OPTINFO_VECT_SUCCESS
+	    << "LOOP EPILOGUE VECTORIZED (VS="
+	    << current_vector_size
+	    << ")";
 	}
     }
 
@@ -9159,6 +9088,8 @@  optimize_mask_stores (struct loop *loop)
   auto_vec<gimple *> worklist;
 
   vect_location = find_loop_location (loop);
+  vect_optinfo_location = loop;
+
   /* Pick up all masked stores in loop if any.  */
   for (i = 0; i < nbbs; i++)
     {
@@ -9210,10 +9141,10 @@  optimize_mask_stores (struct loop *loop)
       make_single_succ_edge (store_bb, join_bb, EDGE_FALLTHRU);
       if (dom_info_available_p (CDI_DOMINATORS))
 	set_immediate_dominator (CDI_DOMINATORS, store_bb, bb);
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_NOTE, vect_location,
-			 "Create new block %d to sink mask stores.",
-			 store_bb->index);
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_NOTE
+	  << optinfo_printf ("Create new block %d to sink mask stores.",
+			     store_bb->index);
       /* Create vector comparison with boolean result.  */
       vectype = TREE_TYPE (mask);
       zero = build_zero_cst (vectype);
@@ -9249,11 +9180,10 @@  optimize_mask_stores (struct loop *loop)
 	  gsi_move_before (&gsi_from, &gsi_to);
 	  /* Setup GSI_TO to the non-empty block start.  */
 	  gsi_to = gsi_start_bb (store_bb);
-	  if (dump_enabled_p ())
+	  if (optinfo_enabled_p ())
 	    {
-	      dump_printf_loc (MSG_NOTE, vect_location,
-			       "Move stmt to created bb\n");
-	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, last, 0);
+	      OPTINFO_VECT_NOTE
+		<< "Move stmt to created bb " << last;
 	    }
 	  /* Move all stored value producers if possible.  */
 	  while (!gsi_end_p (gsi))
@@ -9317,12 +9247,10 @@  optimize_mask_stores (struct loop *loop)
 		break;
 
 	      /* Can move STMT1 to STORE_BB.  */
-	      if (dump_enabled_p ())
-		{
-		  dump_printf_loc (MSG_NOTE, vect_location,
-				   "Move stmt to created bb\n");
-		  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt1, 0);
-		}
+	      if (optinfo_enabled_p ())
+		OPTINFO_VECT_NOTE
+		  << "Move stmt to created bb "
+		  << stmt1;
 	      gsi_move_before (&gsi_from, &gsi_to);
 	      /* Shift GSI_TO for further insertion.  */
 	      gsi_prev (&gsi_to);
diff --git a/gcc/tree-vect-patterns.c b/gcc/tree-vect-patterns.c
index 621ed07..f38efac 100644
--- a/gcc/tree-vect-patterns.c
+++ b/gcc/tree-vect-patterns.c
@@ -448,7 +448,7 @@  vect_recog_dot_prod_pattern (vec<gimple *> *stmts, tree *type_in,
   pattern_stmt = gimple_build_assign (var, DOT_PROD_EXPR,
 				      oprnd00, oprnd01, oprnd1);
 
-  if (dump_enabled_p ())
+  if (optinfo_enabled_p ())
     {
       dump_printf_loc (MSG_NOTE, vect_location,
                        "vect_recog_dot_prod_pattern: detected: ");
@@ -682,7 +682,7 @@  vect_recog_sad_pattern (vec<gimple *> *stmts, tree *type_in,
   gimple *pattern_stmt = gimple_build_assign (var, SAD_EXPR, sad_oprnd0,
 					      sad_oprnd1, plus_oprnd1);
 
-  if (dump_enabled_p ())
+  if (optinfo_enabled_p ())
     {
       dump_printf_loc (MSG_NOTE, vect_location,
                        "vect_recog_sad_pattern: detected: ");
@@ -964,7 +964,7 @@  vect_recog_widen_mult_pattern (vec<gimple *> *stmts,
 	       TYPE_UNSIGNED (type));
 
   /* Pattern detected.  */
-  if (dump_enabled_p ())
+  if (optinfo_enabled_p ())
     dump_printf_loc (MSG_NOTE, vect_location,
                      "vect_recog_widen_mult_pattern: detected:\n");
 
@@ -1015,7 +1015,7 @@  vect_recog_widen_mult_pattern (vec<gimple *> *stmts,
 					  gimple_assign_lhs (pattern_stmt));
     }
 
-  if (dump_enabled_p ())
+  if (optinfo_enabled_p ())
     dump_gimple_stmt_loc (MSG_NOTE, vect_location, TDF_SLIM, pattern_stmt, 0);
 
   stmts->safe_push (last_stmt);
@@ -1285,7 +1285,7 @@  vect_recog_widen_sum_pattern (vec<gimple *> *stmts, tree *type_in,
   var = vect_recog_temp_ssa_var (type, NULL);
   pattern_stmt = gimple_build_assign (var, WIDEN_SUM_EXPR, oprnd0, oprnd1);
 
-  if (dump_enabled_p ())
+  if (optinfo_enabled_p ())
     {
       dump_printf_loc (MSG_NOTE, vect_location,
                        "vect_recog_widen_sum_pattern: detected: ");
@@ -1584,7 +1584,7 @@  vect_recog_over_widening_pattern (vec<gimple *> *stmts,
       STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt)) = pattern_stmt;
       new_pattern_def_seq (vinfo_for_stmt (stmt), new_def_stmt);
 
-      if (dump_enabled_p ())
+      if (optinfo_enabled_p ())
         {
           dump_printf_loc (MSG_NOTE, vect_location,
                            "created pattern stmt: ");
@@ -1651,7 +1651,7 @@  vect_recog_over_widening_pattern (vec<gimple *> *stmts,
     return NULL;
 
   /* Pattern detected.  */
-  if (dump_enabled_p ())
+  if (optinfo_enabled_p ())
     {
       dump_printf_loc (MSG_NOTE, vect_location,
                        "vect_recog_over_widening_pattern: detected: ");
@@ -1788,7 +1788,7 @@  vect_recog_widen_shift_pattern (vec<gimple *> *stmts,
     return NULL;
 
   /* Pattern detected.  */
-  if (dump_enabled_p ())
+  if (optinfo_enabled_p ())
     dump_printf_loc (MSG_NOTE, vect_location,
                      "vect_recog_widen_shift_pattern: detected:\n");
 
@@ -1821,7 +1821,7 @@  vect_recog_widen_shift_pattern (vec<gimple *> *stmts,
       STMT_VINFO_VECTYPE (new_stmt_info) = vectype;
     }
 
-  if (dump_enabled_p ())
+  if (optinfo_enabled_p ())
     dump_gimple_stmt_loc (MSG_NOTE, vect_location, TDF_SLIM, pattern_stmt, 0);
 
   stmts->safe_push (last_stmt);
@@ -2058,7 +2058,7 @@  vect_recog_rotate_pattern (vec<gimple *> *stmts, tree *type_in, tree *type_out)
   append_pattern_def_seq (stmt_vinfo, def_stmt);
 
   /* Pattern detected.  */
-  if (dump_enabled_p ())
+  if (optinfo_enabled_p ())
     dump_printf_loc (MSG_NOTE, vect_location,
 		     "vect_recog_rotate_pattern: detected:\n");
 
@@ -2066,7 +2066,7 @@  vect_recog_rotate_pattern (vec<gimple *> *stmts, tree *type_in, tree *type_out)
   var = vect_recog_temp_ssa_var (type, NULL);
   pattern_stmt = gimple_build_assign (var, BIT_IOR_EXPR, var1, var2);
 
-  if (dump_enabled_p ())
+  if (optinfo_enabled_p ())
     dump_gimple_stmt_loc (MSG_NOTE, vect_location, TDF_SLIM, pattern_stmt, 0);
 
   stmts->safe_push (last_stmt);
@@ -2197,7 +2197,7 @@  vect_recog_vector_vector_shift_pattern (vec<gimple *> *stmts,
     }
 
   /* Pattern detected.  */
-  if (dump_enabled_p ())
+  if (optinfo_enabled_p ())
     dump_printf_loc (MSG_NOTE, vect_location,
                      "vect_recog_vector_vector_shift_pattern: detected:\n");
 
@@ -2205,7 +2205,7 @@  vect_recog_vector_vector_shift_pattern (vec<gimple *> *stmts,
   var = vect_recog_temp_ssa_var (TREE_TYPE (oprnd0), NULL);
   pattern_stmt = gimple_build_assign (var, rhs_code, oprnd0, def);
 
-  if (dump_enabled_p ())
+  if (optinfo_enabled_p ())
     dump_gimple_stmt_loc (MSG_NOTE, vect_location, TDF_SLIM, pattern_stmt, 0);
 
   stmts->safe_push (last_stmt);
@@ -2574,11 +2574,11 @@  vect_recog_mult_pattern (vec<gimple *> *stmts,
     return NULL;
 
   /* Pattern detected.  */
-  if (dump_enabled_p ())
+  if (optinfo_enabled_p ())
     dump_printf_loc (MSG_NOTE, vect_location,
 		     "vect_recog_mult_pattern: detected:\n");
 
-  if (dump_enabled_p ())
+  if (optinfo_enabled_p ())
     dump_gimple_stmt_loc (MSG_NOTE, vect_location, TDF_SLIM,
 			  pattern_stmt,0);
 
@@ -2692,7 +2692,7 @@  vect_recog_divmod_pattern (vec<gimple *> *stmts,
 	return NULL;
 
       /* Pattern detected.  */
-      if (dump_enabled_p ())
+      if (optinfo_enabled_p ())
         dump_printf_loc (MSG_NOTE, vect_location,
                          "vect_recog_divmod_pattern: detected:\n");
 
@@ -2778,7 +2778,7 @@  vect_recog_divmod_pattern (vec<gimple *> *stmts,
 				   signmask);
 	}
 
-      if (dump_enabled_p ())
+      if (optinfo_enabled_p ())
 	dump_gimple_stmt_loc (MSG_NOTE, vect_location, TDF_SLIM, pattern_stmt,
                               0);
 
@@ -3032,7 +3032,7 @@  vect_recog_divmod_pattern (vec<gimple *> *stmts,
     }
 
   /* Pattern detected.  */
-  if (dump_enabled_p ())
+  if (optinfo_enabled_p ())
     {
       dump_printf_loc (MSG_NOTE, vect_location,
                        "vect_recog_divmod_pattern: detected: ");
@@ -3197,7 +3197,7 @@  vect_recog_mixed_size_cond_pattern (vec<gimple *> *stmts, tree *type_in,
   *type_in = vecitype;
   *type_out = vectype;
 
-  if (dump_enabled_p ())
+  if (optinfo_enabled_p ())
     dump_printf_loc (MSG_NOTE, vect_location,
                      "vect_recog_mixed_size_cond_pattern: detected:\n");
 
@@ -3778,9 +3778,9 @@  vect_recog_bool_pattern (vec<gimple *> *stmts, tree *type_in,
       *type_out = vectype;
       *type_in = vectype;
       stmts->safe_push (last_stmt);
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_NOTE, vect_location,
-                         "vect_recog_bool_pattern: detected:\n");
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_NOTE
+	  << "vect_recog_bool_pattern: detected";
 
       return pattern_stmt;
     }
@@ -3819,9 +3819,9 @@  vect_recog_bool_pattern (vec<gimple *> *stmts, tree *type_in,
       *type_out = vectype;
       *type_in = vectype;
       stmts->safe_push (last_stmt);
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_NOTE, vect_location,
-                         "vect_recog_bool_pattern: detected:\n");
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_NOTE
+	  << "vect_recog_bool_pattern: detected";
 
       return pattern_stmt;
     }
@@ -3879,9 +3879,9 @@  vect_recog_bool_pattern (vec<gimple *> *stmts, tree *type_in,
       *type_out = vectype;
       *type_in = vectype;
       stmts->safe_push (last_stmt);
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_NOTE, vect_location,
-                         "vect_recog_bool_pattern: detected:\n");
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_NOTE
+	  << "vect_recog_bool_pattern: detected";
       return pattern_stmt;
     }
   else
@@ -4017,9 +4017,9 @@  vect_recog_mask_conversion_pattern (vec<gimple *> *stmts, tree *type_in,
       *type_out = vectype1;
       *type_in = vectype1;
       stmts->safe_push (last_stmt);
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_NOTE, vect_location,
-                         "vect_recog_mask_conversion_pattern: detected:\n");
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_NOTE
+	  << "vect_recog_mask_conversion_pattern: detected";
 
       return pattern_stmt;
     }
@@ -4143,9 +4143,9 @@  vect_recog_mask_conversion_pattern (vec<gimple *> *stmts, tree *type_in,
       *type_out = vectype1;
       *type_in = vectype1;
       stmts->safe_push (last_stmt);
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_NOTE, vect_location,
-                         "vect_recog_mask_conversion_pattern: detected:\n");
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_NOTE
+	  << "vect_recog_mask_conversion_pattern: detected";
 
       return pattern_stmt;
     }
@@ -4191,9 +4191,9 @@  vect_recog_mask_conversion_pattern (vec<gimple *> *stmts, tree *type_in,
   *type_out = vectype1;
   *type_in = vectype1;
   stmts->safe_push (last_stmt);
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location,
-		     "vect_recog_mask_conversion_pattern: detected:\n");
+  if (optinfo_enabled_p ())
+    OPTINFO_VECT_NOTE
+      << "vect_recog_mask_conversion_pattern: detected";
 
   return pattern_stmt;
 }
@@ -4377,9 +4377,9 @@  vect_try_gather_scatter_pattern (gimple *stmt, stmt_vec_info last_stmt_info,
   *type_out = vectype;
   *type_in = vectype;
 
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location,
-		     "gather/scatter pattern detected:\n");
+  if (optinfo_enabled_p ())
+    OPTINFO_VECT_NOTE
+      << "gather/scatter pattern detected";
 
   return pattern_stmt;
 }
@@ -4532,11 +4532,11 @@  vect_pattern_recog_1 (vect_recog_func *recog_func,
     }
 
   /* Found a vectorizable pattern.  */
-  if (dump_enabled_p ())
+  if (optinfo_enabled_p ())
     {
-      dump_printf_loc (MSG_NOTE, vect_location,
-                       "%s pattern recognized: ", recog_func->name);
-      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_stmt, 0);
+      OPTINFO_VECT_NOTE
+	<< optinfo_printf ("%s pattern recognized: ", recog_func->name)
+	<< pattern_stmt;
     }
 
   /* Mark the stmts that are involved in the pattern. */
@@ -4558,11 +4558,11 @@  vect_pattern_recog_1 (vect_recog_func *recog_func,
     {
       stmt_info = vinfo_for_stmt (stmt);
       pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
-      if (dump_enabled_p ())
+      if (optinfo_enabled_p ())
         {
-          dump_printf_loc (MSG_NOTE, vect_location,
-                           "additional pattern stmt: ");
-          dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_stmt, 0);
+	  OPTINFO_VECT_NOTE
+	    << "additional pattern stmt: "
+	    << pattern_stmt;
         }
 
       vect_mark_pattern_stmts (stmt, pattern_stmt, NULL_TREE);
@@ -4660,9 +4660,7 @@  vect_pattern_recog (vec_info *vinfo)
   auto_vec<gimple *, 1> stmts_to_replace;
   gimple *stmt;
 
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location,
-                     "=== vect_pattern_recog ===\n");
+  VECT_OPTINFO_SCOPE ("vect_pattern_recog");
 
   if (loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo))
     {
diff --git a/gcc/tree-vect-slp.c b/gcc/tree-vect-slp.c
index 73aa227..37fd3a3 100644
--- a/gcc/tree-vect-slp.c
+++ b/gcc/tree-vect-slp.c
@@ -45,6 +45,7 @@  along with GCC; see the file COPYING3.  If not see
 #include "vec-perm-indices.h"
 #include "gimple-fold.h"
 #include "internal-fn.h"
+#include "optinfo.h"
 
 
 /* Recursively free the memory allocated for the SLP tree rooted at NODE.  */
@@ -349,12 +350,11 @@  again:
 
       if (!vect_is_simple_use (oprnd, vinfo, &def_stmt, &dt))
 	{
-	  if (dump_enabled_p ())
+	  if (optinfo_enabled_p ())
 	    {
-	      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			       "Build SLP failed: can't analyze def for ");
-	      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, oprnd);
-              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
+	      OPTINFO_VECT_FAILURE
+		<< "Build SLP failed: can't analyze def for "
+		<< slim (oprnd);
 	    }
 
 	  return -1;
@@ -385,13 +385,12 @@  again:
 		  goto again;
 		}
 
-	      if (dump_enabled_p ())
+	      if (optinfo_enabled_p ())
 		{
-		  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-				   "Build SLP failed: some of the stmts"
-				   " are in a pattern, and others are not ");
-		  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, oprnd);
-                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
+		  OPTINFO_VECT_FAILURE
+		    << ("Build SLP failed: some of the stmts"
+			" are in a pattern, and others are not ")
+		    << slim (oprnd);
 		}
 
 	      return 1;
@@ -402,9 +401,9 @@  again:
 
           if (dt == vect_unknown_def_type)
             {
-              if (dump_enabled_p ())
-                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-				 "Unsupported pattern.\n");
+              if (optinfo_enabled_p ())
+                OPTINFO_VECT_FAILURE
+		  << "Unsupported pattern";
               return -1;
             }
 
@@ -415,8 +414,8 @@  again:
 	      break;
 
 	    default:
-	      if (dump_enabled_p ())
-		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-				 "unsupported defining stmt:\n");
+	      if (optinfo_enabled_p ())
+		OPTINFO_VECT_FAILURE
+		  << "unsupported defining stmt:";
 	      return -1;
             }
@@ -457,8 +456,8 @@  again:
 		  goto again;
 		}
 
-	      if (dump_enabled_p ())
-		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-				 "Build SLP failed: different types\n");
+	      if (optinfo_enabled_p ())
+		OPTINFO_VECT_FAILURE
+		  << "Build SLP failed: different types";
 
 	      return 1;
@@ -470,13 +469,12 @@  again:
 		  || !can_duplicate_and_interleave_p (stmts.length (),
 						      TYPE_MODE (type))))
 	    {
-	      if (dump_enabled_p ())
+	      if (optinfo_enabled_p ())
 		{
-		  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-				   "Build SLP failed: invalid type of def "
-				   "for variable-length SLP ");
-		  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, oprnd);
-		  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
+		  OPTINFO_VECT_FAILURE
+		    << ("Build SLP failed: invalid type of def "
+			"for variable-length SLP ")
+		    << slim (oprnd);
 		}
 	      return -1;
 	    }
@@ -497,12 +495,11 @@  again:
 
 	default:
 	  /* FORNOW: Not supported.  */
-	  if (dump_enabled_p ())
+	  if (optinfo_enabled_p ())
 	    {
-	      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			       "Build SLP failed: illegal type of def ");
-	      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, oprnd);
-              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
+	      OPTINFO_VECT_FAILURE
+		<< "Build SLP failed: illegal type of def "
+		<< slim (oprnd);
 	    }
 
 	  return -1;
@@ -516,12 +513,12 @@  again:
          we've committed to the operand order and can't swap it.  */
       if (STMT_VINFO_NUM_SLP_USES (vinfo_for_stmt (stmt)) != 0)
 	{
-	  if (dump_enabled_p ())
+	  if (optinfo_enabled_p ())
 	    {
-	      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			       "Build SLP failed: cannot swap operands of "
-			       "shared stmt ");
-	      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
+	      OPTINFO_VECT_FAILURE
+		<< ("Build SLP failed: cannot swap operands of "
+		    "shared stmt ")
+		<< stmt;
 	    }
 	  return -1;
 	}
@@ -552,11 +549,11 @@  again:
       else
 	swap_ssa_operands (stmt, gimple_assign_rhs1_ptr (stmt),
 			   gimple_assign_rhs2_ptr (stmt));
-      if (dump_enabled_p ())
+      if (optinfo_enabled_p ())
 	{
-	  dump_printf_loc (MSG_NOTE, vect_location,
-			   "swapped operands to match def types in ");
-	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
+	  OPTINFO_VECT_NOTE
+	    << "swapped operands to match def types in "
+	    << stmt;
 	}
     }
 
@@ -577,12 +574,11 @@  vect_record_max_nunits (vec_info *vinfo, gimple *stmt, unsigned int group_size,
 {
   if (!vectype)
     {
-      if (dump_enabled_p ())
+      if (optinfo_enabled_p ())
 	{
-	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			   "Build SLP failed: unsupported data-type in ");
-	  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
-	  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
+	  OPTINFO_VECT_FAILURE
+	    << "Build SLP failed: unsupported data-type in "
+	    << stmt;
 	}
       /* Fatal mismatch.  */
       return false;
@@ -596,9 +592,9 @@  vect_record_max_nunits (vec_info *vinfo, gimple *stmt, unsigned int group_size,
       && (!nunits.is_constant (&const_nunits)
 	  || const_nunits > group_size))
     {
-      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-		       "Build SLP failed: unrolling required "
-		       "in basic block SLP\n");
+      OPTINFO_VECT_FAILURE
+	<< ("Build SLP failed: unrolling required "
+	    "in basic block SLP");
       /* Fatal mismatch.  */
       return false;
     }
@@ -650,20 +646,21 @@  vect_build_slp_tree_1 (vec_info *vinfo, unsigned char *swap,
       swap[i] = 0;
       matches[i] = false;
 
-      if (dump_enabled_p ())
+      if (optinfo_enabled_p ())
 	{
-	  dump_printf_loc (MSG_NOTE, vect_location, "Build SLP for ");
-	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
+	  OPTINFO_VECT_NOTE
+	    << "Build SLP for "
+	    << stmt;
 	}
 
       /* Fail to vectorize statements marked as unvectorizable.  */
       if (!STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (stmt)))
         {
-          if (dump_enabled_p ())
+          if (optinfo_enabled_p ())
             {
-              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			       "Build SLP failed: unvectorizable statement ");
-              dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
+              OPTINFO_VECT_FAILURE
+		<< "Build SLP failed: unvectorizable statement "
+		<< stmt;
             }
 	  /* Fatal mismatch.  */
 	  matches[0] = false;
@@ -673,12 +670,12 @@  vect_build_slp_tree_1 (vec_info *vinfo, unsigned char *swap,
       lhs = gimple_get_lhs (stmt);
       if (lhs == NULL_TREE)
 	{
-	  if (dump_enabled_p ())
+	  if (optinfo_enabled_p ())
 	    {
-	      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			       "Build SLP failed: not GIMPLE_ASSIGN nor "
-			       "GIMPLE_CALL ");
-	      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
+	      OPTINFO_VECT_FAILURE
+		<< ("Build SLP failed: not GIMPLE_ASSIGN nor "
+		    "GIMPLE_CALL ")
+		<< stmt;
 	    }
 	  /* Fatal mismatch.  */
 	  matches[0] = false;
@@ -704,12 +701,11 @@  vect_build_slp_tree_1 (vec_info *vinfo, unsigned char *swap,
 	      || !gimple_call_nothrow_p (call_stmt)
 	      || gimple_call_chain (call_stmt))
 	    {
-	      if (dump_enabled_p ())
+	      if (optinfo_enabled_p ())
 		{
-		  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, 
-				   "Build SLP failed: unsupported call type ");
-		  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
-				    call_stmt, 0);
+		  OPTINFO_VECT_FAILURE
+		    << "Build SLP failed: unsupported call type "
+		    << call_stmt;
 		}
 	      /* Fatal mismatch.  */
 	      matches[0] = false;
@@ -745,9 +741,9 @@  vect_build_slp_tree_1 (vec_info *vinfo, unsigned char *swap,
 
 		  if (!optab)
 		    {
-		      if (dump_enabled_p ())
-			dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-					 "Build SLP failed: no optab.\n");
+		      if (optinfo_enabled_p ())
+			OPTINFO_VECT_FAILURE
+			  << "Build SLP failed: no optab";
 		      /* Fatal mismatch.  */
 		      matches[0] = false;
 		      return false;
@@ -755,10 +751,10 @@  vect_build_slp_tree_1 (vec_info *vinfo, unsigned char *swap,
 		  icode = (int) optab_handler (optab, vec_mode);
 		  if (icode == CODE_FOR_nothing)
 		    {
-		      if (dump_enabled_p ())
-			dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-					 "Build SLP failed: "
-					 "op not supported by target.\n");
+		      if (optinfo_enabled_p ())
+			OPTINFO_VECT_FAILURE
+			  << ("Build SLP failed: "
+			      "op not supported by target");
 		      /* Fatal mismatch.  */
 		      matches[0] = false;
 		      return false;
@@ -801,16 +797,14 @@  vect_build_slp_tree_1 (vec_info *vinfo, unsigned char *swap,
                        || first_stmt_code == COMPONENT_REF
                        || first_stmt_code == MEM_REF)))
 	    {
-	      if (dump_enabled_p ())
+	      if (optinfo_enabled_p ())
 		{
-		  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, 
-				   "Build SLP failed: different operation "
-				   "in stmt ");
-		  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
-		  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-				   "original stmt ");
-		  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
-				    first_stmt, 0);
+		  OPTINFO_VECT_FAILURE
+		    << ("Build SLP failed: different operation "
+			"in stmt ")
+		    << stmt
+		    << "original stmt "
+		    << first_stmt;
 		}
 	      /* Mismatch.  */
 	      continue;
@@ -819,12 +813,12 @@  vect_build_slp_tree_1 (vec_info *vinfo, unsigned char *swap,
 	  if (need_same_oprnds
 	      && !operand_equal_p (first_op1, gimple_assign_rhs2 (stmt), 0))
 	    {
-	      if (dump_enabled_p ())
+	      if (optinfo_enabled_p ())
 		{
-		  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, 
-				   "Build SLP failed: different shift "
-				   "arguments in ");
-		  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
+		  OPTINFO_VECT_FAILURE
+		    << ("Build SLP failed: different shift "
+			"arguments in ")
+		    << stmt;
 		}
 	      /* Mismatch.  */
 	      continue;
@@ -839,12 +833,11 @@  vect_build_slp_tree_1 (vec_info *vinfo, unsigned char *swap,
 		  || gimple_call_fntype (first_stmt)
 		     != gimple_call_fntype (stmt))
 		{
-		  if (dump_enabled_p ())
+		  if (optinfo_enabled_p ())
 		    {
-		      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, 
-				       "Build SLP failed: different calls in ");
-		      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
-					stmt, 0);
+		      OPTINFO_VECT_FAILURE
+			<< "Build SLP failed: different calls in "
+			<< stmt;
 		    }
 		  /* Mismatch.  */
 		  continue;
@@ -870,14 +863,12 @@  vect_build_slp_tree_1 (vec_info *vinfo, unsigned char *swap,
                      chains in the same node.  */
                   if (prev_first_load != first_load)
                     {
-                      if (dump_enabled_p ())
+                      if (optinfo_enabled_p ())
                         {
-                          dump_printf_loc (MSG_MISSED_OPTIMIZATION,
-					   vect_location, 
-					   "Build SLP failed: different "
-					   "interleaving chains in one node ");
-                          dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
-					    stmt, 0);
+			  OPTINFO_VECT_FAILURE
+			    << ("Build SLP failed: different "
+				 "interleaving chains in one node ")
+			    << stmt;
                         }
 		      /* Mismatch.  */
 		      continue;
@@ -892,11 +883,11 @@  vect_build_slp_tree_1 (vec_info *vinfo, unsigned char *swap,
 	  if (TREE_CODE_CLASS (rhs_code) == tcc_reference)
 	    {
 	      /* Not grouped load.  */
-	      if (dump_enabled_p ())
+	      if (optinfo_enabled_p ())
 		{
-		  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, 
-				   "Build SLP failed: not grouped load ");
-		  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
+		  OPTINFO_VECT_FAILURE
+		    << "Build SLP failed: not grouped load "
+		    << stmt;
 		}
 
 	      /* FORNOW: Not grouped loads are not supported.  */
@@ -912,12 +903,11 @@  vect_build_slp_tree_1 (vec_info *vinfo, unsigned char *swap,
 	      && TREE_CODE_CLASS (rhs_code) != tcc_comparison
 	      && rhs_code != CALL_EXPR)
 	    {
-	      if (dump_enabled_p ())
+	      if (optinfo_enabled_p ())
 		{
-		  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-				   "Build SLP failed: operation");
-		  dump_printf (MSG_MISSED_OPTIMIZATION, " unsupported ");
-		  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
+		  OPTINFO_VECT_FAILURE
+		    << "Build SLP failed: operation unsupported "
+		    << stmt;
 		}
 	      /* Fatal mismatch.  */
 	      matches[0] = false;
@@ -950,13 +940,11 @@  vect_build_slp_tree_1 (vec_info *vinfo, unsigned char *swap,
 		swap[i] = 2;
 	      else
 		{
-		  if (dump_enabled_p ())
+		  if (optinfo_enabled_p ())
 		    {
-		      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-				       "Build SLP failed: different"
-				       " operation");
-		      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
-					stmt, 0);
+		      OPTINFO_VECT_FAILURE
+			<< "Build SLP failed: different operation"
+			<< stmt;
 		    }
 		  /* Mismatch.  */
 		  continue;
@@ -980,10 +968,10 @@  vect_build_slp_tree_1 (vec_info *vinfo, unsigned char *swap,
       unsigned HOST_WIDE_INT count;
       if (!nunits.is_constant (&count))
 	{
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			     "Build SLP failed: different operations "
-			     "not allowed with variable-length SLP.\n");
+	  if (optinfo_enabled_p ())
+	    OPTINFO_VECT_FAILURE
+	      << ("Build SLP failed: different operations "
+		  "not allowed with variable-length SLP");
 	  return false;
 	}
       vec_perm_builder sel (count, count, 1);
@@ -1001,17 +989,14 @@  vect_build_slp_tree_1 (vec_info *vinfo, unsigned char *swap,
 	    if (gimple_assign_rhs_code (stmts[i]) == alt_stmt_code)
 	      {
 		matches[i] = false;
-		if (dump_enabled_p ())
+		if (optinfo_enabled_p ())
 		  {
-		    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-				     "Build SLP failed: different operation "
-				     "in stmt ");
-		    dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
-				      stmts[i], 0);
-		    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-				     "original stmt ");
-		    dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
-				      first_stmt, 0);
+		    OPTINFO_VECT_FAILURE
+		      << ("Build SLP failed: different operation "
+			  "in stmt ")
+		      << stmts[i]
+		      << "original stmt "
+		      << first_stmt;
 		  }
 	      }
 	  return false;
@@ -1258,7 +1243,7 @@  vect_build_slp_tree_2 (vec_info *vinfo,
 		    vect_free_slp_tree (grandchild);
 		  SLP_TREE_CHILDREN (child).truncate (0);
 
-		  dump_printf_loc (MSG_NOTE, vect_location,
+		  OPTINFO_VECT_NOTE << optinfo_printf (
 				   "Building parent vector operands from "
 				   "scalars instead\n");
 		  oprnd_info->def_stmts = vNULL;
@@ -1288,7 +1273,7 @@  vect_build_slp_tree_2 (vec_info *vinfo,
 	     scalar version.  */
 	  && !is_pattern_stmt_p (vinfo_for_stmt (stmt)))
 	{
-	  dump_printf_loc (MSG_NOTE, vect_location,
+	  OPTINFO_VECT_NOTE << optinfo_printf (
 			   "Building vector operands from scalars\n");
 	  child = vect_create_new_slp_node (oprnd_info->def_stmts);
 	  SLP_TREE_DEF_TYPE (child) = vect_external_def;
@@ -1344,14 +1329,12 @@  vect_build_slp_tree_2 (vec_info *vinfo,
 		    {
 		      if (!swap_not_matching)
 			{
-			  if (dump_enabled_p ())
+			  if (optinfo_enabled_p ())
 			    {
-			      dump_printf_loc (MSG_MISSED_OPTIMIZATION,
-					       vect_location,
-					       "Build SLP failed: cannot swap "
-					       "operands of shared stmt ");
-			      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION,
-						TDF_SLIM, stmts[j], 0);
+			      OPTINFO_VECT_FAILURE
+				<< ("Build SLP failed: cannot swap "
+				    "operands of shared stmt ")
+				<< stmts[j];
 			    }
 			  goto fail;
 			}
@@ -1363,7 +1346,7 @@  vect_build_slp_tree_2 (vec_info *vinfo,
 	  while (j != group_size);
 
 	  /* Swap mismatched definition stmts.  */
-	  dump_printf_loc (MSG_NOTE, vect_location,
+	  OPTINFO_VECT_NOTE << optinfo_printf (
 			   "Re-trying with swapped operands of stmts ");
 	  for (j = 0; j < group_size; ++j)
 	    if (matches[j] == !swap_not_matching)
@@ -1438,7 +1421,7 @@  vect_build_slp_tree_2 (vec_info *vinfo,
 			vect_free_slp_tree (grandchild);
 		      SLP_TREE_CHILDREN (child).truncate (0);
 
-		      dump_printf_loc (MSG_NOTE, vect_location,
+		      OPTINFO_VECT_NOTE << optinfo_printf (
 				       "Building parent vector operands from "
 				       "scalars instead\n");
 		      oprnd_info->def_stmts = vNULL;
@@ -1656,9 +1639,9 @@  vect_supported_load_permutation_p (slp_instance slp_instn)
   slp_tree node;
   gimple *stmt, *load, *next_load;
 
-  if (dump_enabled_p ())
+  if (optinfo_enabled_p ())
     {
-      dump_printf_loc (MSG_NOTE, vect_location, "Load permutation ");
+      OPTINFO_VECT_NOTE << optinfo_printf ("Load permutation ");
       FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
 	if (node->load_permutation.exists ())
 	  FOR_EACH_VEC_ELT (node->load_permutation, j, next)
@@ -1733,9 +1716,9 @@  vect_supported_load_permutation_p (slp_instance slp_instn)
 	      if (!TYPE_VECTOR_SUBPARTS (vectype).is_constant (&nunits)
 		  || maxk >= (GROUP_SIZE (group_info) & ~(nunits - 1)))
 		{
-		  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-				   "BB vectorization with gaps at the end of "
-				   "a load is not supported\n");
+		  OPTINFO_VECT_FAILURE
+		    << ("BB vectorization with gaps at the end of "
+			"a load is not supported");
 		  return false;
 		}
 
@@ -1745,9 +1728,8 @@  vect_supported_load_permutation_p (slp_instance slp_instn)
 	      if (!vect_transform_slp_perm_load (node, tem, NULL,
 						 1, slp_instn, true, &n_perms))
 		{
-		  dump_printf_loc (MSG_MISSED_OPTIMIZATION,
-				   vect_location,
-				   "unsupported load permutation\n");
+		  OPTINFO_VECT_FAILURE
+		    << "unsupported load permutation";
 		  return false;
 		}
 	    }
@@ -2114,8 +2096,8 @@  vect_split_slp_store_group (gimple *first_stmt, unsigned group1_size)
   /* GROUP_GAP of the first group now has to skip over the second group too.  */
   GROUP_GAP (first_vinfo) += group2_size;
 
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location, "Split group into %d and %d\n",
+  if (optinfo_enabled_p ())
+    OPTINFO_VECT_NOTE << optinfo_printf ("Split group into %d and %d\n",
 		     group1_size, group2_size);
 
   return group2;
@@ -2172,12 +2154,11 @@  vect_analyze_slp_instance (vec_info *vinfo,
 
   if (!vectype)
     {
-      if (dump_enabled_p ())
+      if (optinfo_enabled_p ())
         {
-          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			   "Build SLP failed: unsupported data-type ");
-          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, scalar_type);
-          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
+          OPTINFO_VECT_FAILURE
+	    << "Build SLP failed: unsupported data-type "
+	    << slim (scalar_type);
         }
 
       return false;
@@ -2238,8 +2219,8 @@  vect_analyze_slp_instance (vec_info *vinfo,
 	  if (!max_nunits.is_constant (&const_max_nunits)
 	      || const_max_nunits > group_size)
 	    {
-	      if (dump_enabled_p ())
-		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+	      if (optinfo_enabled_p ())
+		OPTINFO_VECT_FAILURE << optinfo_printf (
 				 "Build SLP failed: store group "
 				 "size not a multiple of the vector size "
 				 "in basic block SLP\n");
@@ -2301,13 +2282,12 @@  vect_analyze_slp_instance (vec_info *vinfo,
         {
           if (!vect_supported_load_permutation_p (new_instance))
             {
-              if (dump_enabled_p ())
+              if (optinfo_enabled_p ())
                 {
-                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-				   "Build SLP failed: unsupported load "
-				   "permutation ");
-		      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION,
-					TDF_SLIM, stmt, 0);
+                  OPTINFO_VECT_FAILURE
+		    << ("Build SLP failed: unsupported load "
+			"permutation ")
+		    << stmt;
                 }
               vect_free_slp_instance (new_instance);
               return false;
@@ -2336,8 +2316,8 @@  vect_analyze_slp_instance (vec_info *vinfo,
 	    }
 	  if (i == loads.length ())
 	    {
-	      if (dump_enabled_p ())
-		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+	      if (optinfo_enabled_p ())
+		OPTINFO_VECT_FAILURE << optinfo_printf (
 				 "Built SLP cancelled: can use "
 				 "load/store-lanes\n");
 	      vect_free_slp_instance (new_instance);
@@ -2347,9 +2327,9 @@  vect_analyze_slp_instance (vec_info *vinfo,
 
       vinfo->slp_instances.safe_push (new_instance);
 
-      if (dump_enabled_p ())
+      if (optinfo_enabled_p ())
 	{
-	  dump_printf_loc (MSG_NOTE, vect_location,
+	  OPTINFO_VECT_NOTE << optinfo_printf (
 			   "Final SLP tree for instance:\n");
 	  vect_print_slp_tree (MSG_NOTE, vect_location, node);
 	}
@@ -2415,8 +2395,7 @@  vect_analyze_slp (vec_info *vinfo, unsigned max_tree_size)
   unsigned int i;
   gimple *first_element;
 
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location, "=== vect_analyze_slp ===\n");
+  VECT_OPTINFO_SCOPE ("vect_analyze_slp");
 
   /* Find SLP sequences starting from groups of grouped stores.  */
   FOR_EACH_VEC_ELT (vinfo->grouped_stores, i, first_element)
@@ -2469,9 +2448,7 @@  vect_make_slp_decision (loop_vec_info loop_vinfo)
   slp_instance instance;
   int decided_to_slp = 0;
 
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location, "=== vect_make_slp_decision ==="
-                     "\n");
+  VECT_OPTINFO_SCOPE ("vect_make_slp_decision");
 
   FOR_EACH_VEC_ELT (slp_instances, i, instance)
     {
@@ -2491,13 +2468,12 @@  vect_make_slp_decision (loop_vec_info loop_vinfo)
 
   LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo) = unrolling_factor;
 
-  if (decided_to_slp && dump_enabled_p ())
+  if (decided_to_slp && optinfo_enabled_p ())
     {
-      dump_printf_loc (MSG_NOTE, vect_location,
-		       "Decided to SLP %d instances. Unrolling factor ",
-		       decided_to_slp);
-      dump_dec (MSG_NOTE, unrolling_factor);
-      dump_printf (MSG_NOTE, "\n");
+      OPTINFO_VECT_NOTE
+	<< optinfo_printf ("Decided to SLP %d instances. Unrolling factor ",
+			   decided_to_slp)
+	<< unrolling_factor;
     }
 
   return (decided_to_slp > 0);
@@ -2553,9 +2529,9 @@  vect_detect_hybrid_slp_stmts (slp_tree node, unsigned i, slp_vect_type stype)
 		&& !(gimple_code (use_stmt) == GIMPLE_PHI
 		     && STMT_VINFO_DEF_TYPE (use_vinfo) == vect_reduction_def))
 	      {
-		if (dump_enabled_p ())
+		if (optinfo_enabled_p ())
 		  {
-		    dump_printf_loc (MSG_NOTE, vect_location, "use of SLP "
+		    OPTINFO_VECT_NOTE << optinfo_printf ("use of SLP "
 				     "def in non-SLP stmt: ");
 		    dump_gimple_stmt (MSG_NOTE, TDF_SLIM, use_stmt, 0);
 		  }
@@ -2567,9 +2543,9 @@  vect_detect_hybrid_slp_stmts (slp_tree node, unsigned i, slp_vect_type stype)
   if (stype == hybrid
       && !HYBRID_SLP_STMT (stmt_vinfo))
     {
-      if (dump_enabled_p ())
+      if (optinfo_enabled_p ())
 	{
-	  dump_printf_loc (MSG_NOTE, vect_location, "marking hybrid: ");
+	  OPTINFO_VECT_NOTE << optinfo_printf ("marking hybrid: ");
 	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
 	}
       STMT_SLP_TYPE (stmt_vinfo) = hybrid;
@@ -2598,9 +2574,9 @@  vect_detect_hybrid_slp_1 (tree *tp, int *, void *data)
       if (flow_bb_inside_loop_p (loopp, gimple_bb (def_stmt))
 	  && PURE_SLP_STMT (vinfo_for_stmt (def_stmt)))
 	{
-	  if (dump_enabled_p ())
+	  if (optinfo_enabled_p ())
 	    {
-	      dump_printf_loc (MSG_NOTE, vect_location, "marking hybrid: ");
+	      OPTINFO_VECT_NOTE << optinfo_printf ("marking hybrid: ");
 	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
 	    }
 	  STMT_SLP_TYPE (vinfo_for_stmt (def_stmt)) = hybrid;
@@ -2637,9 +2613,7 @@  vect_detect_hybrid_slp (loop_vec_info loop_vinfo)
   vec<slp_instance> slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
   slp_instance instance;
 
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location, "=== vect_detect_hybrid_slp ==="
-                     "\n");
+  VECT_OPTINFO_SCOPE ("vect_detect_hybrid_slp");
 
   /* First walk all pattern stmt in the loop and mark defs of uses as
      hybrid because immediate uses in them are not recorded.  */
@@ -2762,33 +2736,30 @@  vect_slp_analyze_node_operations (vec_info *vinfo, slp_tree node,
       gcc_assert (PURE_SLP_STMT (stmt_info));
 
       tree scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
-      if (dump_enabled_p ())
+      if (optinfo_enabled_p ())
 	{
-	  dump_printf_loc (MSG_NOTE, vect_location,
-			   "get vectype for scalar type:  ");
-	  dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
-	  dump_printf (MSG_NOTE, "\n");
+	  OPTINFO_VECT_NOTE
+	    << "get vectype for scalar type:  "
+	    << slim (scalar_type);
 	}
 
       tree vectype = get_vectype_for_scalar_type (scalar_type);
       if (!vectype)
 	{
-	  if (dump_enabled_p ())
+	  if (optinfo_enabled_p ())
 	    {
-	      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			       "not SLPed: unsupported data-type ");
-	      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
-				 scalar_type);
-	      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
+	      OPTINFO_VECT_FAILURE
+		<< "not SLPed: unsupported data-type "
+		<< slim (scalar_type);
 	    }
 	  return false;
 	}
 
-      if (dump_enabled_p ())
+      if (optinfo_enabled_p ())
 	{
-	  dump_printf_loc (MSG_NOTE, vect_location, "vectype:  ");
-	  dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
-	  dump_printf (MSG_NOTE, "\n");
+	  OPTINFO_VECT_NOTE
+	    << "vectype:  "
+	    << slim (vectype);
 	}
 
       gimple *sstmt;
@@ -2846,9 +2817,7 @@  vect_slp_analyze_operations (vec_info *vinfo)
   slp_instance instance;
   int i;
 
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location,
-		     "=== vect_slp_analyze_operations ===\n");
+  VECT_OPTINFO_SCOPE ("vect_slp_analyze_operations");
 
   for (i = 0; vinfo->slp_instances.iterate (i, &instance); )
     {
@@ -2856,11 +2825,9 @@  vect_slp_analyze_operations (vec_info *vinfo)
 					     SLP_INSTANCE_TREE (instance),
 					     instance))
         {
-	  dump_printf_loc (MSG_NOTE, vect_location,
-			   "removing SLP instance operations starting from: ");
-	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
-			    SLP_TREE_SCALAR_STMTS
-			      (SLP_INSTANCE_TREE (instance))[0], 0);
+	  OPTINFO_VECT_NOTE
+	    << "removing SLP instance operations starting from: "
+	    << SLP_TREE_SCALAR_STMTS (SLP_INSTANCE_TREE (instance))[0];
 	  vect_free_slp_instance (instance);
           vinfo->slp_instances.ordered_remove (i);
 	}
@@ -2868,15 +2835,15 @@  vect_slp_analyze_operations (vec_info *vinfo)
 	i++;
     }
 
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location,
-		     "=== vect_analyze_slp_cost ===\n");
+  {
+    VECT_OPTINFO_SCOPE ("vect_analyze_slp_cost");
 
-  /* Compute the costs of the SLP instances.  */
-  scalar_stmts_set_t *visited = new scalar_stmts_set_t ();
-  for (i = 0; vinfo->slp_instances.iterate (i, &instance); ++i)
-    vect_analyze_slp_cost (instance, vinfo->target_cost_data, visited);
-  delete visited;
+    /* Compute the costs of the SLP instances.  */
+    scalar_stmts_set_t *visited = new scalar_stmts_set_t ();
+    for (i = 0; vinfo->slp_instances.iterate (i, &instance); ++i)
+      vect_analyze_slp_cost (instance, vinfo->target_cost_data, visited);
+    delete visited;
+  }
 
   return !vinfo->slp_instances.is_empty ();
 }
@@ -2994,9 +2961,9 @@  vect_bb_vectorization_profitable_p (bb_vec_info bb_vinfo)
 
   vec_outside_cost = vec_prologue_cost + vec_epilogue_cost;
 
-  if (dump_enabled_p ())
+  if (optinfo_enabled_p ())
     {
-      dump_printf_loc (MSG_NOTE, vect_location, "Cost model analysis: \n");
+      OPTINFO_VECT_NOTE << optinfo_printf ("Cost model analysis: \n");
       dump_printf (MSG_NOTE, "  Vector inside of basic block cost: %d\n",
 		   vec_inside_cost);
       dump_printf (MSG_NOTE, "  Vector prologue cost: %d\n", vec_prologue_cost);
@@ -3035,8 +3002,8 @@  vect_slp_analyze_bb_1 (gimple_stmt_iterator region_begin,
 
   if (n_stmts > PARAM_VALUE (PARAM_SLP_MAX_INSNS_IN_BB))
     {
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_FAILURE << optinfo_printf (
 			 "not vectorized: too many instructions in "
 			 "basic block.\n");
       free_data_refs (datarefs);
@@ -3053,10 +3020,10 @@  vect_slp_analyze_bb_1 (gimple_stmt_iterator region_begin,
 
   if (!vect_analyze_data_refs (bb_vinfo, &min_vf))
     {
-      if (dump_enabled_p ())
-        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+      if (optinfo_enabled_p ())
+        OPTINFO_VECT_FAILURE << optinfo_printf (
 			 "not vectorized: unhandled data-ref in basic "
-			 "block.\n");
+			 "block");
 
       delete bb_vinfo;
       return NULL;
@@ -3064,10 +3031,10 @@  vect_slp_analyze_bb_1 (gimple_stmt_iterator region_begin,
 
   if (BB_VINFO_DATAREFS (bb_vinfo).length () < 2)
     {
-      if (dump_enabled_p ())
-        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+      if (optinfo_enabled_p ())
+        OPTINFO_VECT_FAILURE << optinfo_printf (
 			 "not vectorized: not enough data-refs in "
-			 "basic block.\n");
+			 "basic block");
 
       delete bb_vinfo;
       return NULL;
@@ -3075,10 +3042,10 @@  vect_slp_analyze_bb_1 (gimple_stmt_iterator region_begin,
 
   if (!vect_analyze_data_ref_accesses (bb_vinfo))
     {
-     if (dump_enabled_p ())
-       dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+     if (optinfo_enabled_p ())
+       OPTINFO_VECT_FAILURE << optinfo_printf (
 			"not vectorized: unhandled data access in "
-			"basic block.\n");
+			"basic block");
 
       delete bb_vinfo;
       return NULL;
@@ -3089,8 +3056,8 @@  vect_slp_analyze_bb_1 (gimple_stmt_iterator region_begin,
      anyway.  */
   if (bb_vinfo->grouped_stores.is_empty ())
     {
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_FAILURE << optinfo_printf (
 			 "not vectorized: no grouped stores in "
 			 "basic block.\n");
 
@@ -3107,11 +3074,11 @@  vect_slp_analyze_bb_1 (gimple_stmt_iterator region_begin,
      trees.  */
   if (!vect_analyze_slp (bb_vinfo, n_stmts))
     {
-      if (dump_enabled_p ())
+      if (optinfo_enabled_p ())
 	{
-	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+	  OPTINFO_VECT_FAILURE << optinfo_printf (
 			   "Failed to SLP the basic block.\n");
-	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, 
+	  OPTINFO_VECT_FAILURE << optinfo_printf (
 			   "not vectorized: failed to find SLP opportunities "
 			   "in basic block.\n");
 	}
@@ -3129,7 +3096,7 @@  vect_slp_analyze_bb_1 (gimple_stmt_iterator region_begin,
       if (! vect_slp_analyze_and_verify_instance_alignment (instance)
 	  || ! vect_slp_analyze_instance_dependence (instance))
 	{
-	  dump_printf_loc (MSG_NOTE, vect_location,
+	  OPTINFO_VECT_NOTE << optinfo_printf (
 			   "removing SLP instance operations starting from: ");
 	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
 			    SLP_TREE_SCALAR_STMTS
@@ -3154,8 +3121,8 @@  vect_slp_analyze_bb_1 (gimple_stmt_iterator region_begin,
 
   if (!vect_slp_analyze_operations (bb_vinfo))
     {
-      if (dump_enabled_p ())
-        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+      if (optinfo_enabled_p ())
+        OPTINFO_VECT_FAILURE << optinfo_printf (
 			 "not vectorized: bad operation in basic block.\n");
 
       delete bb_vinfo;
@@ -3166,8 +3133,8 @@  vect_slp_analyze_bb_1 (gimple_stmt_iterator region_begin,
   if (!unlimited_cost_model (NULL)
       && !vect_bb_vectorization_profitable_p (bb_vinfo))
     {
-      if (dump_enabled_p ())
-        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+      if (optinfo_enabled_p ())
+        OPTINFO_VECT_FAILURE << optinfo_printf (
 			 "not vectorized: vectorization is not "
 			 "profitable.\n");
 
@@ -3175,8 +3142,8 @@  vect_slp_analyze_bb_1 (gimple_stmt_iterator region_begin,
       return NULL;
     }
 
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location,
+  if (optinfo_enabled_p ())
+    OPTINFO_VECT_NOTE << optinfo_printf (
 		     "Basic block will be vectorized using SLP\n");
 
   return bb_vinfo;
@@ -3194,8 +3161,7 @@  vect_slp_bb (basic_block bb)
   bool any_vectorized = false;
   auto_vector_sizes vector_sizes;
 
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location, "===vect_slp_analyze_bb===\n");
+  OPTINFO_SCOPE ("vect_slp_analyze_bb", bb);
 
   /* Autodetect first vector size we try.  */
   current_vector_size = 0;
@@ -3222,7 +3188,10 @@  vect_slp_bb (basic_block bb)
 	  insns++;
 
 	  if (gimple_location (stmt) != UNKNOWN_LOCATION)
-	    vect_location = gimple_location (stmt);
+	    {
+	      vect_location = gimple_location (stmt);
+	      vect_optinfo_location = stmt;
+	    }
 
 	  if (!find_data_references_in_stmt (NULL, stmt, &datarefs))
 	    break;
@@ -3244,14 +3213,15 @@  vect_slp_bb (basic_block bb)
       if (bb_vinfo
 	  && dbg_cnt (vect_slp))
 	{
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_NOTE, vect_location, "SLPing BB part\n");
+	  if (optinfo_enabled_p ())
+	    OPTINFO_VECT_NOTE
+	      << "SLPing BB part";
 
 	  vect_schedule_slp (bb_vinfo);
 
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_NOTE, vect_location,
-			     "basic block part vectorized\n");
+	  if (optinfo_enabled_p ())
+	    OPTINFO_VECT_NOTE
+	      << "basic block part vectorized";
 
 	  vectorized = true;
 	}
@@ -3287,13 +3257,11 @@  vect_slp_bb (basic_block bb)
 	{
 	  /* Try the next biggest vector size.  */
 	  current_vector_size = vector_sizes[next_size++];
-	  if (dump_enabled_p ())
+	  if (optinfo_enabled_p ())
 	    {
-	      dump_printf_loc (MSG_NOTE, vect_location,
-			       "***** Re-trying analysis with "
-			       "vector size ");
-	      dump_dec (MSG_NOTE, current_vector_size);
-	      dump_printf (MSG_NOTE, "\n");
+	      OPTINFO_VECT_NOTE
+		<< "***** Re-trying analysis with vector size "
+		<< current_vector_size;
 	    }
 
 	  /* Start over.  */
@@ -3951,13 +3919,12 @@  vect_transform_slp_perm_load (slp_tree node, vec<tree> dr_chain,
 	    }
 	  else
 	    {
-	      if (dump_enabled_p ())
+	      if (optinfo_enabled_p ())
 		{
-		  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-				   "permutation requires at "
-				   "least three vectors ");
-		  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
-				    stmt, 0);
+		  // FIXME: would be better to use "stmt" for location here:
+                  OPTINFO_VECT_FAILURE
+		    << "permutation requires at least three vectors "
+		    << stmt;
 		}
 	      gcc_assert (analyze_only);
 	      return false;
@@ -3973,17 +3940,13 @@  vect_transform_slp_perm_load (slp_tree node, vec<tree> dr_chain,
 	      indices.new_vector (mask, 2, nunits);
 	      if (!can_vec_perm_const_p (mode, indices))
 		{
-		  if (dump_enabled_p ())
+		  if (optinfo_enabled_p ())
 		    {
-		      dump_printf_loc (MSG_MISSED_OPTIMIZATION,
-				       vect_location, 
-				       "unsupported vect permute { ");
+		      pending_optinfo info = OPTINFO_VECT_FAILURE
+			<< "unsupported vect permute { ";
 		      for (i = 0; i < nunits; ++i)
-			{
-			  dump_dec (MSG_MISSED_OPTIMIZATION, mask[i]);
-			  dump_printf (MSG_MISSED_OPTIMIZATION, " ");
-			}
-		      dump_printf (MSG_MISSED_OPTIMIZATION, "}\n");
+			info << mask[i] << " ";
+		      info << "}";
 		    }
 		  gcc_assert (analyze_only);
 		  return false;
@@ -4093,7 +4056,7 @@  vect_schedule_slp_instance (slp_tree node, slp_instance instance,
   if (!SLP_TREE_VEC_STMTS (node).exists ())
     SLP_TREE_VEC_STMTS (node).create (SLP_TREE_NUMBER_OF_VEC_STMTS (node));
 
-  if (dump_enabled_p ())
+  if (optinfo_enabled_p ())
     {
       dump_printf_loc (MSG_NOTE,vect_location,
 		       "------>vectorizing SLP node starting from: ");
@@ -4255,9 +4218,9 @@  vect_schedule_slp (vec_info *vinfo)
       /* Schedule the tree of INSTANCE.  */
       is_store = vect_schedule_slp_instance (SLP_INSTANCE_TREE (instance),
                                              instance, bst_map);
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_NOTE, vect_location,
-                         "vectorizing stmts using SLP.\n");
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_NOTE
+	  << "vectorizing stmts using SLP";
     }
   delete bst_map;
 
diff --git a/gcc/tree-vect-stmts.c b/gcc/tree-vect-stmts.c
index 3e73118..55e5029 100644
--- a/gcc/tree-vect-stmts.c
+++ b/gcc/tree-vect-stmts.c
@@ -197,7 +197,7 @@  vect_mark_relevant (vec<gimple *> *worklist, gimple *stmt,
   bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
   gimple *pattern_stmt;
 
-  if (dump_enabled_p ())
+  if (optinfo_enabled_p ())
     {
       dump_printf_loc (MSG_NOTE, vect_location,
 		       "mark relevant %d, live %d: ", relevant, live_p);
@@ -217,7 +217,7 @@  vect_mark_relevant (vec<gimple *> *worklist, gimple *stmt,
 
       pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
 
-      if (dump_enabled_p ())
+      if (optinfo_enabled_p ())
 	dump_printf_loc (MSG_NOTE, vect_location,
 			 "last stmt in pattern. don't mark"
 			 " relevant/live.\n");
@@ -235,7 +235,7 @@  vect_mark_relevant (vec<gimple *> *worklist, gimple *stmt,
   if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
       && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
     {
-      if (dump_enabled_p ())
+      if (optinfo_enabled_p ())
         dump_printf_loc (MSG_NOTE, vect_location,
                          "already marked relevant/live.\n");
       return;
@@ -265,9 +265,9 @@  is_simple_and_all_uses_invariant (gimple *stmt, loop_vec_info loop_vinfo)
 
       if (!vect_is_simple_use (op, loop_vinfo, &def_stmt, &dt))
 	{
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			     "use not simple.\n");
+	  if (optinfo_enabled_p ())
+	    OPTINFO_VECT_FAILURE << optinfo_printf (
+				   "use not simple");
 	  return false;
 	}
 
@@ -313,7 +313,7 @@  vect_stmt_relevant_p (gimple *stmt, loop_vec_info loop_vinfo,
     if (gimple_vdef (stmt)
 	&& !gimple_clobber_p (stmt))
       {
-	if (dump_enabled_p ())
+	if (optinfo_enabled_p ())
 	  dump_printf_loc (MSG_NOTE, vect_location,
                            "vec_stmt_relevant_p: stmt has vdefs.\n");
 	*relevant = vect_used_in_scope;
@@ -327,7 +327,7 @@  vect_stmt_relevant_p (gimple *stmt, loop_vec_info loop_vinfo,
 	  basic_block bb = gimple_bb (USE_STMT (use_p));
 	  if (!flow_bb_inside_loop_p (loop, bb))
 	    {
-	      if (dump_enabled_p ())
+	      if (optinfo_enabled_p ())
 		dump_printf_loc (MSG_NOTE, vect_location,
                                  "vec_stmt_relevant_p: used out of loop.\n");
 
@@ -347,7 +347,7 @@  vect_stmt_relevant_p (gimple *stmt, loop_vec_info loop_vinfo,
   if (*live_p && *relevant == vect_unused_in_scope
       && !is_simple_and_all_uses_invariant (stmt, loop_vinfo))
     {
-      if (dump_enabled_p ())
+      if (optinfo_enabled_p ())
 	dump_printf_loc (MSG_NOTE, vect_location,
 			 "vec_stmt_relevant_p: stmt live but not relevant.\n");
       *relevant = vect_used_only_live;
@@ -467,8 +467,8 @@  process_use (gimple *stmt, tree use, loop_vec_info loop_vinfo,
 
   if (!vect_is_simple_use (use, loop_vinfo, &def_stmt, &dt))
     {
-      if (dump_enabled_p ())
-        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+      if (optinfo_enabled_p ())
+        OPTINFO_VECT_FAILURE << optinfo_printf (
                          "not vectorized: unsupported use in stmt.\n");
       return false;
     }
@@ -479,7 +479,7 @@  process_use (gimple *stmt, tree use, loop_vec_info loop_vinfo,
   def_bb = gimple_bb (def_stmt);
   if (!flow_bb_inside_loop_p (loop, def_bb))
     {
-      if (dump_enabled_p ())
+      if (optinfo_enabled_p ())
 	dump_printf_loc (MSG_NOTE, vect_location, "def_stmt is out of loop.\n");
       return true;
     }
@@ -497,7 +497,7 @@  process_use (gimple *stmt, tree use, loop_vec_info loop_vinfo,
       && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
       && bb->loop_father == def_bb->loop_father)
     {
-      if (dump_enabled_p ())
+      if (optinfo_enabled_p ())
 	dump_printf_loc (MSG_NOTE, vect_location,
                          "reduc-stmt defining reduc-phi in the same nest.\n");
       if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
@@ -517,7 +517,7 @@  process_use (gimple *stmt, tree use, loop_vec_info loop_vinfo,
 		...		  */
   if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
     {
-      if (dump_enabled_p ())
+      if (optinfo_enabled_p ())
 	dump_printf_loc (MSG_NOTE, vect_location,
                          "outer-loop def-stmt defining inner-loop stmt.\n");
 
@@ -555,7 +555,7 @@  process_use (gimple *stmt, tree use, loop_vec_info loop_vinfo,
 		stmt # use (d)		*/
   else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
     {
-      if (dump_enabled_p ())
+      if (optinfo_enabled_p ())
 	dump_printf_loc (MSG_NOTE, vect_location,
                          "inner-loop def-stmt defining outer-loop stmt.\n");
 
@@ -590,7 +590,7 @@  process_use (gimple *stmt, tree use, loop_vec_info loop_vinfo,
 	   && (PHI_ARG_DEF_FROM_EDGE (stmt, loop_latch_edge (bb->loop_father))
 	       == use))
     {
-      if (dump_enabled_p ())
+      if (optinfo_enabled_p ())
 	dump_printf_loc (MSG_NOTE, vect_location,
                          "induction value on backedge.\n");
       return true;
@@ -633,9 +633,7 @@  vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
   bool live_p;
   enum vect_relevant relevant;
 
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location,
-                     "=== vect_mark_stmts_to_be_vectorized ===\n");
+  VECT_OPTINFO_SCOPE ("vect_mark_stmts_to_be_vectorized");
 
   auto_vec<gimple *, 64> worklist;
 
@@ -646,7 +644,7 @@  vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
       for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
 	{
 	  phi = gsi_stmt (si);
-	  if (dump_enabled_p ())
+	  if (optinfo_enabled_p ())
 	    {
 	      dump_printf_loc (MSG_NOTE, vect_location, "init: phi relevant? ");
 	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
@@ -658,7 +656,7 @@  vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
       for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
 	{
 	  stmt = gsi_stmt (si);
-	  if (dump_enabled_p ())
+	  if (optinfo_enabled_p ())
 	    {
 	      dump_printf_loc (MSG_NOTE, vect_location, "init: stmt relevant? ");
 	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
@@ -676,7 +674,7 @@  vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
       ssa_op_iter iter;
 
       stmt = worklist.pop ();
-      if (dump_enabled_p ())
+      if (optinfo_enabled_p ())
 	{
           dump_printf_loc (MSG_NOTE, vect_location, "worklist: examine stmt: ");
           dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
@@ -708,8 +706,8 @@  vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
 		&& relevant != vect_used_by_reduction
 		&& relevant != vect_used_only_live)
 	      {
-		if (dump_enabled_p ())
-		  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+		if (optinfo_enabled_p ())
+		  OPTINFO_VECT_FAILURE << optinfo_printf (
 				   "unsupported use of reduction.\n");
 		return false;
 	      }
@@ -720,8 +718,8 @@  vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
 		&& relevant != vect_used_in_outer_by_reduction
 		&& relevant != vect_used_in_outer)
               {
-                if (dump_enabled_p ())
-                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+                if (optinfo_enabled_p ())
+                  OPTINFO_VECT_FAILURE << optinfo_printf (
                                    "unsupported use of nested cycle.\n");
 
                 return false;
@@ -733,8 +731,8 @@  vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
 		&& relevant != vect_used_by_reduction
 		&& relevant != vect_used_only_live)
               {
-                if (dump_enabled_p ())
-                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+                if (optinfo_enabled_p ())
+                  OPTINFO_VECT_FAILURE << optinfo_printf (
                                    "unsupported use of double reduction.\n");
 
                 return false;
@@ -840,7 +838,7 @@  vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
   inside_cost = record_stmt_cost (body_cost_vec, ncopies, vector_stmt,
 				  stmt_info, 0, vect_body);
 
-  if (dump_enabled_p ())
+  if (optinfo_enabled_p ())
     dump_printf_loc (MSG_NOTE, vect_location,
                      "vect_model_simple_cost: inside_cost = %d, "
                      "prologue_cost = %d .\n", inside_cost, prologue_cost);
@@ -885,7 +883,7 @@  vect_model_promotion_demotion_cost (stmt_vec_info stmt_info,
       prologue_cost += add_stmt_cost (target_cost_data, 1, vector_stmt,
 				      stmt_info, 0, vect_prologue);
 
-  if (dump_enabled_p ())
+  if (optinfo_enabled_p ())
     dump_printf_loc (MSG_NOTE, vect_location,
                      "vect_model_promotion_demotion_cost: inside_cost = %d, "
                      "prologue_cost = %d .\n", inside_cost, prologue_cost);
@@ -939,7 +937,7 @@  vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
       inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
 				      stmt_info, 0, vect_body);
 
-      if (dump_enabled_p ())
+      if (optinfo_enabled_p ())
         dump_printf_loc (MSG_NOTE, vect_location,
                          "vect_model_store_cost: strided group_size = %d .\n",
                          group_size);
@@ -969,7 +967,7 @@  vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
 				       vec_to_scalar, stmt_info, 0, vect_body);
     }
 
-  if (dump_enabled_p ())
+  if (optinfo_enabled_p ())
     dump_printf_loc (MSG_NOTE, vect_location,
                      "vect_model_store_cost: inside_cost = %d, "
                      "prologue_cost = %d .\n", inside_cost, prologue_cost);
@@ -994,7 +992,7 @@  vect_get_store_cost (struct data_reference *dr, int ncopies,
 					  vector_store, stmt_info, 0,
 					  vect_body);
 
-        if (dump_enabled_p ())
+        if (optinfo_enabled_p ())
           dump_printf_loc (MSG_NOTE, vect_location,
                            "vect_model_store_cost: aligned.\n");
         break;
@@ -1006,7 +1004,7 @@  vect_get_store_cost (struct data_reference *dr, int ncopies,
 	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
 					  unaligned_store, stmt_info,
 					  DR_MISALIGNMENT (dr), vect_body);
-        if (dump_enabled_p ())
+        if (optinfo_enabled_p ())
           dump_printf_loc (MSG_NOTE, vect_location,
                            "vect_model_store_cost: unaligned supported by "
                            "hardware.\n");
@@ -1017,8 +1015,8 @@  vect_get_store_cost (struct data_reference *dr, int ncopies,
       {
         *inside_cost = VECT_MAX_COST;
 
-        if (dump_enabled_p ())
-          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+        if (optinfo_enabled_p ())
+          OPTINFO_VECT_FAILURE << optinfo_printf (
                            "vect_model_store_cost: unsupported access.\n");
         break;
       }
@@ -1075,7 +1073,7 @@  vect_model_load_cost (stmt_vec_info stmt_info, int ncopies,
       inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
 				      stmt_info, 0, vect_body);
 
-      if (dump_enabled_p ())
+      if (optinfo_enabled_p ())
         dump_printf_loc (MSG_NOTE, vect_location,
                          "vect_model_load_cost: strided group_size = %d .\n",
                          group_size);
@@ -1101,7 +1099,7 @@  vect_model_load_cost (stmt_vec_info stmt_info, int ncopies,
     inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_construct,
 				     stmt_info, 0, vect_body);
 
-  if (dump_enabled_p ())
+  if (optinfo_enabled_p ())
     dump_printf_loc (MSG_NOTE, vect_location,
                      "vect_model_load_cost: inside_cost = %d, "
                      "prologue_cost = %d .\n", inside_cost, prologue_cost);
@@ -1128,7 +1126,7 @@  vect_get_load_cost (struct data_reference *dr, int ncopies,
 	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
 					  stmt_info, 0, vect_body);
 
-        if (dump_enabled_p ())
+        if (optinfo_enabled_p ())
           dump_printf_loc (MSG_NOTE, vect_location,
                            "vect_model_load_cost: aligned.\n");
 
@@ -1141,7 +1139,7 @@  vect_get_load_cost (struct data_reference *dr, int ncopies,
 					  unaligned_load, stmt_info,
 					  DR_MISALIGNMENT (dr), vect_body);
 
-        if (dump_enabled_p ())
+        if (optinfo_enabled_p ())
           dump_printf_loc (MSG_NOTE, vect_location,
                            "vect_model_load_cost: unaligned supported by "
                            "hardware.\n");
@@ -1162,7 +1160,7 @@  vect_get_load_cost (struct data_reference *dr, int ncopies,
 	  *inside_cost += record_stmt_cost (body_cost_vec, 1, vector_stmt,
 					    stmt_info, 0, vect_body);
 
-        if (dump_enabled_p ())
+        if (optinfo_enabled_p ())
           dump_printf_loc (MSG_NOTE, vect_location,
                            "vect_model_load_cost: explicit realign\n");
 
@@ -1170,7 +1168,7 @@  vect_get_load_cost (struct data_reference *dr, int ncopies,
       }
     case dr_explicit_realign_optimized:
       {
-        if (dump_enabled_p ())
+        if (optinfo_enabled_p ())
           dump_printf_loc (MSG_NOTE, vect_location,
                            "vect_model_load_cost: unaligned software "
                            "pipelined.\n");
@@ -1198,7 +1196,7 @@  vect_get_load_cost (struct data_reference *dr, int ncopies,
 	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_perm,
 					  stmt_info, 0, vect_body);
 
-        if (dump_enabled_p ())
+        if (optinfo_enabled_p ())
           dump_printf_loc (MSG_NOTE, vect_location,
                            "vect_model_load_cost: explicit realign optimized"
                            "\n");
@@ -1210,8 +1208,8 @@  vect_get_load_cost (struct data_reference *dr, int ncopies,
       {
         *inside_cost = VECT_MAX_COST;
 
-        if (dump_enabled_p ())
-          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+        if (optinfo_enabled_p ())
+          OPTINFO_VECT_FAILURE << optinfo_printf (
                            "vect_model_load_cost: unsupported access.\n");
         break;
       }
@@ -1260,7 +1258,7 @@  vect_init_vector_1 (gimple *stmt, gimple *new_stmt, gimple_stmt_iterator *gsi)
        }
     }
 
-  if (dump_enabled_p ())
+  if (optinfo_enabled_p ())
     {
       dump_printf_loc (MSG_NOTE, vect_location,
                        "created new init_stmt: ");
@@ -1421,20 +1419,20 @@  vect_get_vec_def_for_operand (tree op, gimple *stmt, tree vectype)
   stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
   loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
 
-  if (dump_enabled_p ())
+  if (optinfo_enabled_p ())
     {
-      dump_printf_loc (MSG_NOTE, vect_location,
-                       "vect_get_vec_def_for_operand: ");
-      dump_generic_expr (MSG_NOTE, TDF_SLIM, op);
-      dump_printf (MSG_NOTE, "\n");
+      OPTINFO_VECT_NOTE
+	<< "vect_get_vec_def_for_operand: "
+	<< slim (op);
     }
 
   is_simple_use = vect_is_simple_use (op, loop_vinfo, &def_stmt, &dt);
   gcc_assert (is_simple_use);
-  if (def_stmt && dump_enabled_p ())
+  if (def_stmt && optinfo_enabled_p ())
     {
-      dump_printf_loc (MSG_NOTE, vect_location, "  def_stmt =  ");
-      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
+      OPTINFO_VECT_NOTE
+	<< "  def_stmt =  "
+	<< def_stmt;
     }
 
   if (dt == vect_constant_def || dt == vect_external_def)
@@ -1612,7 +1610,7 @@  vect_finish_stmt_generation_1 (gimple *stmt, gimple *vec_stmt)
 
   set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, vinfo));
 
-  if (dump_enabled_p ())
+  if (optinfo_enabled_p ())
     {
       dump_printf_loc (MSG_NOTE, vect_location, "add new stmt: ");
       dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vec_stmt, 0);
@@ -1749,8 +1747,8 @@  check_load_store_masking (loop_vec_info loop_vinfo, tree vectype,
 	  ? !vect_load_lanes_supported (vectype, group_size, true)
 	  : !vect_store_lanes_supported (vectype, group_size, true))
 	{
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+	  if (optinfo_enabled_p ())
+	    OPTINFO_VECT_FAILURE << optinfo_printf (
 			     "can't use a fully-masked loop because the"
 			     " target doesn't have an appropriate masked"
 			     " load/store-lanes instruction.\n");
@@ -1773,8 +1771,8 @@  check_load_store_masking (loop_vec_info loop_vinfo, tree vectype,
 						   TYPE_SIGN (offset_type),
 						   gs_info->scale))
 	{
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+	  if (optinfo_enabled_p ())
+	    OPTINFO_VECT_FAILURE << optinfo_printf (
 			     "can't use a fully-masked loop because the"
 			     " target doesn't have an appropriate masked"
 			     " gather load or scatter store instruction.\n");
@@ -1791,8 +1789,8 @@  check_load_store_masking (loop_vec_info loop_vinfo, tree vectype,
     {
       /* Element X of the data must come from iteration i * VF + X of the
 	 scalar loop.  We need more work to support other mappings.  */
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_FAILURE << optinfo_printf (
 			 "can't use a fully-masked loop because an access"
 			 " isn't contiguous.\n");
       LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
@@ -1805,8 +1803,8 @@  check_load_store_masking (loop_vec_info loop_vinfo, tree vectype,
 	 GET_MODE_SIZE (vecmode)).exists (&mask_mode))
       || !can_vec_mask_load_store_p (vecmode, mask_mode, is_load))
     {
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_FAILURE << optinfo_printf (
 			 "can't use a fully-masked loop because the target"
 			 " doesn't have the appropriate masked load or"
 			 " store.\n");
@@ -1871,7 +1869,7 @@  vect_truncate_gather_scatter_offset (gimple *stmt, loop_vec_info loop_vinfo,
   if (TREE_CODE (step) != INTEGER_CST)
     {
       /* ??? Perhaps we could use range information here?  */
-      if (dump_enabled_p ())
+      if (optinfo_enabled_p ())
 	dump_printf_loc (MSG_NOTE, vect_location,
 			 "cannot truncate variable step.\n");
       return false;
@@ -1937,7 +1935,7 @@  vect_truncate_gather_scatter_offset (gimple *stmt, loop_vec_info loop_vinfo,
       return true;
     }
 
-  if (overflow_p && dump_enabled_p ())
+  if (overflow_p && optinfo_enabled_p ())
     dump_printf_loc (MSG_NOTE, vect_location,
 		     "truncating gather/scatter offset to %d bits"
 		     " might change its value.\n", element_bits);
@@ -1978,7 +1976,7 @@  vect_use_strided_gather_scatters_p (gimple *stmt, loop_vec_info loop_vinfo,
       gs_info->offset = fold_convert (offset_type, gs_info->offset);
     }
 
-  if (dump_enabled_p ())
+  if (optinfo_enabled_p ())
     dump_printf_loc (MSG_NOTE, vect_location,
 		     "using gather/scatter for strided/grouped access,"
 		     " scale = %d\n", gs_info->scale);
@@ -2101,7 +2099,7 @@  get_group_load_store_type (gimple *stmt, tree vectype, bool slp,
 	  overrun_p = loop_vinfo && gap != 0;
 	  if (overrun_p && vls_type != VLS_LOAD)
 	    {
-	      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+	      OPTINFO_VECT_FAILURE << optinfo_printf (
 			       "Grouped store with gaps requires"
 			       " non-consecutive accesses\n");
 	      return false;
@@ -2116,8 +2114,8 @@  get_group_load_store_type (gimple *stmt, tree vectype, bool slp,
 	    overrun_p = false;
 	  if (overrun_p && !can_overrun_p)
 	    {
-	      if (dump_enabled_p ())
-		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+	      if (optinfo_enabled_p ())
+		OPTINFO_VECT_FAILURE << optinfo_printf (
 				 "Peeling for outer loop is not supported\n");
 	      return false;
 	    }
@@ -2202,9 +2200,9 @@  get_group_load_store_type (gimple *stmt, tree vectype, bool slp,
 	  enum vect_def_type dt;
 	  if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt))
 	    {
-	      if (dump_enabled_p ())
-		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-				 "use not simple.\n");
+	      if (optinfo_enabled_p ())
+		OPTINFO_VECT_FAILURE << optinfo_printf (
+				       "use not simple");
 	      return false;
 	    }
 	  next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
@@ -2214,8 +2212,8 @@  get_group_load_store_type (gimple *stmt, tree vectype, bool slp,
   if (overrun_p)
     {
       gcc_assert (can_overrun_p);
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_FAILURE << optinfo_printf (
 			 "Data access with gaps requires scalar "
 			 "epilogue loop\n");
       LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = true;
@@ -2239,8 +2237,8 @@  get_negative_load_store_type (gimple *stmt, tree vectype,
 
   if (ncopies > 1)
     {
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_FAILURE << optinfo_printf (
 			 "multiple types with negative step.\n");
       return VMAT_ELEMENTWISE;
     }
@@ -2249,15 +2247,15 @@  get_negative_load_store_type (gimple *stmt, tree vectype,
   if (alignment_support_scheme != dr_aligned
       && alignment_support_scheme != dr_unaligned_supported)
     {
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_FAILURE << optinfo_printf (
 			 "negative step but alignment required.\n");
       return VMAT_ELEMENTWISE;
     }
 
   if (vls_type == VLS_STORE_INVARIANT)
     {
-      if (dump_enabled_p ())
+      if (optinfo_enabled_p ())
 	dump_printf_loc (MSG_NOTE, vect_location,
 			 "negative step with invariant source;"
 			 " no permute needed.\n");
@@ -2266,8 +2264,8 @@  get_negative_load_store_type (gimple *stmt, tree vectype,
 
   if (!perm_mask_for_reverse (vectype))
     {
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_FAILURE << optinfo_printf (
 			 "negative step and reversing not supported.\n");
       return VMAT_ELEMENTWISE;
     }
@@ -2305,8 +2303,8 @@  get_load_store_type (gimple *stmt, tree vectype, bool slp, bool masked_p,
 				    &gs_info->offset_dt,
 				    &gs_info->offset_vectype))
 	{
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+	  if (optinfo_enabled_p ())
+	    OPTINFO_VECT_FAILURE << optinfo_printf (
 			     "%s index use not simple.\n",
 			     vls_type == VLS_LOAD ? "gather" : "scatter");
 	  return false;
@@ -2347,8 +2345,8 @@  get_load_store_type (gimple *stmt, tree vectype, bool slp, bool masked_p,
        || *memory_access_type == VMAT_STRIDED_SLP)
       && !nunits.is_constant ())
     {
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_FAILURE << optinfo_printf (
 			 "Not using elementwise accesses due to variable "
 			 "vectorization factor.\n");
       return false;
@@ -2363,8 +2361,8 @@  get_load_store_type (gimple *stmt, tree vectype, bool slp, bool masked_p,
 	   && !GROUP_NEXT_ELEMENT (stmt_info)
 	   && !pow2p_hwi (GROUP_SIZE (stmt_info))))
     {
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_FAILURE << optinfo_printf (
 			 "not falling back to elementwise accesses\n");
       return false;
     }
@@ -2383,16 +2381,16 @@  vect_check_load_store_mask (gimple *stmt, tree mask,
 {
   if (!VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (mask)))
     {
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_FAILURE << optinfo_printf (
 			 "mask argument is not a boolean.\n");
       return false;
     }
 
   if (TREE_CODE (mask) != SSA_NAME)
     {
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_FAILURE << optinfo_printf (
 			 "mask argument is not an SSA name.\n");
       return false;
     }
@@ -2404,9 +2402,9 @@  vect_check_load_store_mask (gimple *stmt, tree mask,
   if (!vect_is_simple_use (mask, stmt_info->vinfo, &def_stmt, &mask_dt,
 			   &mask_vectype))
     {
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			 "mask use not simple.\n");
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_FAILURE << optinfo_printf (
+			       "mask use not simple");
       return false;
     }
 
@@ -2416,8 +2414,8 @@  vect_check_load_store_mask (gimple *stmt, tree mask,
 
   if (!mask_vectype || !VECTOR_BOOLEAN_TYPE_P (mask_vectype))
     {
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_FAILURE << optinfo_printf (
 			 "could not find an appropriate vector mask type.\n");
       return false;
     }
@@ -2425,15 +2423,13 @@  vect_check_load_store_mask (gimple *stmt, tree mask,
   if (maybe_ne (TYPE_VECTOR_SUBPARTS (mask_vectype),
 		TYPE_VECTOR_SUBPARTS (vectype)))
     {
-      if (dump_enabled_p ())
+      if (optinfo_enabled_p ())
 	{
-	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			   "vector mask type ");
-	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, mask_vectype);
-	  dump_printf (MSG_MISSED_OPTIMIZATION,
-		       " does not match vector data type ");
-	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, vectype);
-	  dump_printf (MSG_MISSED_OPTIMIZATION, ".\n");
+	  OPTINFO_VECT_FAILURE
+	    << "vector mask type "
+	    << slim (mask_vectype)
+	    << " does not match vector data type "
+	    << slim (vectype);
 	}
       return false;
     }
@@ -2456,9 +2452,9 @@  vect_check_store_rhs (gimple *stmt, tree rhs, vect_def_type *rhs_dt_out,
      native_encode_expr can handle it.  */
   if (CONSTANT_CLASS_P (rhs) && native_encode_expr (rhs, NULL, 64) == 0)
     {
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			 "cannot encode constant as a byte sequence.\n");
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_FAILURE << optinfo_printf (
+			       "cannot encode constant as a byte sequence");
       return false;
     }
 
@@ -2469,18 +2465,18 @@  vect_check_store_rhs (gimple *stmt, tree rhs, vect_def_type *rhs_dt_out,
   if (!vect_is_simple_use (rhs, stmt_info->vinfo, &def_stmt, &rhs_dt,
 			   &rhs_vectype))
     {
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			 "use not simple.\n");
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_FAILURE << optinfo_printf (
+			       "use not simple");
       return false;
     }
 
   tree vectype = STMT_VINFO_VECTYPE (stmt_info);
   if (rhs_vectype && !useless_type_conversion_p (vectype, rhs_vectype))
     {
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			 "incompatible vector types.\n");
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_FAILURE << optinfo_printf (
+			       "incompatible vector types");
       return false;
     }
 
@@ -2886,9 +2882,8 @@  vectorizable_bswap (gimple *stmt, gimple_stmt_iterator *gsi,
   if (! vec_stmt)
     {
       STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
-      if (dump_enabled_p ())
-        dump_printf_loc (MSG_NOTE, vect_location, "=== vectorizable_bswap ==="
-                         "\n");
+
+      VECT_OPTINFO_SCOPE ("vectorizable_bswap");
       if (! slp_node)
 	{
 	  add_stmt_cost (stmt_info->vinfo->target_cost_data,
@@ -3065,9 +3060,9 @@  vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
       if (rhs_type
 	  && !types_compatible_p (rhs_type, TREE_TYPE (op)))
 	{
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-                             "argument types differ.\n");
+	  if (optinfo_enabled_p ())
+	    OPTINFO_VECT_FAILURE << optinfo_printf (
+				   "argument types differ");
 	  return false;
 	}
       if (!rhs_type)
@@ -3075,9 +3070,9 @@  vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
 
       if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt[i], &opvectype))
 	{
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-                             "use not simple.\n");
+	  if (optinfo_enabled_p ())
+	    OPTINFO_VECT_FAILURE << optinfo_printf (
+				   "use not simple");
 	  return false;
 	}
 
@@ -3086,9 +3081,9 @@  vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
       else if (opvectype
 	       && opvectype != vectype_in)
 	{
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-                             "argument vector types differ.\n");
+	  if (optinfo_enabled_p ())
+	    OPTINFO_VECT_FAILURE << optinfo_printf (
+				   "argument vector types differ");
 	  return false;
 	}
     }
@@ -3100,12 +3095,11 @@  vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
     gcc_assert (vectype_in);
   if (!vectype_in)
     {
-      if (dump_enabled_p ())
+      if (optinfo_enabled_p ())
         {
-          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-                           "no vectype for scalar type ");
-          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
-          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
+          OPTINFO_VECT_FAILURE
+	    << "no vectype for scalar type "
+	    << slim (rhs_type);
         }
 
       return false;
@@ -3126,8 +3120,8 @@  vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
   /* We only handle functions that do not read or clobber memory.  */
   if (gimple_vuse (stmt))
     {
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_FAILURE << optinfo_printf (
 			 "function reads from or writes to memory.\n");
       return false;
     }
@@ -3184,8 +3178,8 @@  vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
 				   vectype_in, dt);
       else
 	{
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+	  if (optinfo_enabled_p ())
+	    OPTINFO_VECT_FAILURE << optinfo_printf (
 			     "function is not vectorizable.\n");
 	  return false;
 	}
@@ -3205,9 +3199,7 @@  vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
   if (!vec_stmt) /* transformation not required.  */
     {
       STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
-      if (dump_enabled_p ())
-        dump_printf_loc (MSG_NOTE, vect_location, "=== vectorizable_call ==="
-                         "\n");
+      VECT_OPTINFO_SCOPE ("vectorizable_call");
       if (!slp_node)
 	{
 	  vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL);
@@ -3221,7 +3213,7 @@  vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
 
   /* Transform.  */
 
-  if (dump_enabled_p ())
+  if (optinfo_enabled_p ())
     dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
 
   /* Handle def.  */
@@ -3666,9 +3658,9 @@  vectorizable_simd_clone_call (gimple *stmt, gimple_stmt_iterator *gsi,
 			       &thisarginfo.vectype)
 	  || thisarginfo.dt == vect_uninitialized_def)
 	{
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			     "use not simple.\n");
+	  if (optinfo_enabled_p ())
+	    OPTINFO_VECT_FAILURE << optinfo_printf (
+				   "use not simple");
 	  return false;
 	}
 
@@ -3741,8 +3733,8 @@  vectorizable_simd_clone_call (gimple *stmt, gimple_stmt_iterator *gsi,
   unsigned HOST_WIDE_INT vf;
   if (!LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&vf))
     {
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_FAILURE << optinfo_printf (
 			 "not considering SIMD clones; not yet supported"
 			 " for variable-width vectors.\n");
       return NULL;
@@ -3884,16 +3876,14 @@  vectorizable_simd_clone_call (gimple *stmt, gimple_stmt_iterator *gsi,
 	    STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (sll);
 	  }
       STMT_VINFO_TYPE (stmt_info) = call_simd_clone_vec_info_type;
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_NOTE, vect_location,
-			 "=== vectorizable_simd_clone_call ===\n");
+      VECT_OPTINFO_SCOPE ("vectorizable_simd_clone_call");
 /*      vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL); */
       return true;
     }
 
   /* Transform.  */
 
-  if (dump_enabled_p ())
+  if (optinfo_enabled_p ())
     dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
 
   /* Handle def.  */
@@ -4524,19 +4514,17 @@  vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi,
 	  || (INTEGRAL_TYPE_P (rhs_type)
 	      && !type_has_mode_precision_p (rhs_type))))
     {
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-                         "type conversion to/from bit-precision unsupported."
-                         "\n");
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_FAILURE << optinfo_printf (
+			       "type conversion to/from bit-precision unsupported");
       return false;
     }
 
   /* Check the operands of the operation.  */
   if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype_in))
     {
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-                         "use not simple.\n");
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_FAILURE << "use not simple";
       return false;
     }
   if (op_type == binary_op)
@@ -4554,9 +4542,8 @@  vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi,
 
       if (!ok)
 	{
-          if (dump_enabled_p ())
-            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-                             "use not simple.\n");
+          if (optinfo_enabled_p ())
+            OPTINFO_VECT_FAILURE << "use not simple";
 	  return false;
 	}
     }
@@ -4569,12 +4556,11 @@  vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi,
     gcc_assert (vectype_in);
   if (!vectype_in)
     {
-      if (dump_enabled_p ())
+      if (optinfo_enabled_p ())
 	{
-	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-                           "no vectype for scalar type ");
-	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
-          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
+	  OPTINFO_VECT_FAILURE
+	    << "no vectype for scalar type "
+	    << slim (rhs_type);
 	}
 
       return false;
@@ -4583,13 +4569,12 @@  vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi,
   if (VECTOR_BOOLEAN_TYPE_P (vectype_out)
       && !VECTOR_BOOLEAN_TYPE_P (vectype_in))
     {
-      if (dump_enabled_p ())
+      if (optinfo_enabled_p ())
 	{
-	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-                           "can't convert between boolean and non "
-			   "boolean vectors");
-	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
-          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
+	  OPTINFO_VECT_FAILURE
+	    << ("can't convert between boolean and non "
+		"boolean vectors")
+	    << slim (rhs_type);
 	}
 
       return false;
@@ -4637,8 +4622,8 @@  vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi,
 	break;
       /* FALLTHRU */
     unsupported:
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_FAILURE << optinfo_printf (
                          "conversion not supported by target.\n");
       return false;
 
@@ -4737,9 +4722,7 @@  vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi,
 
   if (!vec_stmt)		/* transformation not required.  */
     {
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_NOTE, vect_location,
-                         "=== vectorizable_conversion ===\n");
+      VECT_OPTINFO_SCOPE ("vectorizable_conversion");
       if (code == FIX_TRUNC_EXPR || code == FLOAT_EXPR)
         {
 	  STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
@@ -4763,7 +4746,7 @@  vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi,
     }
 
   /* Transform.  */
-  if (dump_enabled_p ())
+  if (optinfo_enabled_p ())
     dump_printf_loc (MSG_NOTE, vect_location,
                      "transform conversion. ncopies = %d.\n", ncopies);
 
@@ -5108,9 +5091,9 @@  vectorizable_assignment (gimple *stmt, gimple_stmt_iterator *gsi,
 
   if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt[0], &vectype_in))
     {
-      if (dump_enabled_p ())
-        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-                         "use not simple.\n");
+      if (optinfo_enabled_p ())
+        OPTINFO_VECT_FAILURE << optinfo_printf (
+			       "use not simple");
       return false;
     }
 
@@ -5140,8 +5123,8 @@  vectorizable_assignment (gimple *stmt, gimple_stmt_iterator *gsi,
       && (!VECTOR_BOOLEAN_TYPE_P (vectype)
 	  || !VECTOR_BOOLEAN_TYPE_P (vectype_in)))
     {
-      if (dump_enabled_p ())
-        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+      if (optinfo_enabled_p ())
+        OPTINFO_VECT_FAILURE << optinfo_printf (
                          "type conversion to/from bit-precision "
                          "unsupported.\n");
       return false;
@@ -5150,16 +5133,14 @@  vectorizable_assignment (gimple *stmt, gimple_stmt_iterator *gsi,
   if (!vec_stmt) /* transformation not required.  */
     {
       STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
-      if (dump_enabled_p ())
-        dump_printf_loc (MSG_NOTE, vect_location,
-                         "=== vectorizable_assignment ===\n");
+      VECT_OPTINFO_SCOPE ("vectorizable_assignment");
       if (!slp_node)
 	vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL);
       return true;
     }
 
   /* Transform.  */
-  if (dump_enabled_p ())
+  if (optinfo_enabled_p ())
     dump_printf_loc (MSG_NOTE, vect_location, "transform assignment.\n");
 
   /* Handle def.  */
@@ -5307,8 +5288,8 @@  vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
   vectype_out = STMT_VINFO_VECTYPE (stmt_info);
   if (!type_has_mode_precision_p (TREE_TYPE (scalar_dest)))
     {
-      if (dump_enabled_p ())
-        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+      if (optinfo_enabled_p ())
+        OPTINFO_VECT_FAILURE << optinfo_printf (
                          "bit-precision shifts not supported.\n");
       return false;
     }
@@ -5316,9 +5297,9 @@  vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
   op0 = gimple_assign_rhs1 (stmt);
   if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype))
     {
-      if (dump_enabled_p ())
-        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-                         "use not simple.\n");
+      if (optinfo_enabled_p ())
+        OPTINFO_VECT_FAILURE << optinfo_printf (
+			       "use not simple");
       return false;
     }
   /* If op0 is an external or constant def use a vector type with
@@ -5329,8 +5310,8 @@  vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
     gcc_assert (vectype);
   if (!vectype)
     {
-      if (dump_enabled_p ())
-        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+      if (optinfo_enabled_p ())
+        OPTINFO_VECT_FAILURE << optinfo_printf (
                          "no vectype for scalar type\n");
       return false;
     }
@@ -5343,9 +5324,9 @@  vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
   op1 = gimple_assign_rhs2 (stmt);
   if (!vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1], &op1_vectype))
     {
-      if (dump_enabled_p ())
-        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-                         "use not simple.\n");
+      if (optinfo_enabled_p ())
+        OPTINFO_VECT_FAILURE << optinfo_printf (
+			       "use not simple");
       return false;
     }
 
@@ -5395,8 +5376,8 @@  vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
     }
   else
     {
-      if (dump_enabled_p ())
-        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+      if (optinfo_enabled_p ())
+        OPTINFO_VECT_FAILURE << optinfo_printf (
                          "operand mode requires invariant argument.\n");
       return false;
     }
@@ -5405,7 +5386,7 @@  vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
   if (!scalar_shift_arg)
     {
       optab = optab_for_tree_code (code, vectype, optab_vector);
-      if (dump_enabled_p ())
+      if (optinfo_enabled_p ())
         dump_printf_loc (MSG_NOTE, vect_location,
                          "vector/vector shift/rotate found.\n");
 
@@ -5414,8 +5395,8 @@  vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
       if (op1_vectype == NULL_TREE
 	  || TYPE_MODE (op1_vectype) != TYPE_MODE (vectype))
 	{
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+	  if (optinfo_enabled_p ())
+	    OPTINFO_VECT_FAILURE << optinfo_printf (
                              "unusable type for last operand in"
                              " vector/vector shift/rotate.\n");
 	  return false;
@@ -5429,7 +5410,7 @@  vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
       if (optab
           && optab_handler (optab, TYPE_MODE (vectype)) != CODE_FOR_nothing)
         {
-          if (dump_enabled_p ())
+          if (optinfo_enabled_p ())
             dump_printf_loc (MSG_NOTE, vect_location,
                              "vector/scalar shift/rotate found.\n");
         }
@@ -5442,7 +5423,7 @@  vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
             {
 	      scalar_shift_arg = false;
 
-              if (dump_enabled_p ())
+              if (optinfo_enabled_p ())
                 dump_printf_loc (MSG_NOTE, vect_location,
                                  "vector/vector shift/rotate found.\n");
 
@@ -5459,8 +5440,8 @@  vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
 		      && TYPE_MODE (TREE_TYPE (vectype))
 			 != TYPE_MODE (TREE_TYPE (op1)))
 		    {
-                      if (dump_enabled_p ())
-                        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+                      if (optinfo_enabled_p ())
+                        OPTINFO_VECT_FAILURE << optinfo_printf (
                                          "unusable type for last operand in"
                                          " vector/vector shift/rotate.\n");
 		      return false;
@@ -5479,8 +5460,8 @@  vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
   /* Supportable by target?  */
   if (!optab)
     {
-      if (dump_enabled_p ())
-        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+      if (optinfo_enabled_p ())
+        OPTINFO_VECT_FAILURE << optinfo_printf (
                          "no optab.\n");
       return false;
     }
@@ -5488,15 +5469,15 @@  vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
   icode = (int) optab_handler (optab, vec_mode);
   if (icode == CODE_FOR_nothing)
     {
-      if (dump_enabled_p ())
-        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+      if (optinfo_enabled_p ())
+        OPTINFO_VECT_FAILURE << optinfo_printf (
                          "op not supported by target.\n");
       /* Check only during analysis.  */
       if (maybe_ne (GET_MODE_SIZE (vec_mode), UNITS_PER_WORD)
 	  || (!vec_stmt
 	      && !vect_worthwhile_without_simd_p (vinfo, code)))
         return false;
-      if (dump_enabled_p ())
+      if (optinfo_enabled_p ())
         dump_printf_loc (MSG_NOTE, vect_location,
                          "proceeding using word mode.\n");
     }
@@ -5506,8 +5487,8 @@  vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
       && !VECTOR_MODE_P (TYPE_MODE (vectype))
       && !vect_worthwhile_without_simd_p (vinfo, code))
     {
-      if (dump_enabled_p ())
-        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+      if (optinfo_enabled_p ())
+        OPTINFO_VECT_FAILURE << optinfo_printf (
                          "not worthwhile without SIMD support.\n");
       return false;
     }
@@ -5515,9 +5496,7 @@  vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
   if (!vec_stmt) /* transformation not required.  */
     {
       STMT_VINFO_TYPE (stmt_info) = shift_vec_info_type;
-      if (dump_enabled_p ())
-        dump_printf_loc (MSG_NOTE, vect_location,
-                         "=== vectorizable_shift ===\n");
+      VECT_OPTINFO_SCOPE ("vectorizable_shift");
       if (!slp_node)
 	vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL);
       return true;
@@ -5525,7 +5504,7 @@  vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
 
   /* Transform.  */
 
-  if (dump_enabled_p ())
+  if (optinfo_enabled_p ())
     dump_printf_loc (MSG_NOTE, vect_location,
                      "transform binary/unary operation.\n");
 
@@ -5547,7 +5526,7 @@  vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
               optab_op2_mode = insn_data[icode].operand[2].mode;
               if (!VECTOR_MODE_P (optab_op2_mode))
                 {
-                  if (dump_enabled_p ())
+                  if (optinfo_enabled_p ())
                     dump_printf_loc (MSG_NOTE, vect_location,
                                      "operand 1 using scalar mode.\n");
                   vec_oprnd1 = op1;
@@ -5677,8 +5656,8 @@  vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi,
   op_type = TREE_CODE_LENGTH (code);
   if (op_type != unary_op && op_type != binary_op && op_type != ternary_op)
     {
-      if (dump_enabled_p ())
-        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+      if (optinfo_enabled_p ())
+        OPTINFO_VECT_FAILURE << optinfo_printf (
                          "num. args = %d (not unary/binary/ternary op).\n",
                          op_type);
       return false;
@@ -5696,8 +5675,8 @@  vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi,
       && code != BIT_XOR_EXPR
       && code != BIT_AND_EXPR)
     {
-      if (dump_enabled_p ())
-        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+      if (optinfo_enabled_p ())
+        OPTINFO_VECT_FAILURE << optinfo_printf (
                          "bit-precision arithmetic not supported.\n");
       return false;
     }
@@ -5705,9 +5684,9 @@  vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi,
   op0 = gimple_assign_rhs1 (stmt);
   if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype))
     {
-      if (dump_enabled_p ())
-        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-                         "use not simple.\n");
+      if (optinfo_enabled_p ())
+        OPTINFO_VECT_FAILURE << optinfo_printf (
+			       "use not simple");
       return false;
     }
   /* If op0 is an external or constant def use a vector type with
@@ -5723,8 +5702,8 @@  vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi,
 	{
 	  if (!VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (scalar_dest)))
 	    {
-	      if (dump_enabled_p ())
-		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+	      if (optinfo_enabled_p ())
+		OPTINFO_VECT_FAILURE << optinfo_printf (
 				 "not supported operation on bool value.\n");
 	      return false;
 	    }
@@ -5737,13 +5716,11 @@  vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi,
     gcc_assert (vectype);
   if (!vectype)
     {
-      if (dump_enabled_p ())
+      if (optinfo_enabled_p ())
         {
-          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-                           "no vectype for scalar type ");
-          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
-                             TREE_TYPE (op0));
-          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
+          OPTINFO_VECT_FAILURE
+	    << "no vectype for scalar type "
+	    << slim (TREE_TYPE (op0));
         }
 
       return false;
@@ -5759,9 +5736,9 @@  vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi,
       op1 = gimple_assign_rhs2 (stmt);
       if (!vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1]))
 	{
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-                             "use not simple.\n");
+	  if (optinfo_enabled_p ())
+	    OPTINFO_VECT_FAILURE << optinfo_printf (
+				   "use not simple");
 	  return false;
 	}
     }
@@ -5770,9 +5747,9 @@  vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi,
       op2 = gimple_assign_rhs3 (stmt);
       if (!vect_is_simple_use (op2, vinfo, &def_stmt, &dt[2]))
 	{
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-                             "use not simple.\n");
+	  if (optinfo_enabled_p ())
+	    OPTINFO_VECT_FAILURE << optinfo_printf (
+				   "use not simple");
 	  return false;
 	}
     }
@@ -5802,8 +5779,8 @@  vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi,
       optab = optab_for_tree_code (code, vectype, optab_default);
       if (!optab)
 	{
-          if (dump_enabled_p ())
-            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+          if (optinfo_enabled_p ())
+            OPTINFO_VECT_FAILURE << optinfo_printf (
                              "no optab.\n");
 	  return false;
 	}
@@ -5813,14 +5790,14 @@  vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi,
 
   if (!target_support_p)
     {
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_FAILURE << optinfo_printf (
                          "op not supported by target.\n");
       /* Check only during analysis.  */
       if (maybe_ne (GET_MODE_SIZE (vec_mode), UNITS_PER_WORD)
 	  || (!vec_stmt && !vect_worthwhile_without_simd_p (vinfo, code)))
         return false;
-      if (dump_enabled_p ())
+      if (optinfo_enabled_p ())
 	dump_printf_loc (MSG_NOTE, vect_location,
                          "proceeding using word mode.\n");
     }
@@ -5830,8 +5807,8 @@  vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi,
       && !vec_stmt
       && !vect_worthwhile_without_simd_p (vinfo, code))
     {
-      if (dump_enabled_p ())
-        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+      if (optinfo_enabled_p ())
+        OPTINFO_VECT_FAILURE << optinfo_printf (
                          "not worthwhile without SIMD support.\n");
       return false;
     }
@@ -5839,9 +5816,7 @@  vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi,
   if (!vec_stmt) /* transformation not required.  */
     {
       STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
-      if (dump_enabled_p ())
-        dump_printf_loc (MSG_NOTE, vect_location,
-                         "=== vectorizable_operation ===\n");
+      VECT_OPTINFO_SCOPE ("vectorizable_operation");
       if (!slp_node)
 	vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL);
       return true;
@@ -5849,7 +5824,7 @@  vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi,
 
   /* Transform.  */
 
-  if (dump_enabled_p ())
+  if (optinfo_enabled_p ())
     dump_printf_loc (MSG_NOTE, vect_location,
                      "transform binary/unary operation.\n");
 
@@ -6029,7 +6004,7 @@  get_group_alias_ptr_type (gimple *first_stmt)
       if (get_alias_set (DR_REF (first_dr))
 	  != get_alias_set (DR_REF (next_dr)))
 	{
-	  if (dump_enabled_p ())
+	  if (optinfo_enabled_p ())
 	    dump_printf_loc (MSG_NOTE, vect_location,
 			     "conflicting alias set types.\n");
 	  return ptr_type_node;
@@ -6128,8 +6103,8 @@  vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
 
       if (slp_node != NULL)
 	{
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+	  if (optinfo_enabled_p ())
+	    OPTINFO_VECT_FAILURE << optinfo_printf (
 			     "SLP of masked stores not supported.\n");
 	  return false;
 	}
@@ -6174,8 +6149,8 @@  vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
   /* FORNOW.  This restriction should be relaxed.  */
   if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
     {
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_FAILURE << optinfo_printf (
 			 "multiple types in nested loop.\n");
       return false;
     }
@@ -6206,8 +6181,8 @@  vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
       else if (memory_access_type != VMAT_LOAD_STORE_LANES
 	       && (memory_access_type != VMAT_GATHER_SCATTER || gs_info.decl))
 	{
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+	  if (optinfo_enabled_p ())
+	    OPTINFO_VECT_FAILURE << optinfo_printf (
 			     "unsupported access type for masked store.\n");
 	  return false;
 	}
@@ -6450,7 +6425,7 @@  vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
   else
     ref_type = reference_alias_ptr_type (DR_REF (first_dr));
 
-  if (dump_enabled_p ())
+  if (optinfo_enabled_p ())
     dump_printf_loc (MSG_NOTE, vect_location,
                      "transform store. ncopies = %d\n", ncopies);
 
@@ -7291,8 +7266,8 @@  vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
 
       if (slp_node != NULL)
 	{
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+	  if (optinfo_enabled_p ())
+	    OPTINFO_VECT_FAILURE << optinfo_printf (
 			     "SLP of masked loads not supported.\n");
 	  return false;
 	}
@@ -7335,8 +7310,8 @@  vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
   /* FORNOW. This restriction should be relaxed.  */
   if (nested_in_vect_loop && ncopies > 1)
     {
-      if (dump_enabled_p ())
-        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+      if (optinfo_enabled_p ())
+        OPTINFO_VECT_FAILURE << optinfo_printf (
                          "multiple types in nested loop.\n");
       return false;
     }
@@ -7348,8 +7323,8 @@  vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
       && maybe_gt (LOOP_VINFO_VECT_FACTOR (loop_vinfo),
 		   STMT_VINFO_MIN_NEG_DIST (stmt_info)))
     {
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_FAILURE << optinfo_printf (
 			 "cannot perform implicit CSE when unrolling "
 			 "with negative dependence distance\n");
       return false;
@@ -7362,8 +7337,8 @@  vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
     (e.g. - data copies).  */
   if (optab_handler (mov_optab, mode) == CODE_FOR_nothing)
     {
-      if (dump_enabled_p ())
-        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+      if (optinfo_enabled_p ())
+        OPTINFO_VECT_FAILURE << optinfo_printf (
                          "Aligned load, but unsupported type.\n");
       return false;
     }
@@ -7389,8 +7364,8 @@  vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
 	  && maybe_gt (LOOP_VINFO_VECT_FACTOR (loop_vinfo),
 		       STMT_VINFO_MIN_NEG_DIST (stmt_info)))
 	{
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+	  if (optinfo_enabled_p ())
+	    OPTINFO_VECT_FAILURE << optinfo_printf (
 			     "cannot perform implicit CSE when performing "
 			     "group loads with negative dependence distance\n");
 	  return false;
@@ -7404,8 +7379,8 @@  vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
 	      != STMT_SLP_TYPE (vinfo_for_stmt
 				 (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info)))))
 	{
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+	  if (optinfo_enabled_p ())
+	    OPTINFO_VECT_FAILURE << optinfo_printf (
 			     "conflicting SLP types for CSEd load\n");
 	  return false;
 	}
@@ -7435,8 +7410,8 @@  vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
 	    = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist))));
 	  if (TREE_CODE (masktype) == INTEGER_TYPE)
 	    {
-	      if (dump_enabled_p ())
-		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+	      if (optinfo_enabled_p ())
+		OPTINFO_VECT_FAILURE << optinfo_printf (
 				 "masked gather with integer mask not"
 				 " supported.");
 	      return false;
@@ -7445,8 +7420,8 @@  vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
       else if (memory_access_type != VMAT_LOAD_STORE_LANES
 	       && memory_access_type != VMAT_GATHER_SCATTER)
 	{
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+	  if (optinfo_enabled_p ())
+	    OPTINFO_VECT_FAILURE << optinfo_printf (
 			     "unsupported access type for masked load.\n");
 	  return false;
 	}
@@ -7474,7 +7449,7 @@  vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
     gcc_assert (memory_access_type
 		== STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info));
 
-  if (dump_enabled_p ())
+  if (optinfo_enabled_p ())
     dump_printf_loc (MSG_NOTE, vect_location,
                      "transform load. ncopies = %d\n", ncopies);
 
@@ -8304,7 +8279,7 @@  vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
 		      && !nested_in_vect_loop
 		      && hoist_defs_of_uses (stmt, loop))
 		    {
-		      if (dump_enabled_p ())
+		      if (optinfo_enabled_p ())
 			{
 			  dump_printf_loc (MSG_NOTE, vect_location,
 					   "hoisting out of the vectorized "
@@ -8566,8 +8541,8 @@  vectorizable_condition (gimple *stmt, gimple_stmt_iterator *gsi,
       /* FORNOW: not yet supported.  */
       if (STMT_VINFO_LIVE_P (stmt_info))
 	{
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+	  if (optinfo_enabled_p ())
+	    OPTINFO_VECT_FAILURE << optinfo_printf (
 			     "value used after loop.\n");
 	  return false;
 	}
@@ -8968,8 +8943,8 @@  vectorizable_comparison (gimple *stmt, gimple_stmt_iterator *gsi,
 
   if (STMT_VINFO_LIVE_P (stmt_info))
     {
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_FAILURE << optinfo_printf (
 			 "value used after loop.\n");
       return false;
     }
@@ -9225,7 +9200,7 @@  vect_analyze_stmt (gimple *stmt, bool *need_to_vectorize, slp_tree node,
   gimple *pattern_stmt;
   gimple_seq pattern_def_seq;
 
-  if (dump_enabled_p ())
+  if (optinfo_enabled_p ())
     {
       dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: ");
       dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
@@ -9233,8 +9208,8 @@  vect_analyze_stmt (gimple *stmt, bool *need_to_vectorize, slp_tree node,
 
   if (gimple_has_volatile_ops (stmt))
     {
-      if (dump_enabled_p ())
-        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+      if (optinfo_enabled_p ())
+        OPTINFO_VECT_FAILURE << optinfo_printf (
                          "not vectorized: stmt has volatile operands\n");
 
       return false;
@@ -9266,7 +9241,7 @@  vect_analyze_stmt (gimple *stmt, bool *need_to_vectorize, slp_tree node,
           /* Analyze PATTERN_STMT instead of the original stmt.  */
           stmt = pattern_stmt;
           stmt_info = vinfo_for_stmt (pattern_stmt);
-          if (dump_enabled_p ())
+          if (optinfo_enabled_p ())
             {
               dump_printf_loc (MSG_NOTE, vect_location,
                                "==> examining pattern statement: ");
@@ -9275,7 +9250,7 @@  vect_analyze_stmt (gimple *stmt, bool *need_to_vectorize, slp_tree node,
         }
       else
         {
-          if (dump_enabled_p ())
+          if (optinfo_enabled_p ())
             dump_printf_loc (MSG_NOTE, vect_location, "irrelevant.\n");
 
           return true;
@@ -9288,7 +9263,7 @@  vect_analyze_stmt (gimple *stmt, bool *need_to_vectorize, slp_tree node,
                || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
     {
       /* Analyze PATTERN_STMT too.  */
-      if (dump_enabled_p ())
+      if (optinfo_enabled_p ())
         {
           dump_printf_loc (MSG_NOTE, vect_location,
                            "==> examining pattern statement: ");
@@ -9313,7 +9288,7 @@  vect_analyze_stmt (gimple *stmt, bool *need_to_vectorize, slp_tree node,
 	      || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_def_stmt)))
 	    {
 	      /* Analyze def stmt of STMT if it's a pattern stmt.  */
-	      if (dump_enabled_p ())
+	      if (optinfo_enabled_p ())
 		{
 		  dump_printf_loc (MSG_NOTE, vect_location,
                                    "==> examining pattern def statement: ");
@@ -9402,12 +9377,11 @@  vect_analyze_stmt (gimple *stmt, bool *need_to_vectorize, slp_tree node,
 
   if (!ok)
     {
-      if (dump_enabled_p ())
+      if (optinfo_enabled_p ())
         {
-          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-                           "not vectorized: relevant stmt not ");
-          dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
-          dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
+          OPTINFO_VECT_FAILURE
+	    << "not vectorized: relevant stmt not supported: "
+	    << stmt;
         }
 
       return false;
@@ -9421,11 +9395,11 @@  vect_analyze_stmt (gimple *stmt, bool *need_to_vectorize, slp_tree node,
   if (STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
       && !can_vectorize_live_stmts (stmt, NULL, node, NULL))
     {
-      if (dump_enabled_p ())
+      if (optinfo_enabled_p ())
         {
-          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-                           "not vectorized: live stmt not supported: ");
-          dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
+          OPTINFO_VECT_FAILURE
+	    << "not vectorized: live stmt not supported: "
+	    << stmt;
         }
 
        return false;
@@ -9540,8 +9514,8 @@  vect_transform_stmt (gimple *stmt, gimple_stmt_iterator *gsi,
     default:
       if (!STMT_VINFO_LIVE_P (stmt_info))
 	{
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+	  if (optinfo_enabled_p ())
+	    OPTINFO_VECT_FAILURE << optinfo_printf (
                              "stmt not supported.\n");
 	  gcc_unreachable ();
 	}
@@ -9569,7 +9543,7 @@  vect_transform_stmt (gimple *stmt, gimple_stmt_iterator *gsi,
       tree scalar_dest;
       gimple *exit_phi;
 
-      if (dump_enabled_p ())
+      if (optinfo_enabled_p ())
         dump_printf_loc (MSG_NOTE, vect_location,
                          "Record the vdef for outer-loop vectorization.\n");
 
@@ -9907,12 +9881,11 @@  vect_is_simple_use (tree operand, vec_info *vinfo,
   *def_stmt = NULL;
   *dt = vect_unknown_def_type;
 
-  if (dump_enabled_p ())
+  if (optinfo_enabled_p ())
     {
-      dump_printf_loc (MSG_NOTE, vect_location,
-                       "vect_is_simple_use: operand ");
-      dump_generic_expr (MSG_NOTE, TDF_SLIM, operand);
-      dump_printf (MSG_NOTE, "\n");
+      OPTINFO_VECT_NOTE
+	<< "vect_is_simple_use: operand "
+	<< slim (operand);
     }
 
   if (CONSTANT_CLASS_P (operand))
@@ -9929,9 +9902,9 @@  vect_is_simple_use (tree operand, vec_info *vinfo,
 
   if (TREE_CODE (operand) != SSA_NAME)
     {
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			 "not ssa-name.\n");
+      if (optinfo_enabled_p ())
+	OPTINFO_VECT_FAILURE
+	  << "not ssa-name";
       return false;
     }
 
@@ -9942,10 +9915,10 @@  vect_is_simple_use (tree operand, vec_info *vinfo,
     }
 
   *def_stmt = SSA_NAME_DEF_STMT (operand);
-  if (dump_enabled_p ())
+  if (optinfo_enabled_p ())
     {
-      dump_printf_loc (MSG_NOTE, vect_location, "def_stmt: ");
-      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, *def_stmt, 0);
+      OPTINFO_VECT_NOTE
+	<< "def_stmt: " << *def_stmt;
     }
 
   if (! vect_stmt_in_region_p (vinfo, *def_stmt))
@@ -9956,46 +9929,45 @@  vect_is_simple_use (tree operand, vec_info *vinfo,
       *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
     }
 
-  if (dump_enabled_p ())
+  if (optinfo_enabled_p ())
     {
-      dump_printf_loc (MSG_NOTE, vect_location, "type of def: ");
+      pending_optinfo info = OPTINFO_VECT_NOTE << "type of def: ";
       switch (*dt)
 	{
 	case vect_uninitialized_def:
-	  dump_printf (MSG_NOTE, "uninitialized\n");
+	  info << "uninitialized";
 	  break;
 	case vect_constant_def:
-	  dump_printf (MSG_NOTE, "constant\n");
+	  info << "constant";
 	  break;
 	case vect_external_def:
-	  dump_printf (MSG_NOTE, "external\n");
+	  info << "external";
 	  break;
 	case vect_internal_def:
-	  dump_printf (MSG_NOTE, "internal\n");
+	  info << "internal";
 	  break;
 	case vect_induction_def:
-	  dump_printf (MSG_NOTE, "induction\n");
+	  info << "induction";
 	  break;
 	case vect_reduction_def:
-	  dump_printf (MSG_NOTE, "reduction\n");
+	  info << "reduction";
 	  break;
 	case vect_double_reduction_def:
-	  dump_printf (MSG_NOTE, "double reduction\n");
+	  info << "double reduction";
 	  break;
 	case vect_nested_cycle:
-	  dump_printf (MSG_NOTE, "nested cycle\n");
+	  info << "nested cycle";
 	  break;
 	case vect_unknown_def_type:
-	  dump_printf (MSG_NOTE, "unknown\n");
+	  info << "unknown";
 	  break;
 	}
     }
 
   if (*dt == vect_unknown_def_type)
     {
-      if (dump_enabled_p ())
-        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-                         "Unsupported pattern.\n");
+      if (optinfo_enabled_p ())
+        OPTINFO_VECT_FAILURE << "Unsupported pattern";
       return false;
     }
 
@@ -10006,9 +9978,8 @@  vect_is_simple_use (tree operand, vec_info *vinfo,
     case GIMPLE_CALL:
       break;
     default:
-      if (dump_enabled_p ())
-        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-                         "unsupported defining stmt:\n");
+      if (optinfo_enabled_p ())
+        OPTINFO_VECT_FAILURE << "unsupported defining stmt:";
       return false;
     }
 
diff --git a/gcc/tree-vectorizer.c b/gcc/tree-vectorizer.c
index fb81b98..f31bc5d 100644
--- a/gcc/tree-vectorizer.c
+++ b/gcc/tree-vectorizer.c
@@ -78,10 +78,12 @@  along with GCC; see the file COPYING3.  If not see
 #include "tree-scalar-evolution.h"
 #include "stringpool.h"
 #include "attribs.h"
+#include "optinfo.h"
 
 
 /* Loop or bb location.  */
 source_location vect_location;
+optinfo_location vect_optinfo_location;
 
 /* Vector mapping GIMPLE stmt to stmt_vec_info. */
 vec<stmt_vec_info> stmt_vec_info_vec;
@@ -649,17 +651,28 @@  vectorize_loops (void)
 	if (!((flag_tree_loop_vectorize
 	       && optimize_loop_nest_for_speed_p (loop))
 	      || loop->force_vectorize))
-	  continue;
+	  {
+	    /* FIXME: maybe break these out into separate failure messages;
+	       this path can be taken e.g. when the loop is not hot.  */
+	    if (optinfo_enabled_p ())
+	      {
+		vect_location = find_loop_location (loop);
+		OPTINFO_VECT_FAILURE << "not attempting to optimize loop nest";
+	      }
+	    continue;
+	  }
 	orig_loop_vinfo = NULL;
 	loop_vectorized_call = vect_loop_vectorized_call (loop);
 	loop_dist_alias_call = vect_loop_dist_alias_call (loop);
        vectorize_epilogue:
 	vect_location = find_loop_location (loop);
+	vect_optinfo_location = loop;
         if (LOCATION_LOCUS (vect_location) != UNKNOWN_LOCATION
 	    && dump_enabled_p ())
 	  dump_printf (MSG_NOTE, "\nAnalyzing loop at %s:%d\n",
                        LOCATION_FILE (vect_location),
 		       LOCATION_LINE (vect_location));
+	VECT_OPTINFO_SCOPE ("analyzing loop");
 
 	loop_vinfo = vect_analyze_loop (loop, orig_loop_vinfo);
 	loop->aux = loop_vinfo;
@@ -733,10 +746,8 @@  vectorize_loops (void)
 
 	if (loop_vectorized_call)
 	  set_uid_loop_bbs (loop_vinfo, loop_vectorized_call);
-        if (LOCATION_LOCUS (vect_location) != UNKNOWN_LOCATION
-	    && dump_enabled_p ())
-          dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
-                           "loop vectorized\n");
+        if (optinfo_enabled_p ())
+	  OPTINFO_VECT_SUCCESS << "loop vectorized";
 	new_loop = vect_transform_loop (loop_vinfo);
 	num_vectorized_loops++;
 	/* Now that the loop has been vectorized, allow it to be unrolled
@@ -778,14 +789,15 @@  vectorize_loops (void)
 	  }
       }
 
+  if (optinfo_enabled_p ())
+    OPTINFO_VECT_NOTE
+      << optinfo_printf ("vectorized %u loops in function",
+			 num_vectorized_loops);
+
   vect_location = UNKNOWN_LOCATION;
+  vect_optinfo_location = (gimple *)NULL;
 
   statistics_counter_event (cfun, "Vectorized loops", num_vectorized_loops);
-  if (dump_enabled_p ()
-      || (num_vectorized_loops > 0 && dump_enabled_p ()))
-    dump_printf_loc (MSG_NOTE, vect_location,
-                     "vectorized %u loops in function.\n",
-                     num_vectorized_loops);
 
   /*  ----------- Finalize. -----------  */
 
@@ -1120,6 +1132,7 @@  increase_alignment (void)
   varpool_node *vnode;
 
   vect_location = UNKNOWN_LOCATION;
+  vect_optinfo_location = (gimple *)NULL;
   type_align_map = new hash_map<tree, unsigned>;
 
   /* Increase the alignment of all global arrays for vectorization.  */
@@ -1137,9 +1150,12 @@  increase_alignment (void)
       if (alignment && vect_can_force_dr_alignment_p (decl, alignment))
         {
 	  vnode->increase_alignment (alignment);
-          dump_printf (MSG_NOTE, "Increasing alignment of decl: ");
-          dump_generic_expr (MSG_NOTE, TDF_SLIM, decl);
-          dump_printf (MSG_NOTE, "\n");
+	  if (optinfo_enabled_p ())
+	    {
+	      OPTINFO_VECT_NOTE
+		<< "Increasing alignment of decl: "
+		<< slim (decl);
+	    }
         }
     }
 
diff --git a/gcc/tree-vectorizer.h b/gcc/tree-vectorizer.h
index 7e2b00f..45baaf3 100644
--- a/gcc/tree-vectorizer.h
+++ b/gcc/tree-vectorizer.h
@@ -24,6 +24,7 @@  along with GCC; see the file COPYING3.  If not see
 #include "tree-data-ref.h"
 #include "tree-hash-traits.h"
 #include "target.h"
+#include "optinfo.h"
 
 /* Used for naming of new temporaries.  */
 enum vect_var_kind {
@@ -1383,6 +1384,42 @@  vect_get_scalar_dr_size (struct data_reference *dr)
 /* Source location */
 extern source_location vect_location;
 
+/* FIXME: this must be updated whenever vect_location changes.
+   FIXME: alternatively, merge vect_location and vect_optinfo_location
+   into a single class so they cannot get out of sync.  */
+extern GTY(()) optinfo_location vect_optinfo_location;
+
+extern pending_optinfo
+emit_optinfo_at_vect_location (const optinfo_impl_location &impl_location,
+			       enum optinfo_kind kind);
+
+/* Emit optimization information at the vect location.  */
+
+#define OPTINFO_VECT(KIND) \
+  (emit_optinfo_at_vect_location (OPTINFO_IMPL_LOCATION, (KIND)))
+
+/* Emit that a successful optimization happened at the vect location.  */
+
+#define OPTINFO_VECT_SUCCESS \
+  (OPTINFO_VECT (OPTINFO_KIND_SUCCESS))
+
+/* Emit that a failed optimization happened at the vect location.  */
+
+#define OPTINFO_VECT_FAILURE \
+  (OPTINFO_VECT (OPTINFO_KIND_FAILURE))
+
+/* Emit a remark relating to an optimization at the vect location.  */
+
+#define OPTINFO_VECT_NOTE \
+  (OPTINFO_VECT (OPTINFO_KIND_NOTE))
+
+/* A macro for emitting an optinfo note about entering a scope,
+   pushing the scope on entry and popping it on exit, so that all
+   optinfos emitted "within" the scope are nested inside it.
+   Uses the vect location.  */
+
+#define VECT_OPTINFO_SCOPE(NAME) \
+  OPTINFO_SCOPE ((NAME), vect_optinfo_location)
+
+
 /*-----------------------------------------------------------------*/
 /* Function prototypes.                                            */
 /*-----------------------------------------------------------------*/