Change HLO op names to match XLA op code names

The op code for DivOp in XLA is "divide", not "div", but the op name was still using the shorter form, since that is what the XLA builders use. This change renames the ops to match the XLA op code names.

Similarly for MulOp ("multiply"), MaxOp ("maximum"), MinOp ("minimum"), and SubOp ("subtract").
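
For example, in MLIR assembly (illustrative, matching the diff below):

  %0 = "xla_hlo.div"(%arg0, %arg1) : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>     // before
  %0 = "xla_hlo.divide"(%arg0, %arg1) : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>  // after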

PiperOrigin-RevId: 301447200
diff --git a/bindings/python/pyiree/compiler/compiler_test.py b/bindings/python/pyiree/compiler/compiler_test.py
index 2b68f0c..3f8a483 100644
--- a/bindings/python/pyiree/compiler/compiler_test.py
+++ b/bindings/python/pyiree/compiler/compiler_test.py
@@ -19,7 +19,7 @@
 SIMPLE_MUL_ASM = """
 func @simple_mul(%arg0: tensor<4xf32>, %arg1: tensor<4xf32>) -> tensor<4xf32>
       attributes { iree.module.export } {
-    %0 = "xla_hlo.mul"(%arg0, %arg1) {name = "mul.1"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>
+    %0 = "xla_hlo.multiply"(%arg0, %arg1) {name = "mul.1"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>
     return %0 : tensor<4xf32>
 }
 """
diff --git a/bindings/python/pyiree/rt/system_api_test.py b/bindings/python/pyiree/rt/system_api_test.py
index 5dc1548..f1702be 100644
--- a/bindings/python/pyiree/rt/system_api_test.py
+++ b/bindings/python/pyiree/rt/system_api_test.py
@@ -29,7 +29,7 @@
   module @arithmetic {
     func @simple_mul(%arg0: tensor<4xf32>, %arg1: tensor<4xf32>) -> tensor<4xf32>
           attributes { iree.module.export } {
-        %0 = "xla_hlo.mul"(%arg0, %arg1) {name = "mul.1"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>
+        %0 = "xla_hlo.multiply"(%arg0, %arg1) {name = "mul.1"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>
         return %0 : tensor<4xf32>
     }
   }
diff --git a/bindings/python/pyiree/rt/vm_test.py b/bindings/python/pyiree/rt/vm_test.py
index e947000..ecef32c 100644
--- a/bindings/python/pyiree/rt/vm_test.py
+++ b/bindings/python/pyiree/rt/vm_test.py
@@ -26,7 +26,7 @@
   input_module = ctx.parse_asm("""
     func @simple_mul(%arg0: tensor<4xf32>, %arg1: tensor<4xf32>) -> tensor<4xf32>
           attributes { iree.module.export } {
-        %0 = "xla_hlo.mul"(%arg0, %arg1) {name = "mul.1"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>
+        %0 = "xla_hlo.multiply"(%arg0, %arg1) {name = "mul.1"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>
         return %0 : tensor<4xf32>
     }
     """)
diff --git a/colab/edge_detection.ipynb b/colab/edge_detection.ipynb
index bb727c1..ac71f1e 100644
--- a/colab/edge_detection.ipynb
+++ b/colab/edge_detection.ipynb
@@ -122,9 +122,9 @@
             "    %0 = xla_hlo.constant dense<[[[[-1.000000e+00]], [[0.000000e+00]], [[1.000000e+00]]], [[[-2.000000e+00]], [[0.000000e+00]], [[2.000000e+00]]], [[[-1.000000e+00]], [[0.000000e+00]], [[1.000000e+00]]]]> : tensor<3x3x1x1xf32>\n",
             "    %1 = xla_hlo.constant dense<[[[[1.000000e+00]], [[2.000000e+00]], [[1.000000e+00]]], [[[0.000000e+00]], [[0.000000e+00]], [[0.000000e+00]]], [[[-1.000000e+00]], [[-2.000000e+00]], [[-1.000000e+00]]]]> : tensor<3x3x1x1xf32>\n",
             "    %2 = \"xla_hlo.conv\"(%arg0, %0) {batch_group_count = 1 : i64, dimension_numbers = {input_batch_dimension = 0 : i64, input_feature_dimension = 3 : i64, input_spatial_dimensions = dense<[1, 2]> : tensor<2xi64>, kernel_input_feature_dimension = 2 : i64, kernel_output_feature_dimension = 3 : i64, kernel_spatial_dimensions = dense<[0, 1]> : tensor<2xi64>, output_batch_dimension = 0 : i64, output_feature_dimension = 3 : i64, output_spatial_dimensions = dense<[1, 2]> : tensor<2xi64>}, feature_group_count = 1 : i64, padding = dense<1> : tensor<2x2xi64>, rhs_dilation = dense<1> : tensor<2xi64>, window_strides = dense<1> : tensor<2xi64>} : (tensor<1x128x128x1xf32>, tensor<3x3x1x1xf32>) -> tensor<1x128x128x1xf32>\n",
-            "    %3 = xla_hlo.mul %2, %2 : tensor<1x128x128x1xf32>\n",
+            "    %3 = xla_hlo.multiply %2, %2 : tensor<1x128x128x1xf32>\n",
             "    %4 = \"xla_hlo.conv\"(%arg0, %1) {batch_group_count = 1 : i64, dimension_numbers = {input_batch_dimension = 0 : i64, input_feature_dimension = 3 : i64, input_spatial_dimensions = dense<[1, 2]> : tensor<2xi64>, kernel_input_feature_dimension = 2 : i64, kernel_output_feature_dimension = 3 : i64, kernel_spatial_dimensions = dense<[0, 1]> : tensor<2xi64>, output_batch_dimension = 0 : i64, output_feature_dimension = 3 : i64, output_spatial_dimensions = dense<[1, 2]> : tensor<2xi64>}, feature_group_count = 1 : i64, padding = dense<1> : tensor<2x2xi64>, rhs_dilation = dense<1> : tensor<2xi64>, window_strides = dense<1> : tensor<2xi64>} : (tensor<1x128x128x1xf32>, tensor<3x3x1x1xf32>) -> tensor<1x128x128x1xf32>\n",
-            "    %5 = xla_hlo.mul %4, %4 : tensor<1x128x128x1xf32>\n",
+            "    %5 = xla_hlo.multiply %4, %4 : tensor<1x128x128x1xf32>\n",
             "    %6 = xla_hlo.add %3, %5 : tensor<1x128x128x1xf32>\n",
             "    %7 = \"xla_hlo.sqrt\"(%6) : (tensor<1x128x128x1xf32>) -> tensor<1x128x128x1xf32>\n",
             "    return %7 : tensor<1x128x128x1xf32>\n",
diff --git a/colab/low_level_invoke_function.ipynb b/colab/low_level_invoke_function.ipynb
index b36ea46..94a6089 100644
--- a/colab/low_level_invoke_function.ipynb
+++ b/colab/low_level_invoke_function.ipynb
@@ -54,7 +54,7 @@
         "  module @arithmetic {\n",
         "    func @simple_mul(%arg0: tensor<4xf32>, %arg1: tensor<4xf32>) -> tensor<4xf32>\n",
         "          attributes { iree.module.export } {\n",
-        "        %0 = \"xla_hlo.mul\"(%arg0, %arg1) {name = \"mul.1\"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>\n",
+        "        %0 = \"xla_hlo.multiply\"(%arg0, %arg1) {name = \"mul.1\"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>\n",
         "        return %0 : tensor<4xf32>\n",
         "    } \n",
         "  }\n",
diff --git a/colab/mnist_tensorflow.ipynb b/colab/mnist_tensorflow.ipynb
index 4d552a6..8a92b1c 100644
--- a/colab/mnist_tensorflow.ipynb
+++ b/colab/mnist_tensorflow.ipynb
@@ -523,22 +523,22 @@
             "    %7 = \"xla_hlo.reshape\"(%arg0) : (tensor<1x28x28x1xf32>) -> tensor<1x784xf32>\n",
             "    %8 = \"xla_hlo.dot\"(%7, %3) : (tensor<1x784xf32>, tensor<784x128xf32>) -> tensor<1x128xf32>\n",
             "    %9 = \"xla_hlo.add\"(%8, %2) {broadcast_dimensions = dense<1> : tensor<1xi64>} : (tensor<1x128xf32>, tensor<128xf32>) -> tensor<1x128xf32>\n",
-            "    %10 = xla_hlo.max %9, %4 : tensor<1x128xf32>\n",
+            "    %10 = xla_hlo.maximum %9, %4 : tensor<1x128xf32>\n",
             "    %11 = \"xla_hlo.dot\"(%10, %1) : (tensor<1x128xf32>, tensor<128x10xf32>) -> tensor<1x10xf32>\n",
             "    %12 = \"xla_hlo.add\"(%11, %0) {broadcast_dimensions = dense<1> : tensor<1xi64>} : (tensor<1x10xf32>, tensor<10xf32>) -> tensor<1x10xf32>\n",
             "    %13 = \"xla_hlo.reduce\"(%12, %5) ( {\n",
             "    ^bb0(%arg1: tensor<f32>, %arg2: tensor<f32>):\t// no predecessors\n",
-            "      %18 = xla_hlo.max %arg1, %arg2 : tensor<f32>\n",
+            "      %18 = xla_hlo.maximum %arg1, %arg2 : tensor<f32>\n",
             "      \"xla_hlo.return\"(%18) : (tensor<f32>) -> ()\n",
             "    }) {dimensions = dense<1> : tensor<1xi64>} : (tensor<1x10xf32>, tensor<f32>) -> tensor<1xf32>\n",
-            "    %14 = \"xla_hlo.sub\"(%12, %13) {broadcast_dimensions = dense<0> : tensor<1xi64>} : (tensor<1x10xf32>, tensor<1xf32>) -> tensor<1x10xf32>\n",
+            "    %14 = \"xla_hlo.subtract\"(%12, %13) {broadcast_dimensions = dense<0> : tensor<1xi64>} : (tensor<1x10xf32>, tensor<1xf32>) -> tensor<1x10xf32>\n",
             "    %15 = \"xla_hlo.exp\"(%14) : (tensor<1x10xf32>) -> tensor<1x10xf32>\n",
             "    %16 = \"xla_hlo.reduce\"(%15, %6) ( {\n",
             "    ^bb0(%arg1: tensor<f32>, %arg2: tensor<f32>):\t// no predecessors\n",
             "      %18 = xla_hlo.add %arg1, %arg2 : tensor<f32>\n",
             "      \"xla_hlo.return\"(%18) : (tensor<f32>) -> ()\n",
             "    }) {dimensions = dense<1> : tensor<1xi64>} : (tensor<1x10xf32>, tensor<f32>) -> tensor<1xf32>\n",
-            "    %17 = \"xla_hlo.div\"(%15, %16) {broadcast_dimensions = dense<0> : tensor<1xi64>} : (tensor<1x10xf32>, tensor<1xf32>) -> tensor<1x10xf32>\n",
+            "    %17 = \"xla_hlo.divide\"(%15, %16) {broadcast_dimensions = dense<0> : tensor<1xi64>} : (tensor<1x10xf32>, tensor<1xf32>) -> tensor<1x10xf32>\n",
             "    return %17 : tensor<1x10xf32>\n",
             "  }\n",
             "}\n",
diff --git a/colab/simple_tensorflow_module_import.ipynb b/colab/simple_tensorflow_module_import.ipynb
index f07e0b4..3ba73a0 100644
--- a/colab/simple_tensorflow_module_import.ipynb
+++ b/colab/simple_tensorflow_module_import.ipynb
@@ -141,7 +141,7 @@
             "  \"tf_saved_model.global_tensor\"() {is_mutable, sym_name = \"__sm_node1__v\", tf_saved_model.exported_names = [\"v\"], type = tensor<1xf32>, value = dense<4.000000e+00> : tensor<1xf32>} : () -> ()\n",
             "  func @__inference_add_10820(%arg0: tensor<4xf32> {tf_saved_model.index_path = [0]}, %arg1: tensor<4xf32> {tf_saved_model.index_path = [1]}, %arg2: tensor<*x!tf.resource> {tf_saved_model.bound_input = @__sm_node1__v}) -> (tensor<4xf32> {tf_saved_model.index_path = []}) attributes {tf._input_shapes = [\"tfshape$dim { size: 4 }\", \"tfshape$dim { size: 4 }\", \"tfshape$unknown_rank: true\"], tf.signature.is_stateful, tf_saved_model.exported_names = [\"add\"]} {\n",
             "    %0 = \"tf.ReadVariableOp\"(%arg2) {_output_shapes = [\"tfshape$dim { size: 1 }\"], device = \"\", dtype = f32} : (tensor<*x!tf.resource>) -> tensor<1xf32>\n",
-            "    %1 = \"xla_hlo.mul\"(%0, %arg0) : (tensor<1xf32>, tensor<4xf32>) -> tensor<4xf32>\n",
+            "    %1 = \"xla_hlo.multiply\"(%0, %arg0) : (tensor<1xf32>, tensor<4xf32>) -> tensor<4xf32>\n",
             "    %2 = xla_hlo.add %1, %arg1 : tensor<4xf32>\n",
             "    %3 = \"xla_hlo.tanh\"(%2) : (tensor<4xf32>) -> tensor<4xf32>\n",
             "    return %3 : tensor<4xf32>\n",
diff --git a/docs/mnist_example.md b/docs/mnist_example.md
index 0272f23..e696957 100644
--- a/docs/mnist_example.md
+++ b/docs/mnist_example.md
@@ -38,7 +38,7 @@
     %cst_1 = constant  {name = "constant.6"} dense<0.5> : tensor<128xf32>
     %5 = "xla_hlo.broadcast_in_dim"(%cst_1) {broadcast_dimensions = dense<1> : tensor<1xi64>, name = "broadcast.7"} : (tensor<128xf32>) -> tensor<1x128xf32>
     %6 = "xla_hlo.add"(%4, %5) {name = "add.8"} : (tensor<1x128xf32>, tensor<1x128xf32>) -> tensor<1x128xf32>
-    %7 = "xla_hlo.max"(%0, %6) {name = "maximum.11"} : (tensor<1x128xf32>, tensor<1x128xf32>) -> tensor<1x128xf32>
+    %7 = "xla_hlo.maximum"(%0, %6) {name = "maximum.11"} : (tensor<1x128xf32>, tensor<1x128xf32>) -> tensor<1x128xf32>
     %cst_2 = constant  {name = "constant.12"} dense<0.5> : tensor<128x10xf32>
     %8 = "xla_hlo.dot"(%7, %cst_2) {name = "dot.13", precision_config = ["DEFAULT", "DEFAULT"]} : (tensor<1x128xf32>, tensor<128x10xf32>) -> tensor<1x10xf32>
     %cst_3 = constant  {name = "constant.14"} dense<0.5> : tensor<10xf32>
@@ -47,11 +47,11 @@
     %cst_4 = constant  {name = "constant.17"} dense<0xFF800000> : tensor<f32>
     %11 = "xla_hlo.reduce"(%10, %cst_4) ( {
     ^bb0(%arg1: tensor<f32>, %arg2: tensor<f32>):   // no predecessors
-      %20 = "xla_hlo.max"(%arg1, %arg2) {name = "maximum.21"} : (tensor<f32>, tensor<f32>) -> tensor<f32>
+      %20 = "xla_hlo.maximum"(%arg1, %arg2) {name = "maximum.21"} : (tensor<f32>, tensor<f32>) -> tensor<f32>
       "xla_hlo.return"(%20) : (tensor<f32>) -> ()
     }) {dimensions = dense<1> : tensor<1xi64>} : (tensor<1x10xf32>, tensor<f32>) -> tensor<1xf32>
     %12 = "xla_hlo.broadcast_in_dim"(%11) {broadcast_dimensions = dense<0> : tensor<1xi64>, name = "broadcast.23"} : (tensor<1xf32>) -> tensor<1x10xf32>
-    %13 = "xla_hlo.sub"(%10, %12) {name = "subtract.24"} : (tensor<1x10xf32>, tensor<1x10xf32>) -> tensor<1x10xf32>
+    %13 = "xla_hlo.subtract"(%10, %12) {name = "subtract.24"} : (tensor<1x10xf32>, tensor<1x10xf32>) -> tensor<1x10xf32>
     %14 = "xla_hlo.exp"(%13) {name = "exponential.25"} : (tensor<1x10xf32>) -> tensor<1x10xf32>
     %cst_5 = constant  {name = "constant.27"} dense<0.5> : tensor<f32>
     %15 = "xla_hlo.reduce"(%14, %cst_5) ( {
@@ -60,7 +60,7 @@
       "xla_hlo.return"(%21) : (tensor<f32>) -> ()
     }) {dimensions = dense<1> : tensor<1xi64>} : (tensor<1x10xf32>, tensor<f32>) -> tensor<1xf32>
     %16 = "xla_hlo.broadcast_in_dim"(%15) {broadcast_dimensions = dense<0> : tensor<1xi64>, name = "broadcast.34"} : (tensor<1xf32>) -> tensor<1x10xf32>
-    %17 = "xla_hlo.div"(%14, %16) {name = "divide.35"} : (tensor<1x10xf32>, tensor<1x10xf32>) -> tensor<1x10xf32>
+    %17 = "xla_hlo.divide"(%14, %16) {name = "divide.35"} : (tensor<1x10xf32>, tensor<1x10xf32>) -> tensor<1x10xf32>
     %18 = "xla_hlo.reshape"(%17) {name = "reshape.36"} : (tensor<1x10xf32>) -> tensor<1x10xf32>
     %19 = "xla_hlo.tuple"(%18) {name = "tuple.37"} : (tensor<1x10xf32>) -> tuple<tensor<1x10xf32>>
     return %19 : tuple<tensor<1x10xf32>>
@@ -114,7 +114,7 @@
           %1 = "xla_hlo.broadcast_in_dim"(%cst_0) {name = "broadcast.10"} : (tensor<f32>) -> tensor<1x128xf32>
           %2 = "xla_hlo.broadcast_in_dim"(%cst) {broadcast_dimensions = dense<1> : tensor<1xi64>, name = "broadcast.7"} : (tensor<128xf32>) -> tensor<1x128xf32>
           %3 = addf %0, %2 : tensor<1x128xf32>
-          %4 = xla_hlo.max %1, %3 {name = "maximum.11"} : tensor<1x128xf32>
+          %4 = xla_hlo.maximum %1, %3 {name = "maximum.11"} : tensor<1x128xf32>
           iree.store_output(%4 : tensor<1x128xf32>, %arg1 : memref<1x128xf32>)
           iree.return
         }
@@ -159,7 +159,7 @@
           %cst = constant dense<0xFF800000> : tensor<f32>
           %1 = "xla_hlo.reduce"(%0, %cst) ( {
           ^bb0(%arg2: tensor<f32>, %arg3: tensor<f32>): // no predecessors
-            %2 = xla_hlo.max %arg2, %arg3 {name = "maximum.21"} : tensor<f32>
+            %2 = xla_hlo.maximum %arg2, %arg3 {name = "maximum.21"} : tensor<f32>
             "xla_hlo.return"(%2) : (tensor<f32>) -> ()
           }) {dimensions = dense<1> : tensor<1xi64>} : (tensor<1x10xf32>, tensor<f32>) -> tensor<1xf32>
           iree.store_output(%1 : tensor<1xf32>, %arg1 : memref<1xf32>)
diff --git a/docs/roadmap_design.md b/docs/roadmap_design.md
index 9f7dc6e..6987a01 100644
--- a/docs/roadmap_design.md
+++ b/docs/roadmap_design.md
@@ -347,9 +347,9 @@
 
 ```mlir
 %bcast = "xla_hlo.broadcast_in_dim"(%cst) : (tensor<f32>) -> tensor<1024x10xf32>
-%mul1 = xla_hlo.mul %arg0, %bcast : tensor<1024x10xf32>
+%mul1 = xla_hlo.multiply %arg0, %bcast : tensor<1024x10xf32>
 // (pretend something here that prevents fusion)
-%mul2 = xla_hlo.mul %arg1, %bcast : tensor<1024x10xf32>
+%mul2 = xla_hlo.multiply %arg1, %bcast : tensor<1024x10xf32>
 ```
 
 ```mlir
@@ -359,11 +359,11 @@
 }
 // a barrier will be required here
 %mul1 = flow.dispatch.region(%arg0 : tensor<1024x10xf32>, %bcast : tensor<1024x10xf32>) -> tensor<1024x10xf32> {
-  %1 = xla_hlo.mul %arg0, %bcast : tensor<1024x10xf32>
+  %1 = xla_hlo.multiply %arg0, %bcast : tensor<1024x10xf32>
   return %1 : tensor<1024x10xf32>
 }
 %mul2 = flow.dispatch.region(%arg1 : tensor<1024x10xf32>, %bcast : tensor<1024x10xf32>) -> tensor<1024x10xf32> {
-  %2 = xla_hlo.mul %arg1, %bcast : tensor<1024x10xf32>
+  %2 = xla_hlo.multiply %arg1, %bcast : tensor<1024x10xf32>
   return %2 : tensor<1024x10xf32>
 }
 ```
diff --git a/docs/simple_ir_walkthrough.md b/docs/simple_ir_walkthrough.md
index 311f043..a820c99 100644
--- a/docs/simple_ir_walkthrough.md
+++ b/docs/simple_ir_walkthrough.md
@@ -92,7 +92,7 @@
 ```mlir
 func @simple_mul(%arg0: tensor<4xf32>, %arg1: tensor<4xf32>) -> tensor<4xf32>
     attributes { iree.module.export } {
-  %0 = xla_hlo.mul(%arg0, %arg1) : tensor<4xf32>
+  %0 = xla_hlo.multiply(%arg0, %arg1) : tensor<4xf32>
   return %0 : tensor<4xf32>
 }
 ```
diff --git a/iree/compiler/Dialect/Flow/Analysis/test/dispatchability.mlir b/iree/compiler/Dialect/Flow/Analysis/test/dispatchability.mlir
index ff889be..f2e8728 100644
--- a/iree/compiler/Dialect/Flow/Analysis/test/dispatchability.mlir
+++ b/iree/compiler/Dialect/Flow/Analysis/test/dispatchability.mlir
@@ -41,8 +41,8 @@
 // CHECK-SAME: dispatchable = true
 func @hloElementwiseOps(%arg0 : tensor<4xf32>) -> tensor<4xf32> {
   %0 = xla_hlo.add %arg0, %arg0 : tensor<4xf32>
-  %1 = xla_hlo.sub %0, %arg0 : tensor<4xf32>
-  %2 = xla_hlo.mul %1, %arg0 : tensor<4xf32>
+  %1 = xla_hlo.subtract %0, %arg0 : tensor<4xf32>
+  %2 = xla_hlo.multiply %1, %arg0 : tensor<4xf32>
   return %2 : tensor<4xf32>
 }
 
@@ -53,7 +53,7 @@
 func @interleavedDot(%arg0 : tensor<4x4xf32>) -> tensor<4x4xf32> {
   %0 = xla_hlo.add %arg0, %arg0 : tensor<4x4xf32>
   %1 = "xla_hlo.dot"(%0, %arg0) : (tensor<4x4xf32>, tensor<4x4xf32>) -> tensor<4x4xf32>
-  %2 = xla_hlo.mul %1, %arg0 : tensor<4x4xf32>
+  %2 = xla_hlo.multiply %1, %arg0 : tensor<4x4xf32>
   return %2 : tensor<4x4xf32>
 }
 
@@ -64,13 +64,13 @@
 func @caller(%arg0 : tensor<4xf32>) -> tensor<4xf32> {
   %0 = xla_hlo.add %arg0, %arg0 : tensor<4xf32>
   %1 = call @callee(%0) : (tensor<4xf32>) -> tensor<4xf32>
-  %2 = xla_hlo.mul %1, %arg0 : tensor<4xf32>
+  %2 = xla_hlo.multiply %1, %arg0 : tensor<4xf32>
   return %2 : tensor<4xf32>
 }
 // CHECK-LABEL: func @callee
 // CHECK-SAME: dispatchable = true
 func @callee(%arg0 : tensor<4xf32>) -> tensor<4xf32> {
-  %0 = xla_hlo.mul %arg0, %arg0 : tensor<4xf32>
+  %0 = xla_hlo.multiply %arg0, %arg0 : tensor<4xf32>
   return %0 : tensor<4xf32>
 }
 
@@ -81,7 +81,7 @@
 func @dotCaller(%arg0 : tensor<4x4xf32>) -> tensor<4x4xf32> {
   %0 = xla_hlo.add %arg0, %arg0 : tensor<4x4xf32>
   %1 = call @dotCallee(%0) : (tensor<4x4xf32>) -> tensor<4x4xf32>
-  %2 = xla_hlo.mul %1, %arg0 : tensor<4x4xf32>
+  %2 = xla_hlo.multiply %1, %arg0 : tensor<4x4xf32>
   return %2 : tensor<4x4xf32>
 }
 // CHECK-LABEL: func @dotCallee
diff --git a/iree/compiler/Dialect/Flow/IR/test/stream_ops.mlir b/iree/compiler/Dialect/Flow/IR/test/stream_ops.mlir
index 1d13a98..ebb3f45 100644
--- a/iree/compiler/Dialect/Flow/IR/test/stream_ops.mlir
+++ b/iree/compiler/Dialect/Flow/IR/test/stream_ops.mlir
@@ -6,7 +6,7 @@
   flow.dispatch.entry @rgn_dispatch_0
   module {
     func @rgn_dispatch_0(%arg0: tensor<4xf32>) -> tensor<4xf32> {
-      %0 = xla_hlo.mul %arg0, %arg0 : tensor<4xf32>
+      %0 = xla_hlo.multiply %arg0, %arg0 : tensor<4xf32>
       return %0 : tensor<4xf32>
     }
   }
diff --git a/iree/compiler/Dialect/Flow/Transforms/test/fold_compatible_dispatch_regions.mlir b/iree/compiler/Dialect/Flow/Transforms/test/fold_compatible_dispatch_regions.mlir
index 2d6174b..4e1a7ef 100644
--- a/iree/compiler/Dialect/Flow/Transforms/test/fold_compatible_dispatch_regions.mlir
+++ b/iree/compiler/Dialect/Flow/Transforms/test/fold_compatible_dispatch_regions.mlir
@@ -26,11 +26,11 @@
     flow.return %1 : tensor<4xf32>
   }
   %2 = flow.dispatch.region[%cst : index](%arg2 = %arg0 : tensor<4xf32>, %arg3 = %0 : tensor<4xf32>) -> tensor<4xf32> {
-    %3 = xla_hlo.sub %arg3, %arg2 : tensor<4xf32>
+    %3 = xla_hlo.subtract %arg3, %arg2 : tensor<4xf32>
     flow.return %3 : tensor<4xf32>
   }
   %4 = flow.dispatch.region[%cst : index](%arg4 = %arg0 : tensor<4xf32>, %arg5 = %2 : tensor<4xf32>) -> tensor<4xf32> {
-    %5 = xla_hlo.mul %arg4, %arg5 : tensor<4xf32>
+    %5 = xla_hlo.multiply %arg4, %arg5 : tensor<4xf32>
     flow.return %5 : tensor<4xf32>
   }
   return %4 : tensor<4xf32>
@@ -40,8 +40,8 @@
 // CHECK: %[[WORKLOAD0:.+]] = constant 4
 // CHECK: %[[R0:.+]] = flow.dispatch.region[%[[WORKLOAD0]] : index](%arg1 = %arg0 : tensor<4xf32>) -> tensor<4xf32> {
 // CHECK-NEXT:   %1 = xla_hlo.add %arg1, %arg1 : tensor<4xf32>
-// CHECK-NEXT:   %2 = xla_hlo.sub %1, %arg1 : tensor<4xf32>
-// CHECK-NEXT:   %3 = xla_hlo.mul %arg1, %2 : tensor<4xf32>
+// CHECK-NEXT:   %2 = xla_hlo.subtract %1, %arg1 : tensor<4xf32>
+// CHECK-NEXT:   %3 = xla_hlo.multiply %arg1, %2 : tensor<4xf32>
 // CHECK-NEXT:   flow.return %3 : tensor<4xf32>
 // CHECK-NEXT: }
 // CHECK: return %[[R0]] : tensor<4xf32>
@@ -61,7 +61,7 @@
   }
   %cst_1 = constant 16 : index
   %2 = flow.dispatch.region[%cst_1 : index](%arg1 = %1 : tensor<4x4xf32>, %arg2 = %arg0 : tensor<4x4xf32>) -> tensor<4x4xf32> {
-    %3 = xla_hlo.mul %arg1, %arg2 : tensor<4x4xf32>
+    %3 = xla_hlo.multiply %arg1, %arg2 : tensor<4x4xf32>
     flow.return %3 : tensor<4x4xf32>
   }
   return %2 : tensor<4x4xf32>
@@ -80,7 +80,7 @@
 // CHECK-NEXT: }
 // CHECK-NEXT: %[[WORKLOAD2:.+]] = constant 16 : index
 // CHECK-NEXT: %[[R2:.+]] = flow.dispatch.region[%[[WORKLOAD2]] : index](%arg1 = %[[R1]] : tensor<4x4xf32>, %arg2 = %arg0 : tensor<4x4xf32>) -> tensor<4x4xf32> {
-// CHECK-NEXT:   %3 = xla_hlo.mul %arg1, %arg2 : tensor<4x4xf32>
+// CHECK-NEXT:   %3 = xla_hlo.multiply %arg1, %arg2 : tensor<4x4xf32>
 // CHECK-NEXT:   flow.return %3 : tensor<4x4xf32>
 // CHECK-NEXT: }
 // CHECK-NEXT: return %[[R2]] : tensor<4x4xf32>
diff --git a/iree/compiler/Dialect/Flow/Transforms/test/form_streams.mlir b/iree/compiler/Dialect/Flow/Transforms/test/form_streams.mlir
index 791c7c2..2ba49f2 100644
--- a/iree/compiler/Dialect/Flow/Transforms/test/form_streams.mlir
+++ b/iree/compiler/Dialect/Flow/Transforms/test/form_streams.mlir
@@ -148,7 +148,7 @@
   }
   module {
     func @interleavedDot_rgn_dispatch_2(%arg0: tensor<4x4xf32>, %arg1: tensor<4x4xf32>) -> tensor<4x4xf32> {
-      %0 = xla_hlo.mul %arg0, %arg1 : tensor<4x4xf32>
+      %0 = xla_hlo.multiply %arg0, %arg1 : tensor<4x4xf32>
       return %0 : tensor<4x4xf32>
     }
   }
@@ -189,7 +189,7 @@
   }
   module {
     func @caller_rgn_dispatch_1(%arg0: tensor<4xf32>, %arg1: tensor<4xf32>) -> tensor<4xf32> {
-      %0 = xla_hlo.mul %arg0, %arg1 : tensor<4xf32>
+      %0 = xla_hlo.multiply %arg0, %arg1 : tensor<4xf32>
       return %0 : tensor<4xf32>
     }
   }
@@ -217,7 +217,7 @@
   flow.dispatch.entry @callee_rgn_dispatch_0
   module {
     func @callee_rgn_dispatch_0(%arg0: tensor<4xf32>) -> tensor<4xf32> {
-      %0 = xla_hlo.mul %arg0, %arg0 : tensor<4xf32>
+      %0 = xla_hlo.multiply %arg0, %arg0 : tensor<4xf32>
       return %0 : tensor<4xf32>
     }
   }
diff --git a/iree/compiler/Dialect/Flow/Transforms/test/identify_dispatch_regions.mlir b/iree/compiler/Dialect/Flow/Transforms/test/identify_dispatch_regions.mlir
index c2cebbf..2150552 100644
--- a/iree/compiler/Dialect/Flow/Transforms/test/identify_dispatch_regions.mlir
+++ b/iree/compiler/Dialect/Flow/Transforms/test/identify_dispatch_regions.mlir
@@ -52,10 +52,10 @@
   // CHECK-SAME: (%arg1 = %arg0 : tensor<4xf32>) -> tensor<4xf32> {
   // CHECK-NEXT:   %1 = xla_hlo.add %arg1, %arg1 : tensor<4xf32>
   %0 = xla_hlo.add %arg0, %arg0 : tensor<4xf32>
-  // CHECK-NEXT:   %2 = xla_hlo.sub %1, %arg1 : tensor<4xf32>
-  %1 = xla_hlo.sub %0, %arg0 : tensor<4xf32>
-  // CHECK-NEXT:   %3 = xla_hlo.mul %2, %arg1 : tensor<4xf32>
-  %2 = xla_hlo.mul %1, %arg0 : tensor<4xf32>
+  // CHECK-NEXT:   %2 = xla_hlo.subtract %1, %arg1 : tensor<4xf32>
+  %1 = xla_hlo.subtract %0, %arg0 : tensor<4xf32>
+  // CHECK-NEXT:   %3 = xla_hlo.multiply %2, %arg1 : tensor<4xf32>
+  %2 = xla_hlo.multiply %1, %arg0 : tensor<4xf32>
   // CHECK-NEXT:   flow.return %3 : tensor<4xf32>
   // CHECK-NEXT: }
   // CHECK-NEXT: return %0 : tensor<4xf32>
@@ -88,8 +88,8 @@
   // CHECK: %[[R2:.+]] = flow.dispatch.region
   // CHECK-SAME: [%[[WORKLOAD2]] : index]
   // CHECK-SAME: (%arg1 = %[[R1]] : tensor<4x4xf32>, %arg2 = %arg0 : tensor<4x4xf32>) -> tensor<4x4xf32> {
-  // CHECK-NEXT:   %3 = xla_hlo.mul %arg1, %arg2 : tensor<4x4xf32>
-  %2 = xla_hlo.mul %1, %arg0 : tensor<4x4xf32>
+  // CHECK-NEXT:   %3 = xla_hlo.multiply %arg1, %arg2 : tensor<4x4xf32>
+  %2 = xla_hlo.multiply %1, %arg0 : tensor<4x4xf32>
   // CHECK-NEXT:   flow.return %3 : tensor<4x4xf32>
   // CHECK-NEXT: }
   // CHECK-NEXT: return %[[R2]] : tensor<4x4xf32>
@@ -108,8 +108,8 @@
   %0 = xla_hlo.add %arg0, %arg0 : tensor<4xf32>
   // CHECK-NEXT:   %2 = call @callee(%1) : (tensor<4xf32>) -> tensor<4xf32>
   %1 = call @callee(%0) : (tensor<4xf32>) -> tensor<4xf32>
-  // CHECK-NEXT:   %3 = xla_hlo.mul %2, %arg1 : tensor<4xf32>
-  %2 = xla_hlo.mul %1, %arg0 : tensor<4xf32>
+  // CHECK-NEXT:   %3 = xla_hlo.multiply %2, %arg1 : tensor<4xf32>
+  %2 = xla_hlo.multiply %1, %arg0 : tensor<4xf32>
   // CHECK-NEXT:   flow.return %3 : tensor<4xf32>
   // CHECK-NEXT: }
   // CHECK-NEXT: return %[[R0]] : tensor<4xf32>
@@ -121,8 +121,8 @@
   // CHECK: %[[R0:.+]] = flow.dispatch.region
   // CHECK-SAME: [%[[WORKLOAD0]] : index]
   // CHECK-SAME: (%arg1 = %arg0 : tensor<4xf32>) -> tensor<4xf32> {
-  // CHECK-NEXT:   %1 = xla_hlo.mul %arg1, %arg1 : tensor<4xf32>
-  %0 = xla_hlo.mul %arg0, %arg0 : tensor<4xf32>
+  // CHECK-NEXT:   %1 = xla_hlo.multiply %arg1, %arg1 : tensor<4xf32>
+  %0 = xla_hlo.multiply %arg0, %arg0 : tensor<4xf32>
   // CHECK-NEXT:   flow.return %1 : tensor<4xf32>
   // CHECK-NEXT: }
   // CHECK: return %[[R0]] : tensor<4xf32>
diff --git a/iree/compiler/Dialect/Flow/Transforms/test/transformation.mlir b/iree/compiler/Dialect/Flow/Transforms/test/transformation.mlir
index 75fd8c3..07084e7 100644
--- a/iree/compiler/Dialect/Flow/Transforms/test/transformation.mlir
+++ b/iree/compiler/Dialect/Flow/Transforms/test/transformation.mlir
@@ -64,8 +64,8 @@
 
 func @hloElementwiseOps(%arg0 : tensor<4xf32>) -> tensor<4xf32> {
   %0 = xla_hlo.add %arg0, %arg0 : tensor<4xf32>
-  %1 = xla_hlo.sub %0, %arg0 : tensor<4xf32>
-  %2 = xla_hlo.mul %1, %arg0 : tensor<4xf32>
+  %1 = xla_hlo.subtract %0, %arg0 : tensor<4xf32>
+  %2 = xla_hlo.multiply %1, %arg0 : tensor<4xf32>
   return %2 : tensor<4xf32>
 }
 
@@ -74,8 +74,8 @@
 // CHECK-NEXT:   module {
 // CHECK-NEXT:     func @hloElementwiseOps_ex_dispatch_0(%arg0: tensor<4xf32>) -> tensor<4xf32> {
 // CHECK-NEXT:       %0 = xla_hlo.add %arg0, %arg0 : tensor<4xf32>
-// CHECK-NEXT:       %1 = xla_hlo.sub %0, %arg0 : tensor<4xf32>
-// CHECK-NEXT:       %2 = xla_hlo.mul %1, %arg0 : tensor<4xf32>
+// CHECK-NEXT:       %1 = xla_hlo.subtract %0, %arg0 : tensor<4xf32>
+// CHECK-NEXT:       %2 = xla_hlo.multiply %1, %arg0 : tensor<4xf32>
 // CHECK-NEXT:       return %2 : tensor<4xf32>
 // CHECK-NEXT:     }
 // CHECK-NEXT:   }
@@ -94,7 +94,7 @@
 func @interleavedDot(%arg0 : tensor<4x4xf32>) -> tensor<4x4xf32> {
   %0 = xla_hlo.add %arg0, %arg0 : tensor<4x4xf32>
   %1 = "xla_hlo.dot"(%0, %arg0) : (tensor<4x4xf32>, tensor<4x4xf32>) -> tensor<4x4xf32>
-  %2 = xla_hlo.mul %1, %arg0 : tensor<4x4xf32>
+  %2 = xla_hlo.multiply %1, %arg0 : tensor<4x4xf32>
   return %2 : tensor<4x4xf32>
 }
 
@@ -120,7 +120,7 @@
 // CHECK-NEXT:   flow.dispatch.entry @interleavedDot_ex_dispatch_2
 // CHECK-NEXT:   module {
 // CHECK-NEXT:     func @interleavedDot_ex_dispatch_2(%arg0: tensor<4x4xf32>, %arg1: tensor<4x4xf32>) -> tensor<4x4xf32> {
-// CHECK-NEXT:       %0 = xla_hlo.mul %arg0, %arg1 : tensor<4x4xf32>
+// CHECK-NEXT:       %0 = xla_hlo.multiply %arg0, %arg1 : tensor<4x4xf32>
 // CHECK-NEXT:       return %0 : tensor<4x4xf32>
 // CHECK-NEXT:     }
 // CHECK-NEXT:   }
diff --git a/iree/compiler/Translation/CodegenPasses/test/arithmetic_ops.mlir b/iree/compiler/Translation/CodegenPasses/test/arithmetic_ops.mlir
index af5edb8..c76ab07 100644
--- a/iree/compiler/Translation/CodegenPasses/test/arithmetic_ops.mlir
+++ b/iree/compiler/Translation/CodegenPasses/test/arithmetic_ops.mlir
@@ -45,7 +45,7 @@
 // CHECK: [[MAP0:#.*]] = affine_map<(d0, d1) -> (d0, d1)>
 // CHECK: func @subf
 func @subf(%operand: tensor<2x2xf32>) {
-  %result = "xla_hlo.sub"(%operand, %operand)
+  %result = "xla_hlo.subtract"(%operand, %operand)
       : (tensor<2x2xf32>, tensor<2x2xf32>) -> tensor<2x2xf32>
   return
 }
@@ -66,7 +66,7 @@
 // CHECK: [[MAP0:#.*]] = affine_map<(d0, d1) -> (d0, d1)>
 // CHECK: func @subi
 func @subi(%operand: tensor<2x2xi32>) {
-  %result = "xla_hlo.sub"(%operand, %operand)
+  %result = "xla_hlo.subtract"(%operand, %operand)
       : (tensor<2x2xi32>, tensor<2x2xi32>) -> tensor<2x2xi32>
   return
 }
@@ -87,7 +87,7 @@
 // CHECK: [[MAP0:#.*]] = affine_map<(d0, d1) -> (d0, d1)>
 // CHECK: func @mulf
 func @mulf(%operand: tensor<2x2xf32>) {
-  %result = "xla_hlo.mul"(%operand, %operand)
+  %result = "xla_hlo.multiply"(%operand, %operand)
       : (tensor<2x2xf32>, tensor<2x2xf32>) -> tensor<2x2xf32>
   return
 }
@@ -108,7 +108,7 @@
 // CHECK: [[MAP0:#.*]] = affine_map<(d0, d1) -> (d0, d1)>
 // CHECK: func @muli
 func @muli(%operand: tensor<2x2xi32>) {
-  %result = "xla_hlo.mul"(%operand, %operand)
+  %result = "xla_hlo.multiply"(%operand, %operand)
       : (tensor<2x2xi32>, tensor<2x2xi32>) -> tensor<2x2xi32>
   return
 }
@@ -129,7 +129,7 @@
 // CHECK: [[MAP0:#.*]] = affine_map<(d0, d1) -> (d0, d1)>
 // CHECK: func @divf
 func @divf(%operand: tensor<2x2xf32>) {
-  %result = "xla_hlo.div"(%operand, %operand)
+  %result = "xla_hlo.divide"(%operand, %operand)
       : (tensor<2x2xf32>, tensor<2x2xf32>) -> tensor<2x2xf32>
   return
 }
@@ -150,7 +150,7 @@
 // CHECK: [[MAP0:#.*]] = affine_map<(d0, d1) -> (d0, d1)>
 // CHECK: func @divi
 func @divi(%operand: tensor<2x2xi32>) {
-  %result = "xla_hlo.div"(%operand, %operand)
+  %result = "xla_hlo.divide"(%operand, %operand)
       : (tensor<2x2xi32>, tensor<2x2xi32>) -> tensor<2x2xi32>
   return
 }
diff --git a/iree/compiler/Translation/CodegenPasses/test/pw_linalg_fusion.mlir b/iree/compiler/Translation/CodegenPasses/test/pw_linalg_fusion.mlir
index 8d697b7..2dd9997 100644
--- a/iree/compiler/Translation/CodegenPasses/test/pw_linalg_fusion.mlir
+++ b/iree/compiler/Translation/CodegenPasses/test/pw_linalg_fusion.mlir
@@ -14,7 +14,7 @@
   // CHECK: [[TEMP:%[a-zA-Z0-9_]*]] = muli [[ARG0]], [[ARG1]]
   // CHECK: addi [[TEMP]], [[ARG2]]
   // CHECK-NOT: linalg.generic
-  %4 = "xla_hlo.mul"(%0, %1) : (tensor<4x8xi32>, tensor<4x8xi32>) -> tensor<4x8xi32>
+  %4 = "xla_hlo.multiply"(%0, %1) : (tensor<4x8xi32>, tensor<4x8xi32>) -> tensor<4x8xi32>
   %5 = "xla_hlo.add"(%4, %2) :  (tensor<4x8xi32>, tensor<4x8xi32>) -> tensor<4x8xi32>
   iree.store_output(%5 : tensor<4x8xi32>, %arg3 : memref<4x8xi32>)
   return
@@ -39,9 +39,9 @@
   // CHECK: [[TEMP2:%[a-zA-Z0-9_]*]] = addi [[TEMP1]], [[ARG2]]
   // CHECK: subi [[TEMP2]], [[ARG3]]
   // CHECK-NOT: linalg.generic
-  %4 = "xla_hlo.mul"(%0, %1) : (tensor<4x8xi32>, tensor<4x8xi32>) -> tensor<4x8xi32>
+  %4 = "xla_hlo.multiply"(%0, %1) : (tensor<4x8xi32>, tensor<4x8xi32>) -> tensor<4x8xi32>
   %5 = "xla_hlo.add"(%4, %2) :  (tensor<4x8xi32>, tensor<4x8xi32>) -> tensor<4x8xi32>
-  %6 = "xla_hlo.sub"(%5, %3) :  (tensor<4x8xi32>, tensor<4x8xi32>) -> tensor<4x8xi32>
+  %6 = "xla_hlo.subtract"(%5, %3) :  (tensor<4x8xi32>, tensor<4x8xi32>) -> tensor<4x8xi32>
   iree.store_output(%6 : tensor<4x8xi32>, %arg4 : memref<4x8xi32>)
   return
 }
@@ -65,9 +65,9 @@
   // CHECK-DAG: [[TEMP2:%[a-zA-Z0-9_]*]] = addi [[ARG2]], [[ARG3]]
   // CHECK: subi [[TEMP1]], [[TEMP2]]
   // CHECK-NOT: linalg.generic
-  %4 = "xla_hlo.mul"(%0, %1) : (tensor<4x8xi32>, tensor<4x8xi32>) -> tensor<4x8xi32>
+  %4 = "xla_hlo.multiply"(%0, %1) : (tensor<4x8xi32>, tensor<4x8xi32>) -> tensor<4x8xi32>
   %5 = "xla_hlo.add"(%2, %3) :  (tensor<4x8xi32>, tensor<4x8xi32>) -> tensor<4x8xi32>
-  %6 = "xla_hlo.sub"(%4, %5) :  (tensor<4x8xi32>, tensor<4x8xi32>) -> tensor<4x8xi32>
+  %6 = "xla_hlo.subtract"(%4, %5) :  (tensor<4x8xi32>, tensor<4x8xi32>) -> tensor<4x8xi32>
   iree.store_output(%6 : tensor<4x8xi32>, %arg4 : memref<4x8xi32>)
   return
 }
@@ -90,9 +90,9 @@
   // CHECK-DAG: [[TEMP2:%[a-zA-Z0-9_]*]] = addi [[ARG2]], [[ARG3]]
   // CHECK: subi [[TEMP1]], [[TEMP2]]
   // CHECK-NOT: linalg.generic
-  %3 = "xla_hlo.mul"(%0, %1) : (tensor<4x8xi32>, tensor<4x8xi32>) -> tensor<4x8xi32>
+  %3 = "xla_hlo.multiply"(%0, %1) : (tensor<4x8xi32>, tensor<4x8xi32>) -> tensor<4x8xi32>
   %4 = "xla_hlo.add"(%0, %2) :  (tensor<4x8xi32>, tensor<4x8xi32>) -> tensor<4x8xi32>
-  %5 = "xla_hlo.sub"(%3, %4) :  (tensor<4x8xi32>, tensor<4x8xi32>) -> tensor<4x8xi32>
+  %5 = "xla_hlo.subtract"(%3, %4) :  (tensor<4x8xi32>, tensor<4x8xi32>) -> tensor<4x8xi32>
   iree.store_output(%5 : tensor<4x8xi32>, %arg3 : memref<4x8xi32>)
   return
 }
diff --git a/iree/compiler/Translation/CodegenPasses/test/reduce.mlir b/iree/compiler/Translation/CodegenPasses/test/reduce.mlir
index 1e4c52d..b5c7e23 100644
--- a/iree/compiler/Translation/CodegenPasses/test/reduce.mlir
+++ b/iree/compiler/Translation/CodegenPasses/test/reduce.mlir
@@ -43,7 +43,7 @@
     %1 = iree.load_input(%arg1 : memref<f32>) : tensor<f32>
     %2 = "xla_hlo.reduce"(%0, %1) ( {
     ^bb0(%arg3: tensor<f32>, %arg4 : tensor<f32>):
-      %3 = xla_hlo.min %arg3, %arg4 : tensor<f32>
+      %3 = xla_hlo.minimum %arg3, %arg4 : tensor<f32>
       "xla_hlo.return"(%3) : (tensor<f32>) -> ()
     }) {dimensions = dense<1> : tensor<1xi64>} : (tensor<5x4xf32>, tensor<f32>) -> tensor<5xf32>
     iree.store_output(%2 : tensor<5xf32>, %arg2 : memref<5xf32>)
@@ -62,7 +62,7 @@
     %1 = iree.load_input(%arg1 : memref<f32>) : tensor<f32>
     %2 = "xla_hlo.reduce"(%0, %1) ( {
     ^bb0(%arg3: tensor<f32>, %arg4 : tensor<f32>):
-      %3 = xla_hlo.max %arg3, %arg4 : tensor<f32>
+      %3 = xla_hlo.maximum %arg3, %arg4 : tensor<f32>
       "xla_hlo.return"(%3) : (tensor<f32>) -> ()
     }) {dimensions = dense<1> : tensor<1xi64>} : (tensor<5x4xf32>, tensor<f32>) -> tensor<5xf32>
     iree.store_output(%2 : tensor<5xf32>, %arg2 : memref<5xf32>)
@@ -81,7 +81,7 @@
     %1 = iree.load_input(%arg1 : memref<f32>) : tensor<f32>
     %2 = "xla_hlo.reduce"(%0, %1) ( {
     ^bb0(%arg3: tensor<f32>, %arg4 : tensor<f32>):
-      %3 = xla_hlo.max %arg3, %arg4 : tensor<f32>
+      %3 = xla_hlo.maximum %arg3, %arg4 : tensor<f32>
       "xla_hlo.return"(%3) : (tensor<f32>) -> ()
     }) {dimensions = dense<0> : tensor<1xi64>} : (tensor<5x4xf32>, tensor<f32>) -> tensor<4xf32>
     iree.store_output(%2 : tensor<4xf32>, %arg2 : memref<4xf32>)
diff --git a/iree/compiler/Translation/SPIRV/XLAToSPIRV/test/max.mlir b/iree/compiler/Translation/SPIRV/XLAToSPIRV/test/max.mlir
index 2426783..c66e8bc 100644
--- a/iree/compiler/Translation/SPIRV/XLAToSPIRV/test/max.mlir
+++ b/iree/compiler/Translation/SPIRV/XLAToSPIRV/test/max.mlir
@@ -6,7 +6,7 @@
     %0 = iree.load_input(%arg0 : memref<12x42xf32>) : tensor<12x42xf32>
     %1 = iree.load_input(%arg1 : memref<12x42xf32>) : tensor<12x42xf32>
     //CHECK: [[COMPARE:%.*]] = spv.GLSL.FMax [[VAL1:%.*]], [[VAL2:%.*]] : f32
-    %2 = xla_hlo.max %0, %1 : tensor<12x42xf32>
+    %2 = xla_hlo.maximum %0, %1 : tensor<12x42xf32>
     iree.store_output(%2 : tensor<12x42xf32>, %arg2 : memref<12x42xf32>)
     return
   }
@@ -20,7 +20,7 @@
     %0 = iree.load_input(%arg0 : memref<12x42xi32>) : tensor<12x42xi32>
     %1 = iree.load_input(%arg1 : memref<12x42xi32>) : tensor<12x42xi32>
     //CHECK: [[COMPARE:%.*]] = spv.GLSL.SMax [[VAL1:%.*]], [[VAL2:%.*]] : i32
-    %2 = xla_hlo.max %0, %1 : tensor<12x42xi32>
+    %2 = xla_hlo.maximum %0, %1 : tensor<12x42xi32>
     iree.store_output(%2 : tensor<12x42xi32>, %arg2 : memref<12x42xi32>)
     return
   }
diff --git a/iree/compiler/Translation/test/smoketest.mlir b/iree/compiler/Translation/test/smoketest.mlir
index 8d86363..38b45dc 100644
--- a/iree/compiler/Translation/test/smoketest.mlir
+++ b/iree/compiler/Translation/test/smoketest.mlir
@@ -44,8 +44,8 @@
 // CHECK: local_name: "hloElementwiseOps"
 func @hloElementwiseOps(%arg0 : tensor<4xf32>) -> tensor<4xf32> attributes {iree.module.export} {
   %0 = xla_hlo.add %arg0, %arg0 : tensor<4xf32>
-  %1 = xla_hlo.sub %0, %arg0 : tensor<4xf32>
-  %2 = xla_hlo.mul %1, %arg0 : tensor<4xf32>
+  %1 = xla_hlo.subtract %0, %arg0 : tensor<4xf32>
+  %2 = xla_hlo.multiply %1, %arg0 : tensor<4xf32>
   return %2 : tensor<4xf32>
 }
 }
diff --git a/iree/samples/simple_embedding/simple_embedding_test.mlir b/iree/samples/simple_embedding/simple_embedding_test.mlir
index ade3e21..2839639 100644
--- a/iree/samples/simple_embedding/simple_embedding_test.mlir
+++ b/iree/samples/simple_embedding/simple_embedding_test.mlir
@@ -1,5 +1,5 @@
 func @simple_mul(%arg0: tensor<4xf32>, %arg1: tensor<4xf32>) -> tensor<4xf32>
     attributes { iree.module.export } {
-  %0 = "xla_hlo.mul"(%arg0, %arg1) {name = "mul.1"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>
+  %0 = "xla_hlo.multiply"(%arg0, %arg1) {name = "mul.1"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>
   return %0 : tensor<4xf32>
 }
diff --git a/iree/samples/vulkan/simple_mul.mlir b/iree/samples/vulkan/simple_mul.mlir
index ade3e21..2839639 100644
--- a/iree/samples/vulkan/simple_mul.mlir
+++ b/iree/samples/vulkan/simple_mul.mlir
@@ -1,5 +1,5 @@
 func @simple_mul(%arg0: tensor<4xf32>, %arg1: tensor<4xf32>) -> tensor<4xf32>
     attributes { iree.module.export } {
-  %0 = "xla_hlo.mul"(%arg0, %arg1) {name = "mul.1"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>
+  %0 = "xla_hlo.multiply"(%arg0, %arg1) {name = "mul.1"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>
   return %0 : tensor<4xf32>
 }
diff --git a/test/e2e/linalg_path/pw_add_mul.mlir b/test/e2e/linalg_path/pw_add_mul.mlir
index 3a02b53..27893d9 100644
--- a/test/e2e/linalg_path/pw_add_mul.mlir
+++ b/test/e2e/linalg_path/pw_add_mul.mlir
@@ -7,7 +7,7 @@
     %0 = iree.unfoldable_constant dense<[[1, 2, 3, 4, 5, 6, 7, 8], [9, 10, 11, 12, 13, 14, 15, 16], [17, 18, 19, 20, 21, 22, 23, 24], [25, 26, 27, 28, 29, 30, 31, 32]]> : tensor<4x8xi32>
     %1 = iree.unfoldable_constant dense<[[2, 4, 6, 8, 10, 12, 14, 16], [18, 20, 22, 24, 26, 28, 30, 32], [34, 36, 38, 40, 42, 44, 46, 48], [50, 52, 54, 56, 58, 60, 62, 64]]> : tensor<4x8xi32>
     %2 = iree.unfoldable_constant dense<[[3, 6, 9, 12, 15, 18, 21, 24], [27, 30, 33, 36, 39, 42, 45, 48], [51, 54, 57, 60, 63, 66, 69, 72], [75, 78, 81, 84, 87, 90, 93, 96]]> : tensor<4x8xi32>
-    %3 = "xla_hlo.mul"(%0, %1) : (tensor<4x8xi32>, tensor<4x8xi32>) -> tensor<4x8xi32>
+    %3 = "xla_hlo.multiply"(%0, %1) : (tensor<4x8xi32>, tensor<4x8xi32>) -> tensor<4x8xi32>
     %4 = "xla_hlo.add"(%3, %2) :  (tensor<4x8xi32>, tensor<4x8xi32>) -> tensor<4x8xi32>
     return %4 : tensor<4x8xi32>
   }
diff --git a/test/e2e/xla/collatz.mlir b/test/e2e/xla/collatz.mlir
index cd170aa..9cd91c7 100644
--- a/test/e2e/xla/collatz.mlir
+++ b/test/e2e/xla/collatz.mlir
@@ -26,11 +26,11 @@
   %20 = extract_element %19[] : tensor<i1>
   cond_br %20, ^bb3, ^bb4
 ^bb3: // pred: ^bb2
-  %21 = xla_hlo.mul %9, %1 : tensor<f32>
+  %21 = xla_hlo.multiply %9, %1 : tensor<f32>
   %22 = xla_hlo.add %21, %0 : tensor<f32>
   br ^bb5(%22 : tensor<f32>)
 ^bb4: // pred: ^bb2
-  %23 = xla_hlo.div %9, %2 : tensor<f32>
+  %23 = xla_hlo.divide %9, %2 : tensor<f32>
   br ^bb5(%23 : tensor<f32>)
 ^bb5(%24: tensor<f32>): // 2 preds: ^bb3, ^bb4
   br ^bb1(%10, %24 : tensor<f32>, tensor<f32>)
diff --git a/test/e2e/xla/fragment_000.mlir b/test/e2e/xla/fragment_000.mlir
index 000489c..169ebe7 100644
--- a/test/e2e/xla/fragment_000.mlir
+++ b/test/e2e/xla/fragment_000.mlir
@@ -17,7 +17,7 @@
   %11 = "xla_hlo.broadcast_in_dim"(%0) {broadcast_dimensions = dense<[]> : tensor<0xi64>, name = "broadcast.67"} : (tensor<f32>) -> tensor<5x5xf32>
   %12 = "xla_hlo.broadcast_in_dim"(%4) {broadcast_dimensions = dense<1> : tensor<1xi64>, name = "broadcast.64"} : (tensor<5xf32>) -> tensor<5x5xf32>
   %13 = addf %3, %12 : tensor<5x5xf32>
-  %14 = xla_hlo.max %11, %13 {name = "maximum.68"} : tensor<5x5xf32>
+  %14 = xla_hlo.maximum %11, %13 {name = "maximum.68"} : tensor<5x5xf32>
   %15 = "xla_hlo.reshape"(%14) {name = "reshape.70"} : (tensor<5x5xf32>) -> tensor<5x1x5xf32>
   %16 = "xla_hlo.select"(%9, %10, %15) {name = "select.71"} : (tensor<5x1x5xi1>, tensor<5x1x5xf32>, tensor<5x1x5xf32>) -> tensor<5x1x5xf32>
   %17 = "xla_hlo.copy"(%16) {name = "copy.4"} : (tensor<5x1x5xf32>) -> tensor<5x1x5xf32>
diff --git a/test/e2e/xla/fullyconnected.mlir b/test/e2e/xla/fullyconnected.mlir
index c30a6b3..2774e88 100644
--- a/test/e2e/xla/fullyconnected.mlir
+++ b/test/e2e/xla/fullyconnected.mlir
@@ -13,7 +13,7 @@
   %4 = "xla_hlo.broadcast_in_dim"(%3) {broadcast_dimensions = dense<[0, 1]> : tensor<2xi64>, name = "broadcast.56"} : (tensor<5x1xf32>) -> tensor<5x1x5xf32>
   %cst = constant  {name = "constant.22"} dense<1.000000e+00> : tensor<f32>
   %5 = "xla_hlo.broadcast_in_dim"(%cst) {broadcast_dimensions = dense<[]> : tensor<0xi64>, name = "broadcast.23"} : (tensor<f32>) -> tensor<5x1x5xf32>
-  %6 = xla_hlo.mul %4, %5 {name = "multiply.57"} : tensor<5x1x5xf32>
+  %6 = xla_hlo.multiply %4, %5 {name = "multiply.57"} : tensor<5x1x5xf32>
   %cst_0 = constant  {name = "constant.58"} dense<0.000000e+00> : tensor<f32>
   %7 = "xla_hlo.broadcast_in_dim"(%cst_0) {broadcast_dimensions = dense<[]> : tensor<0xi64>, name = "broadcast.59"} : (tensor<f32>) -> tensor<5x1x5xf32>
   %8 = "xla_hlo.compare"(%6, %7) {comparison_direction = "GT", name = "compare.60"} : (tensor<5x1x5xf32>, tensor<5x1x5xf32>) -> tensor<5x1x5xi1>
@@ -25,7 +25,7 @@
   %12 = "xla_hlo.broadcast_in_dim"(%11) {broadcast_dimensions = dense<[0, 1]> : tensor<2xi64>, name = "broadcast.50"} : (tensor<5x1xf32>) -> tensor<5x1x5xf32>
   %cst_3 = constant  {name = "constant.15"} dense<1.000000e+00> : tensor<f32>
   %13 = "xla_hlo.broadcast_in_dim"(%cst_3) {broadcast_dimensions = dense<[]> : tensor<0xi64>, name = "broadcast.16"} : (tensor<f32>) -> tensor<5x1x5xf32>
-  %14 = xla_hlo.mul %12, %13 {name = "multiply.51"} : tensor<5x1x5xf32>
+  %14 = xla_hlo.multiply %12, %13 {name = "multiply.51"} : tensor<5x1x5xf32>
   %cst_4 = constant  {name = "constant.52"} dense<0.000000e+00> : tensor<f32>
   %15 = "xla_hlo.broadcast_in_dim"(%cst_4) {broadcast_dimensions = dense<[]> : tensor<0xi64>, name = "broadcast.53"} : (tensor<f32>) -> tensor<5x1x5xf32>
   %16 = "xla_hlo.compare"(%14, %15) {comparison_direction = "GT", name = "compare.54"} : (tensor<5x1x5xf32>, tensor<5x1x5xf32>) -> tensor<5x1x5xi1>
@@ -37,7 +37,7 @@
   %20 = "xla_hlo.broadcast_in_dim"(%19) {broadcast_dimensions = dense<[0, 1]> : tensor<2xi64>, name = "broadcast.44"} : (tensor<5x1xf32>) -> tensor<5x1x5xf32>
   %cst_7 = constant  {name = "constant.8"} dense<1.000000e+00> : tensor<f32>
   %21 = "xla_hlo.broadcast_in_dim"(%cst_7) {broadcast_dimensions = dense<[]> : tensor<0xi64>, name = "broadcast.9"} : (tensor<f32>) -> tensor<5x1x5xf32>
-  %22 = xla_hlo.mul %20, %21 {name = "multiply.45"} : tensor<5x1x5xf32>
+  %22 = xla_hlo.multiply %20, %21 {name = "multiply.45"} : tensor<5x1x5xf32>
   %cst_8 = constant  {name = "constant.46"} dense<0.000000e+00> : tensor<f32>
   %23 = "xla_hlo.broadcast_in_dim"(%cst_8) {broadcast_dimensions = dense<[]> : tensor<0xi64>, name = "broadcast.47"} : (tensor<f32>) -> tensor<5x1x5xf32>
   %24 = "xla_hlo.compare"(%22, %23) {comparison_direction = "GT", name = "compare.48"} : (tensor<5x1x5xf32>, tensor<5x1x5xf32>) -> tensor<5x1x5xi1>
@@ -55,7 +55,7 @@
   %cst_12 = constant  {name = "constant.63"} dense<[0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00]> : tensor<5xf32>
   %33 = "xla_hlo.broadcast_in_dim"(%cst_12) {broadcast_dimensions = dense<[1]> : tensor<1xi64>, name = "broadcast.64"} : (tensor<5xf32>) -> tensor<5x5xf32>
   %34 = xla_hlo.add %32, %33 {name = "add.65"} : tensor<5x5xf32>
-  %35 = xla_hlo.max %26, %34 {name = "maximum.68"} : tensor<5x5xf32>
+  %35 = xla_hlo.maximum %26, %34 {name = "maximum.68"} : tensor<5x5xf32>
   %36 = "xla_hlo.reshape"(%35) {name = "reshape.70"} : (tensor<5x5xf32>) -> tensor<5x1x5xf32>
   %37 = "xla_hlo.select"(%24, %25, %36) {name = "select.71"} : (tensor<5x1x5xi1>, tensor<5x1x5xf32>, tensor<5x1x5xf32>) -> tensor<5x1x5xf32>
   %38 = "xla_hlo.copy"(%37) {name = "copy.4"} : (tensor<5x1x5xf32>) -> tensor<5x1x5xf32>
@@ -65,7 +65,7 @@
   %cst_14 = constant  {name = "constant.75"} dense<[0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00]> : tensor<5xf32>
   %41 = "xla_hlo.broadcast_in_dim"(%cst_14) {broadcast_dimensions = dense<[1]> : tensor<1xi64>, name = "broadcast.76"} : (tensor<5xf32>) -> tensor<5x5xf32>
   %42 = xla_hlo.add %40, %41 {name = "add.77"} : tensor<5x5xf32>
-  %43 = xla_hlo.max %18, %42 {name = "maximum.80"} : tensor<5x5xf32>
+  %43 = xla_hlo.maximum %18, %42 {name = "maximum.80"} : tensor<5x5xf32>
   %44 = "xla_hlo.reshape"(%43) {name = "reshape.82"} : (tensor<5x5xf32>) -> tensor<5x1x5xf32>
   %45 = "xla_hlo.select"(%16, %17, %44) {name = "select.83"} : (tensor<5x1x5xi1>, tensor<5x1x5xf32>, tensor<5x1x5xf32>) -> tensor<5x1x5xf32>
   %46 = "xla_hlo.copy"(%45) {name = "copy.5"} : (tensor<5x1x5xf32>) -> tensor<5x1x5xf32>
@@ -75,7 +75,7 @@
   %cst_16 = constant  {name = "constant.87"} dense<[0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00]> : tensor<5xf32>
   %49 = "xla_hlo.broadcast_in_dim"(%cst_16) {broadcast_dimensions = dense<[1]> : tensor<1xi64>, name = "broadcast.88"} : (tensor<5xf32>) -> tensor<5x5xf32>
   %50 = xla_hlo.add %48, %49 {name = "add.89"} : tensor<5x5xf32>
-  %51 = xla_hlo.max %10, %50 {name = "maximum.92"} : tensor<5x5xf32>
+  %51 = xla_hlo.maximum %10, %50 {name = "maximum.92"} : tensor<5x5xf32>
   %52 = "xla_hlo.reshape"(%51) {name = "reshape.94"} : (tensor<5x5xf32>) -> tensor<5x1x5xf32>
   %53 = "xla_hlo.select"(%8, %9, %52) {name = "select.95"} : (tensor<5x1x5xi1>, tensor<5x1x5xf32>, tensor<5x1x5xf32>) -> tensor<5x1x5xf32>
   %54 = "xla_hlo.reshape"(%53) {name = "reshape.96"} : (tensor<5x1x5xf32>) -> tensor<5x1x5xf32>
diff --git a/test/e2e/xla/max_float.mlir b/test/e2e/xla/max_float.mlir
index 7a2ceba..01dfe9a 100644
--- a/test/e2e/xla/max_float.mlir
+++ b/test/e2e/xla/max_float.mlir
@@ -5,7 +5,7 @@
 func @tensor() -> tensor<4xf32> {
   %lhs = iree.unfoldable_constant dense<[1.0, 2.0, 7.0, 4.0]> : tensor<4xf32>
   %rhs = iree.unfoldable_constant dense<[5.0, 2.0, 3.0, 4.0]> : tensor<4xf32>
-  %result = "xla_hlo.min"(%lhs, %rhs) : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>
+  %result = "xla_hlo.minimum"(%lhs, %rhs) : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>
   return %result : tensor<4xf32>
 }
 // CHECK: 4xf32=1 2 3 4
@@ -16,7 +16,7 @@
 func @scalar() -> tensor<f32> {
   %lhs = iree.unfoldable_constant dense<1.0> : tensor<f32>
   %rhs = iree.unfoldable_constant dense<2.0> : tensor<f32>
-  %result = "xla_hlo.min"(%lhs, %rhs) : (tensor<f32>, tensor<f32>) -> tensor<f32>
+  %result = "xla_hlo.minimum"(%lhs, %rhs) : (tensor<f32>, tensor<f32>) -> tensor<f32>
   return %result : tensor<f32>
 }
 // CHECK: f32=1
@@ -27,7 +27,7 @@
 func @double() -> tensor<f64> {
   %lhs = iree.unfoldable_constant dense<1.0> : tensor<f64>
   %rhs = iree.unfoldable_constant dense<2.0> : tensor<f64>
-  %result = "xla_hlo.min"(%lhs, %rhs) : (tensor<f64>, tensor<f64>) -> tensor<f64>
+  %result = "xla_hlo.minimum"(%lhs, %rhs) : (tensor<f64>, tensor<f64>) -> tensor<f64>
   return %result : tensor<f64>
 }
 // CHECK: f32=1
@@ -38,7 +38,7 @@
 func @negative() -> tensor<f32> {
   %lhs = iree.unfoldable_constant dense<1.0> : tensor<f32>
   %rhs = iree.unfoldable_constant dense<-2.0> : tensor<f32>
-  %result = "xla_hlo.min"(%lhs, %rhs) : (tensor<f32>, tensor<f32>) -> tensor<f32>
+  %result = "xla_hlo.minimum"(%lhs, %rhs) : (tensor<f32>, tensor<f32>) -> tensor<f32>
   return %result : tensor<f32>
 }
 // CHECK: f32=-2
diff --git a/test/e2e/xla/max_int.mlir b/test/e2e/xla/max_int.mlir
index 69c3cd2..2f72332 100644
--- a/test/e2e/xla/max_int.mlir
+++ b/test/e2e/xla/max_int.mlir
@@ -5,7 +5,7 @@
 func @tensor() -> tensor<4xi32> {
   %lhs = iree.unfoldable_constant dense<[1, 6, 7, 8]> : tensor<4xi32>
   %rhs = iree.unfoldable_constant dense<[5, 6, 3, 8]> : tensor<4xi32>
-  %result = "xla_hlo.max"(%lhs, %rhs) : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi32>
+  %result = "xla_hlo.maximum"(%lhs, %rhs) : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi32>
   return %result : tensor<4xi32>
 }
 // CHECK: 4xi32=5 6 7 8
@@ -16,7 +16,7 @@
 func @tensor_odd_dim() -> tensor<3xi32> {
   %lhs = iree.unfoldable_constant dense<[1, 6, 7]> : tensor<3xi32>
   %rhs = iree.unfoldable_constant dense<[5, 6, 3]> : tensor<3xi32>
-  %result = "xla_hlo.max"(%lhs, %rhs) : (tensor<3xi32>, tensor<3xi32>) -> tensor<3xi32>
+  %result = "xla_hlo.maximum"(%lhs, %rhs) : (tensor<3xi32>, tensor<3xi32>) -> tensor<3xi32>
   return %result : tensor<3xi32>
 }
 // CHECK: 3xi32=5 6 7
@@ -27,7 +27,7 @@
 func @scalar() -> tensor<i32> {
   %lhs = iree.unfoldable_constant dense<1> : tensor<i32>
   %rhs = iree.unfoldable_constant dense<2> : tensor<i32>
-  %result = "xla_hlo.max"(%lhs, %rhs) : (tensor<i32>, tensor<i32>) -> tensor<i32>
+  %result = "xla_hlo.maximum"(%lhs, %rhs) : (tensor<i32>, tensor<i32>) -> tensor<i32>
   return %result : tensor<i32>
 }
 // CHECK: i32=2
@@ -38,7 +38,7 @@
 func @negative() -> tensor<i32> {
   %lhs = iree.unfoldable_constant dense<1> : tensor<i32>
   %rhs = iree.unfoldable_constant dense<-2> : tensor<i32>
-  %result = "xla_hlo.max"(%lhs, %rhs) : (tensor<i32>, tensor<i32>) -> tensor<i32>
+  %result = "xla_hlo.maximum"(%lhs, %rhs) : (tensor<i32>, tensor<i32>) -> tensor<i32>
   return %result : tensor<i32>
 }
 // CHECK: i32=1
@@ -49,7 +49,7 @@
 func @i8() -> tensor<i8> {
   %lhs = iree.unfoldable_constant dense<1> : tensor<i8>
   %rhs = iree.unfoldable_constant dense<2> : tensor<i8>
-  %result = "xla_hlo.max"(%lhs, %rhs) : (tensor<i8>, tensor<i8>) -> tensor<i8>
+  %result = "xla_hlo.maximum"(%lhs, %rhs) : (tensor<i8>, tensor<i8>) -> tensor<i8>
   return %result : tensor<i8>
 }
 // CHECK: i8=2
@@ -60,7 +60,7 @@
 func @i16() -> tensor<i16> {
   %lhs = iree.unfoldable_constant dense<1> : tensor<i16>
   %rhs = iree.unfoldable_constant dense<2> : tensor<i16>
-  %result = "xla_hlo.max"(%lhs, %rhs) : (tensor<i16>, tensor<i16>) -> tensor<i16>
+  %result = "xla_hlo.maximum"(%lhs, %rhs) : (tensor<i16>, tensor<i16>) -> tensor<i16>
   return %result : tensor<i16>
 }
 // CHECK: i16=2
@@ -71,7 +71,7 @@
 func @i64() -> tensor<i64> {
   %lhs = iree.unfoldable_constant dense<1> : tensor<i64>
   %rhs = iree.unfoldable_constant dense<2> : tensor<i64>
-  %result = "xla_hlo.max"(%lhs, %rhs) : (tensor<i64>, tensor<i64>) -> tensor<i64>
+  %result = "xla_hlo.maximum"(%lhs, %rhs) : (tensor<i64>, tensor<i64>) -> tensor<i64>
   return %result : tensor<i64>
 }
 // CHECK: i32=2
diff --git a/test/e2e/xla/min_float.mlir b/test/e2e/xla/min_float.mlir
index 7a2ceba..01dfe9a 100644
--- a/test/e2e/xla/min_float.mlir
+++ b/test/e2e/xla/min_float.mlir
@@ -5,7 +5,7 @@
 func @tensor() -> tensor<4xf32> {
   %lhs = iree.unfoldable_constant dense<[1.0, 2.0, 7.0, 4.0]> : tensor<4xf32>
   %rhs = iree.unfoldable_constant dense<[5.0, 2.0, 3.0, 4.0]> : tensor<4xf32>
-  %result = "xla_hlo.min"(%lhs, %rhs) : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>
+  %result = "xla_hlo.minimum"(%lhs, %rhs) : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>
   return %result : tensor<4xf32>
 }
 // CHECK: 4xf32=1 2 3 4
@@ -16,7 +16,7 @@
 func @scalar() -> tensor<f32> {
   %lhs = iree.unfoldable_constant dense<1.0> : tensor<f32>
   %rhs = iree.unfoldable_constant dense<2.0> : tensor<f32>
-  %result = "xla_hlo.min"(%lhs, %rhs) : (tensor<f32>, tensor<f32>) -> tensor<f32>
+  %result = "xla_hlo.minimum"(%lhs, %rhs) : (tensor<f32>, tensor<f32>) -> tensor<f32>
   return %result : tensor<f32>
 }
 // CHECK: f32=1
@@ -27,7 +27,7 @@
 func @double() -> tensor<f64> {
   %lhs = iree.unfoldable_constant dense<1.0> : tensor<f64>
   %rhs = iree.unfoldable_constant dense<2.0> : tensor<f64>
-  %result = "xla_hlo.min"(%lhs, %rhs) : (tensor<f64>, tensor<f64>) -> tensor<f64>
+  %result = "xla_hlo.minimum"(%lhs, %rhs) : (tensor<f64>, tensor<f64>) -> tensor<f64>
   return %result : tensor<f64>
 }
 // CHECK: f32=1
@@ -38,7 +38,7 @@
 func @negative() -> tensor<f32> {
   %lhs = iree.unfoldable_constant dense<1.0> : tensor<f32>
   %rhs = iree.unfoldable_constant dense<-2.0> : tensor<f32>
-  %result = "xla_hlo.min"(%lhs, %rhs) : (tensor<f32>, tensor<f32>) -> tensor<f32>
+  %result = "xla_hlo.minimum"(%lhs, %rhs) : (tensor<f32>, tensor<f32>) -> tensor<f32>
   return %result : tensor<f32>
 }
 // CHECK: f32=-2
diff --git a/test/e2e/xla/min_int.mlir b/test/e2e/xla/min_int.mlir
index ea8f6cf..67c8183 100644
--- a/test/e2e/xla/min_int.mlir
+++ b/test/e2e/xla/min_int.mlir
@@ -5,7 +5,7 @@
 func @tensor() -> tensor<4xi32> {
   %lhs = iree.unfoldable_constant dense<[1, 2, 7, 4]> : tensor<4xi32>
   %rhs = iree.unfoldable_constant dense<[5, 2, 3, 4]> : tensor<4xi32>
-  %result = "xla_hlo.min"(%lhs, %rhs) : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi32>
+  %result = "xla_hlo.minimum"(%lhs, %rhs) : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi32>
   return %result : tensor<4xi32>
 }
 // CHECK: 4xi32=1 2 3 4
@@ -16,7 +16,7 @@
 func @tensor_odd_dim() -> tensor<3xi32> {
   %lhs = iree.unfoldable_constant dense<[1, 2, 7]> : tensor<3xi32>
   %rhs = iree.unfoldable_constant dense<[5, 2, 3]> : tensor<3xi32>
-  %result = "xla_hlo.min"(%lhs, %rhs) : (tensor<3xi32>, tensor<3xi32>) -> tensor<3xi32>
+  %result = "xla_hlo.minimum"(%lhs, %rhs) : (tensor<3xi32>, tensor<3xi32>) -> tensor<3xi32>
   return %result : tensor<3xi32>
 }
 // CHECK: 3xi32=1 2 3
@@ -27,7 +27,7 @@
 func @scalar() -> tensor<i32> {
   %lhs = iree.unfoldable_constant dense<1> : tensor<i32>
   %rhs = iree.unfoldable_constant dense<2> : tensor<i32>
-  %result = "xla_hlo.min"(%lhs, %rhs) : (tensor<i32>, tensor<i32>) -> tensor<i32>
+  %result = "xla_hlo.minimum"(%lhs, %rhs) : (tensor<i32>, tensor<i32>) -> tensor<i32>
   return %result : tensor<i32>
 }
 // CHECK: i32=1
@@ -38,7 +38,7 @@
 func @negative() -> tensor<i32> {
   %lhs = iree.unfoldable_constant dense<1> : tensor<i32>
   %rhs = iree.unfoldable_constant dense<-2> : tensor<i32>
-  %result = "xla_hlo.min"(%lhs, %rhs) : (tensor<i32>, tensor<i32>) -> tensor<i32>
+  %result = "xla_hlo.minimum"(%lhs, %rhs) : (tensor<i32>, tensor<i32>) -> tensor<i32>
   return %result : tensor<i32>
 }
 // CHECK: i32=-2
@@ -49,7 +49,7 @@
 func @i8() -> tensor<i8> {
   %lhs = iree.unfoldable_constant dense<1> : tensor<i8>
   %rhs = iree.unfoldable_constant dense<2> : tensor<i8>
-  %result = "xla_hlo.min"(%lhs, %rhs) : (tensor<i8>, tensor<i8>) -> tensor<i8>
+  %result = "xla_hlo.minimum"(%lhs, %rhs) : (tensor<i8>, tensor<i8>) -> tensor<i8>
   return %result : tensor<i8>
 }
 // CHECK: i8=1
@@ -60,7 +60,7 @@
 func @i16() -> tensor<i16> {
   %lhs = iree.unfoldable_constant dense<1> : tensor<i16>
   %rhs = iree.unfoldable_constant dense<2> : tensor<i16>
-  %result = "xla_hlo.min"(%lhs, %rhs) : (tensor<i16>, tensor<i16>) -> tensor<i16>
+  %result = "xla_hlo.minimum"(%lhs, %rhs) : (tensor<i16>, tensor<i16>) -> tensor<i16>
   return %result : tensor<i16>
 }
 // CHECK: i16=1
@@ -71,7 +71,7 @@
 func @i64() -> tensor<i64> {
   %lhs = iree.unfoldable_constant dense<1> : tensor<i64>
   %rhs = iree.unfoldable_constant dense<2> : tensor<i64>
-  %result = "xla_hlo.min"(%lhs, %rhs) : (tensor<i64>, tensor<i64>) -> tensor<i64>
+  %result = "xla_hlo.minimum"(%lhs, %rhs) : (tensor<i64>, tensor<i64>) -> tensor<i64>
   return %result : tensor<i64>
 }
 // CHECK: i32=1
diff --git a/test/e2e/xla/mnist.mlir b/test/e2e/xla/mnist.mlir
index 7ce5ef7..2f64e3f 100644
--- a/test/e2e/xla/mnist.mlir
+++ b/test/e2e/xla/mnist.mlir
@@ -18,7 +18,7 @@
     %cst_1 = constant  {name = "constant.6"} dense<0.5> : tensor<128xf32>
     %5 = "xla_hlo.broadcast_in_dim"(%cst_1) {broadcast_dimensions = dense<1> : tensor<1xi64>, name = "broadcast.7"} : (tensor<128xf32>) -> tensor<1x128xf32>
     %6 = "xla_hlo.add"(%4, %5) {name = "add.8"} : (tensor<1x128xf32>, tensor<1x128xf32>) -> tensor<1x128xf32>
-    %7 = "xla_hlo.max"(%0, %6) {name = "maximum.11"} : (tensor<1x128xf32>, tensor<1x128xf32>) -> tensor<1x128xf32>
+    %7 = "xla_hlo.maximum"(%0, %6) {name = "maximum.11"} : (tensor<1x128xf32>, tensor<1x128xf32>) -> tensor<1x128xf32>
     %cst_2 = constant  {name = "constant.12"} dense<0.5> : tensor<128x10xf32>
     %8 = "xla_hlo.dot"(%7, %cst_2) {name = "dot.13", precision_config = ["DEFAULT", "DEFAULT"]} : (tensor<1x128xf32>, tensor<128x10xf32>) -> tensor<1x10xf32>
     %cst_3 = constant  {name = "constant.14"} dense<0.5> : tensor<10xf32>
@@ -27,11 +27,11 @@
     %cst_4 = constant  {name = "constant.17"} dense<0xFF800000> : tensor<f32>
     %11 = "xla_hlo.reduce"(%10, %cst_4) ( {
     ^bb0(%arg1: tensor<f32>, %arg2: tensor<f32>):   // no predecessors
-      %20 = "xla_hlo.max"(%arg1, %arg2) {name = "maximum.21"} : (tensor<f32>, tensor<f32>) -> tensor<f32>
+      %20 = "xla_hlo.maximum"(%arg1, %arg2) {name = "maximum.21"} : (tensor<f32>, tensor<f32>) -> tensor<f32>
       "xla_hlo.return"(%20) : (tensor<f32>) -> ()
     }) {dimensions = dense<1> : tensor<1xi64>} : (tensor<1x10xf32>, tensor<f32>) -> tensor<1xf32>
     %12 = "xla_hlo.broadcast_in_dim"(%11) {broadcast_dimensions = dense<0> : tensor<1xi64>, name = "broadcast.23"} : (tensor<1xf32>) -> tensor<1x10xf32>
-    %13 = "xla_hlo.sub"(%10, %12) {name = "subtract.24"} : (tensor<1x10xf32>, tensor<1x10xf32>) -> tensor<1x10xf32>
+    %13 = "xla_hlo.subtract"(%10, %12) {name = "subtract.24"} : (tensor<1x10xf32>, tensor<1x10xf32>) -> tensor<1x10xf32>
     %14 = "xla_hlo.exp"(%13) {name = "exponential.25"} : (tensor<1x10xf32>) -> tensor<1x10xf32>
     %cst_5 = constant  {name = "constant.27"} dense<0.5> : tensor<f32>
     %15 = "xla_hlo.reduce"(%14, %cst_5) ( {
@@ -40,7 +40,7 @@
       "xla_hlo.return"(%21) : (tensor<f32>) -> ()
     }) {dimensions = dense<1> : tensor<1xi64>} : (tensor<1x10xf32>, tensor<f32>) -> tensor<1xf32>
     %16 = "xla_hlo.broadcast_in_dim"(%15) {broadcast_dimensions = dense<0> : tensor<1xi64>, name = "broadcast.34"} : (tensor<1xf32>) -> tensor<1x10xf32>
-    %17 = "xla_hlo.div"(%14, %16) {name = "divide.35"} : (tensor<1x10xf32>, tensor<1x10xf32>) -> tensor<1x10xf32>
+    %17 = "xla_hlo.divide"(%14, %16) {name = "divide.35"} : (tensor<1x10xf32>, tensor<1x10xf32>) -> tensor<1x10xf32>
     %18 = "xla_hlo.reshape"(%17) {name = "reshape.36"} : (tensor<1x10xf32>) -> tensor<1x10xf32>
     %19 = "xla_hlo.tuple"(%18) {name = "tuple.37"} : (tensor<1x10xf32>) -> tuple<tensor<1x10xf32>>
     return %19 : tuple<tensor<1x10xf32>>
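
Note that the "name" attributes in this file already carried the long spellings ("maximum.11", "subtract.24", "divide.35"), so after the rename the op mnemonic and the recorded instruction name agree. For illustration (%a, %b, and "divide.0" are placeholders, not from this file):

  %q = "xla_hlo.divide"(%a, %b) {name = "divide.0"} : (tensor<f32>, tensor<f32>) -> tensor<f32>
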
diff --git a/test/e2e/xla/reduce_float.mlir b/test/e2e/xla/reduce_float.mlir
index 118d170..341dd91 100644
--- a/test/e2e/xla/reduce_float.mlir
+++ b/test/e2e/xla/reduce_float.mlir
@@ -25,7 +25,7 @@
   %2 = "xla_hlo.reduce"(%0, %1)
   ( {
   ^bb0(%arg0: tensor<f32>, %arg1: tensor<f32>):   // no predecessors
-      %3 = "xla_hlo.max"(%arg0, %arg1) : (tensor<f32>, tensor<f32>) -> tensor<f32>
+      %3 = "xla_hlo.maximum"(%arg0, %arg1) : (tensor<f32>, tensor<f32>) -> tensor<f32>
       "xla_hlo.return"(%3) : (tensor<f32>) -> ()
   })
   {dimensions = dense<1> : tensor<1xi64>} : (tensor<1x10xf32>, tensor<f32>) -> tensor<1xf32>
@@ -42,7 +42,7 @@
   %1 = iree.unfoldable_constant dense<999.0> : tensor<f32>
   %2 = "xla_hlo.reduce"(%0, %1) ( {
   ^bb0(%arg0: tensor<f32>, %arg1: tensor<f32>):   // no predecessors
-      %3 = "xla_hlo.min"(%arg0, %arg1) : (tensor<f32>, tensor<f32>) -> tensor<f32>
+      %3 = "xla_hlo.minimum"(%arg0, %arg1) : (tensor<f32>, tensor<f32>) -> tensor<f32>
       "xla_hlo.return"(%3) : (tensor<f32>) -> ()
   }) {dimensions = dense<[1, 2]> : tensor<2xi64>} : (tensor<5x1x1xf32>, tensor<f32>) -> tensor<5xf32>
   return %2 : tensor<5xf32>
diff --git a/test/e2e/xla/reduce_int.mlir b/test/e2e/xla/reduce_int.mlir
index 4756352..149fd9f 100644
--- a/test/e2e/xla/reduce_int.mlir
+++ b/test/e2e/xla/reduce_int.mlir
@@ -24,7 +24,7 @@
   %1 = iree.unfoldable_constant dense<0> : tensor<i32>
   %2 = "xla_hlo.reduce"(%0, %1) ( {
   ^bb0(%arg0: tensor<i32>, %arg1: tensor<i32>):   // no predecessors
-    %3 = "xla_hlo.max"(%arg0, %arg1) : (tensor<i32>, tensor<i32>) -> tensor<i32>
+    %3 = "xla_hlo.maximum"(%arg0, %arg1) : (tensor<i32>, tensor<i32>) -> tensor<i32>
     "xla_hlo.return"(%3) : (tensor<i32>) -> ()
   }) {dimensions = dense<1> : tensor<1xi64>} : (tensor<1x10xi32>, tensor<i32>) -> tensor<1xi32>
   return %2 : tensor<1xi32>
@@ -40,7 +40,7 @@
   %1 = iree.unfoldable_constant dense<999> : tensor<i32>
   %2 = "xla_hlo.reduce"(%0, %1) ( {
   ^bb0(%arg0: tensor<i32>, %arg1: tensor<i32>):   // no predecessors
-    %3 = "xla_hlo.min"(%arg0, %arg1) : (tensor<i32>, tensor<i32>) -> tensor<i32>
+    %3 = "xla_hlo.minimum"(%arg0, %arg1) : (tensor<i32>, tensor<i32>) -> tensor<i32>
     "xla_hlo.return"(%3) : (tensor<i32>) -> ()
   }) {dimensions = dense<[1, 2]> : tensor<2xi64>} : (tensor<5x1x1xi32>, tensor<i32>) -> tensor<5xi32>
   return %2 : tensor<5xi32>
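
In the reduction tests the renamed ops serve as the combinator of an xla_hlo.reduce region rather than standing alone. A minimal sketch of the pattern, under the same conventions as the tests above (shapes and values are illustrative):

func @reduce_max_example() -> tensor<1xi32> {
  %0 = iree.unfoldable_constant dense<[[1, 2, 3]]> : tensor<1x3xi32>
  %1 = iree.unfoldable_constant dense<0> : tensor<i32>
  %2 = "xla_hlo.reduce"(%0, %1) ( {
  ^bb0(%arg0: tensor<i32>, %arg1: tensor<i32>):
    // The combinator uses the renamed mnemonic, formerly "xla_hlo.max".
    %3 = "xla_hlo.maximum"(%arg0, %arg1) : (tensor<i32>, tensor<i32>) -> tensor<i32>
    "xla_hlo.return"(%3) : (tensor<i32>) -> ()
  }) {dimensions = dense<1> : tensor<1xi64>} : (tensor<1x3xi32>, tensor<i32>) -> tensor<1xi32>
  return %2 : tensor<1xi32>
}
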
diff --git a/test/e2e/xla/through_std.mlir b/test/e2e/xla/through_std.mlir
index f447fbe..7ecac23 100644
--- a/test/e2e/xla/through_std.mlir
+++ b/test/e2e/xla/through_std.mlir
@@ -5,7 +5,7 @@
 func @xla_through_stdops () -> (tensor<f32>, tensor<f32>) {
   %tf32 = iree.unfoldable_constant dense<1.0> : tensor<f32>
   %0 = "xla_hlo.add"(%tf32, %tf32) : (tensor<f32>, tensor<f32>) -> tensor<f32>
-  %1 = "xla_hlo.mul"(%tf32, %tf32) : (tensor<f32>, tensor<f32>) -> tensor<f32>
+  %1 = "xla_hlo.multiply"(%tf32, %tf32) : (tensor<f32>, tensor<f32>) -> tensor<f32>
   return %0, %1 : tensor<f32>, tensor<f32>
 }
 // CHECK: f32=2
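
These are execution tests: the CHECK lines assert the values printed after running the module, so the rename must be purely syntactic. A hedged sketch of a runtime check for the renamed multiply (function name and constants are illustrative, not taken from this test):

func @multiply_example() -> tensor<f32> {
  %c = iree.unfoldable_constant dense<2.0> : tensor<f32>
  // 2.0 * 2.0; the op formerly spelled "xla_hlo.mul" computes the same product.
  %0 = "xla_hlo.multiply"(%c, %c) : (tensor<f32>, tensor<f32>) -> tensor<f32>
  return %0 : tensor<f32>
}
// CHECK: f32=4
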
diff --git a/test/e2e/xla/unidirectional_lstm.mlir b/test/e2e/xla/unidirectional_lstm.mlir
index d2079ff..af24375 100644
--- a/test/e2e/xla/unidirectional_lstm.mlir
+++ b/test/e2e/xla/unidirectional_lstm.mlir
@@ -11,15 +11,15 @@
 // some calls from @main and the call graphs of the removed callees.
 
 func @Min_reduction.47(%arg0: tensor<f32>, %arg1: tensor<f32>) -> tensor<f32> attributes { sym_visibility = "private" } {
-  %0 = xla_hlo.min %arg0, %arg1 : tensor<f32>
+  %0 = xla_hlo.minimum %arg0, %arg1 : tensor<f32>
   return %0 : tensor<f32>
 }
 func @Max_reduction.51(%arg0: tensor<i32>, %arg1: tensor<i32>) -> tensor<i32> attributes { sym_visibility = "private" } {
-  %0 = xla_hlo.max %arg0, %arg1 : tensor<i32>
+  %0 = xla_hlo.maximum %arg0, %arg1 : tensor<i32>
   return %0 : tensor<i32>
 }
 func @Max_1_reduction.55(%arg0: tensor<i32>, %arg1: tensor<i32>) -> tensor<i32> attributes { sym_visibility = "private" } {
-  %0 = xla_hlo.max %arg0, %arg1 : tensor<i32>
+  %0 = xla_hlo.maximum %arg0, %arg1 : tensor<i32>
   return %0 : tensor<i32>
 }
 func @ForwardLoopCond_gFAnjWGSoLs__.167(%arg0: tuple<tensor<i64>, tensor<i64>, tensor<40xf32>, tensor<i64>, tensor<74x40xf32>, tensor<i64>, tensor<1x10xf32>, tensor<1x10xf32>, tensor<5x1x64xf32>, tensor<5x1x1xf32>, tensor<5x1x1xf32>, tensor<5xi64>, tensor<5x1x10xf32>, tensor<5x1x10xf32>>) -> tuple<tensor<i1>> attributes { sym_visibility = "private" } {
@@ -36,7 +36,7 @@
   %1 = "xla_hlo.convert"(%cst_0) : (tensor<f32>) -> tensor<f32>
   %2 = "xla_hlo.reduce"(%0, %1) ( {
   ^bb0(%arg5: tensor<f32>, %arg6: tensor<f32>):
-    %42 = xla_hlo.min %arg5, %arg6 : tensor<f32>
+    %42 = xla_hlo.minimum %arg5, %arg6 : tensor<f32>
     "xla_hlo.return"(%42) : (tensor<f32>) -> ()
   }) {dimensions = dense<[1, 2]> : tensor<2xi64>} : (tensor<5x1x1xf32>, tensor<f32>) -> tensor<5xf32>
   %3 = "xla_hlo.convert"(%2) : (tensor<5xf32>) -> tensor<5xf32>
@@ -45,17 +45,17 @@
   %5 = "xla_hlo.compare"(%3, %4) {comparison_direction = "EQ"} : (tensor<5xf32>, tensor<5xf32>) -> tensor<5xi1>
   %6 = "xla_hlo.convert"(%5) : (tensor<5xi1>) -> tensor<5xi32>
   %cst_2 = constant  dense<[1, 2, 3, 4, 5]> : tensor<5xi32>
-  %7 = xla_hlo.mul %6, %cst_2 : tensor<5xi32>
+  %7 = xla_hlo.multiply %6, %cst_2 : tensor<5xi32>
   %8 = "xla_hlo.convert"(%7) : (tensor<5xi32>) -> tensor<5xi32>
   %cst_3 = constant dense<-2147483648> : tensor<i32>
   %9 = "xla_hlo.convert"(%cst_3) : (tensor<i32>) -> tensor<i32>
   %10 = "xla_hlo.reduce"(%8, %9) ( {
   ^bb0(%arg5: tensor<i32>, %arg6: tensor<i32>):
-    %42 = xla_hlo.max %arg5, %arg6 : tensor<i32>
+    %42 = xla_hlo.maximum %arg5, %arg6 : tensor<i32>
     "xla_hlo.return"(%42) : (tensor<i32>) -> ()
   }) {dimensions = dense<0> : tensor<1xi64>} : (tensor<5xi32>, tensor<i32>) -> tensor<i32>
   %11 = "xla_hlo.convert"(%10) : (tensor<i32>) -> tensor<i32>
-  %12 = xla_hlo.sub %cst, %11 : tensor<i32>
+  %12 = xla_hlo.subtract %cst, %11 : tensor<i32>
   %cst_4 = constant dense<5> : tensor<i32>
   %13 = "xla_hlo.compare"(%12, %cst_4) {comparison_direction = "EQ"} : (tensor<i32>, tensor<i32>) -> tensor<i1>
   %cst_5 = constant dense<0> : tensor<i32>
@@ -66,21 +66,21 @@
   %16 = "xla_hlo.compare"(%14, %15) {comparison_direction = "EQ"} : (tensor<5xf32>, tensor<5xf32>) -> tensor<5xi1>
   %17 = "xla_hlo.convert"(%16) : (tensor<5xi1>) -> tensor<5xi32>
   %cst_8 = constant  dense<[1, 2, 3, 4, 5]> : tensor<5xi32>
-  %18 = xla_hlo.mul %17, %cst_8 : tensor<5xi32>
+  %18 = xla_hlo.multiply %17, %cst_8 : tensor<5xi32>
   %19 = "xla_hlo.convert"(%18) : (tensor<5xi32>) -> tensor<5xi32>
   %cst_9 = constant dense<-2147483648> : tensor<i32>
   %20 = "xla_hlo.convert"(%cst_9) : (tensor<i32>) -> tensor<i32>
   %21 = "xla_hlo.reduce"(%19, %20) ( {
   ^bb0(%arg5: tensor<i32>, %arg6: tensor<i32>):
-    %42 = xla_hlo.max %arg5, %arg6 : tensor<i32>
+    %42 = xla_hlo.maximum %arg5, %arg6 : tensor<i32>
     "xla_hlo.return"(%42) : (tensor<i32>) -> ()
   }) {dimensions = dense<0> : tensor<1xi64>} : (tensor<5xi32>, tensor<i32>) -> tensor<i32>
   %22 = "xla_hlo.convert"(%21) : (tensor<i32>) -> tensor<i32>
-  %23 = xla_hlo.sub %cst_6, %22 : tensor<i32>
+  %23 = xla_hlo.subtract %cst_6, %22 : tensor<i32>
   %24 = "xla_hlo.select"(%13, %cst_5, %23) : (tensor<i1>, tensor<i32>, tensor<i32>) -> tensor<i32>
   %25 = "xla_hlo.convert"(%24) : (tensor<i32>) -> tensor<i64>
   %cst_10 = constant dense<5> : tensor<i32>
-  %26 = xla_hlo.sub %cst_10, %12 : tensor<i32>
+  %26 = xla_hlo.subtract %cst_10, %12 : tensor<i32>
   %27 = "xla_hlo.convert"(%26) : (tensor<i32>) -> tensor<i64>
   %cst_11 = constant dense<0.000000e+00> : tensor<f32>
   %28 = "xla_hlo.broadcast_in_dim"(%cst_11) {broadcast_dimensions = dense<[]> : tensor<0xi64>} : (tensor<f32>) -> tensor<40xf32>
@@ -115,7 +115,7 @@
     %52 = "xla_hlo.broadcast_in_dim"(%51) {broadcast_dimensions = dense<0> : tensor<1xi64>} : (tensor<1xf32>) -> tensor<1x10xf32>
     %cst_19 = constant dense<1.000000e+00> : tensor<f32>
     %53 = "xla_hlo.broadcast_in_dim"(%cst_19) {broadcast_dimensions = dense<[]> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x10xf32>
-    %54 = xla_hlo.mul %52, %53 : tensor<1x10xf32>
+    %54 = xla_hlo.multiply %52, %53 : tensor<1x10xf32>
     %cst_20 = constant dense<0.000000e+00> : tensor<f32>
     %55 = "xla_hlo.broadcast_in_dim"(%cst_20) {broadcast_dimensions = dense<[]> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x10xf32>
     %56 = "xla_hlo.compare"(%54, %55) {comparison_direction = "GT"} : (tensor<1x10xf32>, tensor<1x10xf32>) -> tensor<1x10xi1>
@@ -133,36 +133,36 @@
     %67 = "xla_hlo.reshape"(%45) : (tensor<40xf32>) -> tensor<1x40xf32>
     %68 = xla_hlo.add %66, %67 : tensor<1x40xf32>
     %69 = "xla_hlo.slice"(%68) {limit_indices = dense<[1, 30]> : tensor<2xi64>, start_indices = dense<[0, 20]> : tensor<2xi64>, strides = dense<1> : tensor<2xi64>} : (tensor<1x40xf32>) -> tensor<1x10xf32>
-    %70 = xla_hlo.mul %60, %69 : tensor<1x10xf32>
+    %70 = xla_hlo.multiply %60, %69 : tensor<1x10xf32>
     %71 = "xla_hlo.tanh"(%70) : (tensor<1x10xf32>) -> tensor<1x10xf32>
-    %72 = xla_hlo.mul %59, %71 : tensor<1x10xf32>
+    %72 = xla_hlo.multiply %59, %71 : tensor<1x10xf32>
     %73 = xla_hlo.add %58, %72 : tensor<1x10xf32>
-    %74 = xla_hlo.mul %73, %57 : tensor<1x10xf32>
+    %74 = xla_hlo.multiply %73, %57 : tensor<1x10xf32>
     %cst_22 = constant dense<5.000000e-01> : tensor<f32>
     %75 = "xla_hlo.broadcast_in_dim"(%cst_22) {broadcast_dimensions = dense<[]> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x10xf32>
     %76 = "xla_hlo.broadcast_in_dim"(%cst_22) {broadcast_dimensions = dense<[]> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x10xf32>
     %77 = "xla_hlo.broadcast_in_dim"(%cst_22) {broadcast_dimensions = dense<[]> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x10xf32>
     %78 = "xla_hlo.slice"(%68) {limit_indices = dense<[1, 20]> : tensor<2xi64>, start_indices = dense<[0, 10]> : tensor<2xi64>, strides = dense<1> : tensor<2xi64>} : (tensor<1x40xf32>) -> tensor<1x10xf32>
-    %79 = xla_hlo.mul %77, %78 : tensor<1x10xf32>
+    %79 = xla_hlo.multiply %77, %78 : tensor<1x10xf32>
     %80 = "xla_hlo.tanh"(%79) : (tensor<1x10xf32>) -> tensor<1x10xf32>
-    %81 = xla_hlo.mul %76, %80 : tensor<1x10xf32>
+    %81 = xla_hlo.multiply %76, %80 : tensor<1x10xf32>
     %82 = xla_hlo.add %75, %81 : tensor<1x10xf32>
     %83 = "xla_hlo.slice"(%68) {limit_indices = dense<[1, 10]> : tensor<2xi64>, start_indices = dense<0> : tensor<2xi64>, strides = dense<1> : tensor<2xi64>} : (tensor<1x40xf32>) -> tensor<1x10xf32>
     %84 = "xla_hlo.tanh"(%83) : (tensor<1x10xf32>) -> tensor<1x10xf32>
-    %85 = xla_hlo.mul %82, %84 : tensor<1x10xf32>
+    %85 = xla_hlo.multiply %82, %84 : tensor<1x10xf32>
     %86 = xla_hlo.add %74, %85 : tensor<1x10xf32>
     %cst_23 = constant dense<1.000000e+01> : tensor<f32>
     %87 = "xla_hlo.broadcast_in_dim"(%cst_23) {broadcast_dimensions = dense<[]> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x10xf32>
-    %88 = xla_hlo.min %86, %87 : tensor<1x10xf32>
+    %88 = xla_hlo.minimum %86, %87 : tensor<1x10xf32>
     %cst_24 = constant dense<-1.000000e+01> : tensor<f32>
     %89 = "xla_hlo.broadcast_in_dim"(%cst_24) {broadcast_dimensions = dense<[]> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x10xf32>
-    %90 = xla_hlo.max %88, %89 : tensor<1x10xf32>
+    %90 = xla_hlo.maximum %88, %89 : tensor<1x10xf32>
     %91 = "xla_hlo.select"(%56, %57, %90) : (tensor<1x10xi1>, tensor<1x10xf32>, tensor<1x10xf32>) -> tensor<1x10xf32>
     %92 = "xla_hlo.reshape"(%50) : (tensor<1x1xf32>) -> tensor<1xf32>
     %93 = "xla_hlo.broadcast_in_dim"(%92) {broadcast_dimensions = dense<0> : tensor<1xi64>} : (tensor<1xf32>) -> tensor<1x10xf32>
     %cst_25 = constant dense<1.000000e+00> : tensor<f32>
     %94 = "xla_hlo.broadcast_in_dim"(%cst_25) {broadcast_dimensions = dense<[]> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x10xf32>
-    %95 = xla_hlo.mul %93, %94 : tensor<1x10xf32>
+    %95 = xla_hlo.multiply %93, %94 : tensor<1x10xf32>
     %cst_26 = constant dense<0.000000e+00> : tensor<f32>
     %96 = "xla_hlo.broadcast_in_dim"(%cst_26) {broadcast_dimensions = dense<[]> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x10xf32>
     %97 = "xla_hlo.compare"(%95, %96) {comparison_direction = "GT"} : (tensor<1x10xf32>, tensor<1x10xf32>) -> tensor<1x10xi1>
@@ -171,12 +171,12 @@
     %99 = "xla_hlo.broadcast_in_dim"(%cst_27) {broadcast_dimensions = dense<[]> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x10xf32>
     %100 = "xla_hlo.broadcast_in_dim"(%cst_27) {broadcast_dimensions = dense<[]> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x10xf32>
     %101 = "xla_hlo.slice"(%68) {limit_indices = dense<[1, 40]> : tensor<2xi64>, start_indices = dense<[0, 30]> : tensor<2xi64>, strides = dense<1> : tensor<2xi64>} : (tensor<1x40xf32>) -> tensor<1x10xf32>
-    %102 = xla_hlo.mul %100, %101 : tensor<1x10xf32>
+    %102 = xla_hlo.multiply %100, %101 : tensor<1x10xf32>
     %103 = "xla_hlo.tanh"(%102) : (tensor<1x10xf32>) -> tensor<1x10xf32>
-    %104 = xla_hlo.mul %99, %103 : tensor<1x10xf32>
+    %104 = xla_hlo.multiply %99, %103 : tensor<1x10xf32>
     %105 = xla_hlo.add %98, %104 : tensor<1x10xf32>
     %106 = "xla_hlo.tanh"(%90) : (tensor<1x10xf32>) -> tensor<1x10xf32>
-    %107 = xla_hlo.mul %105, %106 : tensor<1x10xf32>
+    %107 = xla_hlo.multiply %105, %106 : tensor<1x10xf32>
     %108 = "xla_hlo.select"(%97, %63, %107) : (tensor<1x10xi1>, tensor<1x10xf32>, tensor<1x10xf32>) -> tensor<1x10xf32>
     %109 = "xla_hlo.get_tuple_element"(%arg5) {index = 10 : i32} : (tuple<tensor<i64>, tensor<i64>, tensor<40xf32>, tensor<i64>, tensor<74x40xf32>, tensor<i64>, tensor<1x10xf32>, tensor<1x10xf32>, tensor<5x1x64xf32>, tensor<5x1x1xf32>, tensor<5x1x1xf32>, tensor<5xi64>, tensor<5x1x10xf32>, tensor<5x1x10xf32>>) -> tensor<5x1x1xf32>
     %110 = "xla_hlo.get_tuple_element"(%arg5) {index = 11 : i32} : (tuple<tensor<i64>, tensor<i64>, tensor<40xf32>, tensor<i64>, tensor<74x40xf32>, tensor<i64>, tensor<1x10xf32>, tensor<1x10xf32>, tensor<5x1x64xf32>, tensor<5x1x1xf32>, tensor<5x1x1xf32>, tensor<5xi64>, tensor<5x1x10xf32>, tensor<5x1x10xf32>>) -> tensor<5xi64>
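
This file exercises both syntactic forms of the renamed ops: the generic quoted form and the shorthand pretty-printed form used throughout the loop body above. Both parse to the same op, so both spellings change together; side by side (%a and %b are placeholders):

  %0 = "xla_hlo.multiply"(%a, %b) : (tensor<1x10xf32>, tensor<1x10xf32>) -> tensor<1x10xf32>  // generic form
  %1 = xla_hlo.multiply %a, %b : tensor<1x10xf32>                                             // shorthand form
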
diff --git a/test/models/edge_detection.mlir b/test/models/edge_detection.mlir
index f90668d..9bfbd67 100644
--- a/test/models/edge_detection.mlir
+++ b/test/models/edge_detection.mlir
@@ -8,9 +8,9 @@
     %0 = xla_hlo.constant dense<[[[[-1.000000e+00]], [[0.000000e+00]], [[1.000000e+00]]], [[[-2.000000e+00]], [[0.000000e+00]], [[2.000000e+00]]], [[[-1.000000e+00]], [[0.000000e+00]], [[1.000000e+00]]]]> : tensor<3x3x1x1xf32>
     %1 = xla_hlo.constant dense<[[[[1.000000e+00]], [[2.000000e+00]], [[1.000000e+00]]], [[[0.000000e+00]], [[0.000000e+00]], [[0.000000e+00]]], [[[-1.000000e+00]], [[-2.000000e+00]], [[-1.000000e+00]]]]> : tensor<3x3x1x1xf32>
     %2 = "xla_hlo.conv"(%arg0, %0) {batch_group_count = 1 : i64, dimension_numbers = {input_batch_dimension = 0 : i64, input_feature_dimension = 3 : i64, input_spatial_dimensions = dense<[1, 2]> : tensor<2xi64>, kernel_input_feature_dimension = 2 : i64, kernel_output_feature_dimension = 3 : i64, kernel_spatial_dimensions = dense<[0, 1]> : tensor<2xi64>, output_batch_dimension = 0 : i64, output_feature_dimension = 3 : i64, output_spatial_dimensions = dense<[1, 2]> : tensor<2xi64>}, feature_group_count = 1 : i64, padding = dense<1> : tensor<2x2xi64>, rhs_dilation = dense<1> : tensor<2xi64>, window_strides = dense<1> : tensor<2xi64>} : (tensor<1x128x128x1xf32>, tensor<3x3x1x1xf32>) -> tensor<1x128x128x1xf32>
-    %3 = xla_hlo.mul %2, %2 : tensor<1x128x128x1xf32>
+    %3 = xla_hlo.multiply %2, %2 : tensor<1x128x128x1xf32>
     %4 = "xla_hlo.conv"(%arg0, %1) {batch_group_count = 1 : i64, dimension_numbers = {input_batch_dimension = 0 : i64, input_feature_dimension = 3 : i64, input_spatial_dimensions = dense<[1, 2]> : tensor<2xi64>, kernel_input_feature_dimension = 2 : i64, kernel_output_feature_dimension = 3 : i64, kernel_spatial_dimensions = dense<[0, 1]> : tensor<2xi64>, output_batch_dimension = 0 : i64, output_feature_dimension = 3 : i64, output_spatial_dimensions = dense<[1, 2]> : tensor<2xi64>}, feature_group_count = 1 : i64, padding = dense<1> : tensor<2x2xi64>, rhs_dilation = dense<1> : tensor<2xi64>, window_strides = dense<1> : tensor<2xi64>} : (tensor<1x128x128x1xf32>, tensor<3x3x1x1xf32>) -> tensor<1x128x128x1xf32>
-    %5 = xla_hlo.mul %4, %4 : tensor<1x128x128x1xf32>
+    %5 = xla_hlo.multiply %4, %4 : tensor<1x128x128x1xf32>
     %6 = xla_hlo.add %3, %5 : tensor<1x128x128x1xf32>
     %7 = "xla_hlo.sqrt"(%6) : (tensor<1x128x128x1xf32>) -> tensor<1x128x128x1xf32>
     return %7 : tensor<1x128x128x1xf32>
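
For context, this model computes a Sobel gradient magnitude: %3 and %5 are the squared horizontal and vertical gradient responses (each square formed with the renamed xla_hlo.multiply), and %7 takes sqrt(%3 + %5), i.e. magnitude = sqrt(gx^2 + gy^2).
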
diff --git a/test/models/mnist.mlir b/test/models/mnist.mlir
index 2b6df73..6802a7a 100644
--- a/test/models/mnist.mlir
+++ b/test/models/mnist.mlir
@@ -24,7 +24,7 @@
     %cst_1 = constant  {name = "constant.6"} dense<[0.00180979178, -0.00128747581, -0.001201501, -0.0360582806, 0.0236181393, 0.0454008877, 0.0120176105, 0.0890586376, -0.00548917893, 0.0535316467, -0.0195648093, 0.00574736623, -0.00725247897, 0.0310232546, 2.836130e-02, -0.0188486483, -0.00258476194, 0.0227473844, 0.00291113905, -0.0495234169, 0.00637712515, 0.0514211543, 0.00229622098, 0.0491134487, -0.0558668263, 0.0909168571, -0.011906784, 0.0151743693, -0.0105079701, 0.00263013947, 0.0189482253, -0.0108587798, 0.0751609355, -0.0358677395, 0.0590642393, -0.0775720253, 0.0589618646, 0.10349872, -0.00274306419, -0.0312502868, 0.00333261886, 0.00910504907, 0.0216336083, 0.056877967, 0.0864347144, 0.0466054045, 0.0586982816, -0.0203834437, -5.626260e-02, -0.017801106, -0.00396474544, -0.017970033, -0.0256265551, 0.0489372499, -0.00651295763, -0.0350460112, 0.0291766617, 0.00479861069, 0.0237274338, 0.0348542407, 0.0677872151, 0.0245250557, 0.0508054681, 0.0422587655, 0.0730319619, 0.0025935811, -0.0277704448, 9.64390754E-4, 0.00164659088, 0.0798677206, 0.133678138, 0.0322072394, 0.0558781214, 0.0193853434, 0.00515032187, -0.00355286896, 0.0668371543, 0.0142582273, 0.0781719386, 0.00483097089, 0.00813726429, 0.0445716828, 0.0245931037, 0.0166326128, 0.0474203937, 0.0324643776, -0.0310089402, 0.0192547292, 0.0636251494, -0.00645098882, 0.051795654, 0.0795653983, -0.0386578813, 0.0565713793, 0.00196879101, 0.0201079044, 0.11367888, 0.0214288831, 0.0393215232, 0.0381225236, 1.174720e-02, 0.0623513386, 0.0427070446, 0.0237223972, 0.0676762611, 0.0697290376, 0.0389622599, 0.00733669475, 0.0565525182, -0.00873772241, -0.0214094054, -0.0121570313, 0.0171261076, -0.042719733, -0.0709377304, -0.00414954033, 0.0314523205, 0.00450869836, 0.00449752575, 0.041478429, 0.0945244356, -0.0497375578, 0.0145085724, 0.105547592, -0.0437812582, 0.01818325, -0.0811406821, -0.0240946412]> : tensor<128xf32>
     %5 = "xla_hlo.broadcast_in_dim"(%cst_1) {broadcast_dimensions = dense<1> : tensor<1xi64>, name = "broadcast.7"} : (tensor<128xf32>) -> tensor<1x128xf32>
     %6 = "xla_hlo.add"(%4, %5) {name = "add.8"} : (tensor<1x128xf32>, tensor<1x128xf32>) -> tensor<1x128xf32>
-    %7 = "xla_hlo.max"(%0, %6) {name = "maximum.11"} : (tensor<1x128xf32>, tensor<1x128xf32>) -> tensor<1x128xf32>
+    %7 = "xla_hlo.maximum"(%0, %6) {name = "maximum.11"} : (tensor<1x128xf32>, tensor<1x128xf32>) -> tensor<1x128xf32>
    %cst_2 = constant  {name = "constant.12"} dense<[[-0.17363435, -0.0707960129, 6.038730e-02, 0.121469341, -8.579290e-02, -0.143173471, 0.228746504, -0.145717144, -0.135060593, 0.140740767], [5.333820e-03, -0.135003075, 0.107362889, 0.311269701, 0.107835442, -0.00957506895, -0.0113238972, 9.436320e-02, -0.0351445265, -0.249150515], [-0.060859222, -0.157955885, 0.0543048941, -0.010158102, -0.0983931943, 0.0325914212, -0.0221404675, 0.140480801, 0.219866768, 0.148706794], [-0.0231706034, 0.189667493, 0.339141279, 0.297713399, -0.400857776, -0.257582575, -0.42581442, 0.225847244, 0.0542479306, -0.00848766416], [0.155154109, 0.262343377, 0.258146912, 0.0306026582, 0.141189575, 0.127351299, 0.0131782508, -0.245702147, 0.185197026, -0.398140848], [-0.213607386, 0.0672283098, 0.236905813, -0.225607336, 0.0225677192, 0.0694317594, -0.113246329, 0.245288506, -0.0194486771, -0.210125729], [-0.181720048, -0.113102444, -0.258585364, 0.187338203, -0.00800009817, -0.126790538, -0.114432767, -0.0659572855, -0.186335564, 0.270344526], [-0.0809494108, 0.0859531611, 0.19903186, 0.156178951, 0.00449119089, 0.264205396, -0.14980799, -0.179858178, -0.161061853, -0.0697564408], [0.111823864, 0.139123663, -0.215125829, -0.237549827, -2.723160e-01, 0.0774042978, 0.173556924, -0.102436289, 0.132224619, 0.204164654], [0.166308358, -0.0898675844, -0.309499949, 0.262152076, 0.166523397, 0.350022405, -0.134389818, 0.0800818502, -0.250730485, 0.278443933], [-0.182188168, -0.0662540421, -0.16912438, 0.253069848, 2.666290e-01, 0.147774175, -0.181770161, -0.239382803, 0.180332914, 0.14152202], [-0.213285133, -0.119788148, 0.146969259, 0.00588027807, 0.285986096, -0.0156045202, 0.0618853755, -0.0404547676, 0.199955285, 0.00802243501], [0.121508963, -0.0461791642, -0.153700531, 0.167498261, 0.172541648, -0.0350595154, 0.201652139, 0.0710540265, 0.0755672902, -0.0469412282], [0.0830276161, -0.191616312, 0.383806795, 0.183871031, -0.349610031, 0.333669364, -0.168258801, -0.120922968, -0.0744043514, -0.157934666], [0.217354283, -0.0641109496, 0.199787721, -0.0672617331, -0.0763153731, 0.130093575, -0.212879151, -0.0289937686, 0.0341918021, 0.131831452], [0.101335697, -0.217161059, 0.323923737, -1.124570e-02, -0.243533522, 0.0500382632, 0.267564029, 0.162072048, 0.169035599, 0.0175218079], [0.0720043182, 0.181623921, 0.104101956, 0.0909626707, -0.0998992919, 0.107782066, 0.13765879, -0.166188747, 0.087232843, 0.201865882], [0.185548037, 0.0476345345, 0.0827088207, -0.145057097, -0.11100968, -0.125786468, 0.203262866, 0.204047367, -0.196534082, -0.170384556], [0.30700314, -0.0496439189, 0.179890692, 0.0554856658, -0.142839238, 0.127258584, -0.230494335, -0.00550242327, -0.0320532843, -3.000920e-01], [0.31118235, 0.00334946788, 0.111910984, -0.302830279, 0.121877529, -0.284978181, 0.0194577854, -0.261625707, 0.121911936, 0.138116539], [-0.19934088, -0.194442376, -0.0388123132, -0.0742789208, 0.0712441728, 8.882910e-02, 0.0401109159, -0.148758903, 0.0713230371, -0.238374695], [-0.00572971953, 0.0051445188, -0.241541177, 4.522450e-02, 0.0202827249, 0.229814544, 0.129838377, -0.156346813, -0.177438587, 0.307789236], [0.00586719438, -0.23780936, 0.0713653266, -0.12190327, 0.182632983, -0.209033892, -0.00922673941, -0.151551381, -0.16923815, 0.112413712], [-0.134286478, 0.121913657, -0.0402135141, 0.233039021, -0.206312865, 0.319349617, 0.00143496622, 0.254222333, 0.0337958224, -0.0956300869], [0.0190694649, -0.28896293, -0.209214076, 0.0274076108, 0.200228855, -0.165110469, -0.222465485, -0.150394216, 0.158042848, 0.0871670097], [0.131613567, -0.0569929108, 0.0477676541, -0.154282197, 0.177263156, 0.379474372, -0.198784053, 0.235023528, -0.152511865, -0.345084906], [-0.00747684622, 0.240096062, 0.0852414817, 0.149832889, -0.208482146, 0.108278856, 0.158599466, -0.261659771, 0.28667134, 0.00529837143], [-0.185448363, -0.142471164, -0.00380585133, 0.205363795, -0.115163729, -0.176866487, -0.1503966, 0.0945485532, -0.249040067, -0.146563202], [0.0507423021, -0.162201419, -0.104880594, -0.119434029, -0.0137509312, 0.0857468545, -0.14507775, -0.0310742967, 0.142568573, 0.137628734], [0.292243481, -0.198636204, -0.141308501, -0.0398842096, -0.23759377, 0.158741564, -0.255989969, 0.0791561231, 0.135853305, 0.0416310765], [0.111437723, 0.293795198, -7.0257741E-4, 0.0395522788, -0.213674441, -0.0549810939, 0.0761651173, 0.108064711, -0.258550107, 0.129741773], [-0.227937847, 0.310785294, -0.156648576, -0.0240825266, -0.0934058725, -0.0344269089, -0.145666569, -0.169869885, 0.240113556, -0.0654262826], [0.0319960192, 0.0288540535, 0.256446928, 0.1365605, -0.157555774, 0.301516473, 0.174934179, -0.267932802, -0.20990786, -0.289210945], [-0.0986520349, -0.0171110164, -0.221782967, 0.00511969253, 0.441806704, -0.197293893, -0.291264772, 0.219537139, 0.205427408, -0.0171957519], [0.218552783, -0.155077934, -0.0703493133, -0.0902480334, 0.232104361, 0.23108305, 0.134619236, 0.0748627409, -0.275893092, -0.0390154459], [0.381019801, -0.339646637, 0.0624736026, 0.133897841, -0.19746834, 6.436870e-02, -7.945660e-03, -0.362655699, 0.233752429, 0.0368957892], [-0.0580835491, 0.0224719848, 0.00401857076, -0.0762882828, 0.151949599, -0.221072719, 0.025190426, 0.327190787, -0.274966955, 0.248151094], [-0.394082546, 0.247909665, -0.188450053, 0.281511843, -3.341570e-01, 0.125203356, -0.141429037, -0.0718699395, -0.143214151, 0.229724839], [-0.0137553262, -0.228617609, -0.140930399, -2.799820e-01, -0.0553898327, 0.178554818, 0.130356207, -0.258220017, 0.202061623, 0.00610400829], [0.261053771, -0.287141502, 0.509392798, 0.163613275, -0.207967952, -0.279594868, 0.0682455376, -0.140134558, -0.330549598, 0.221153483], [-0.072769165, -0.107394435, -0.0957670435, 0.174531385, 0.0684501082, -0.00831179414, 0.189872384, -0.237616166, -0.0591602065, 0.125886753], [-1.765560e-01, 0.102832273, 0.0100140488, -0.0257020816, 0.181918576, 6.714420e-02, 0.100126542, -0.0761945695, 0.122161649, 0.059734337], [0.14992249, -0.042751465, -0.242604434, -0.0491485447, -0.30338645, 0.0788762643, -0.299695104, -0.13762036, 0.0150603065, 0.0795048698], [0.172013864, 0.206088021, 0.22058697, -0.125324488, 0.310980856, -0.216724753, 0.0800841823, 0.0215022508, -0.381094307, -0.271564096], [0.187171385, 0.162661836, -0.226146564, -0.101458885, 0.0100699505, 0.123740695, 0.225523502, 0.25284794, -0.159023091, 0.253711969], [-0.301451921, 0.0313369371, 0.00644301111, 0.0920508429, -0.123227581, 0.0879808217, -0.0213221256, 0.2880193, 0.00138571649, -0.0611888096], [-0.31910035, 0.235809758, 0.169649586, -0.34670642, 0.302606314, -0.222152904, 0.393572927, -0.348440439, -0.0699117631, 0.0882392675], [-0.0748971105, -0.431775421, -0.0966364666, -0.226431355, 0.382968128, -0.0831228048, -0.00299148099, -0.19580473, 0.0980860963, 0.250937313], [0.178527981, 0.076650247, -0.00415729918, 0.0951654613, -2.375450e-01, -0.216295198, 0.0110081164, -0.284828633, 0.192532718, -0.157802403], [0.299306184, -0.271432847, 0.183518261, 0.026472874, -0.0726066753, -0.0390495062, 0.107637577, 0.00404472463, 0.0666918904, -0.0401334688], [-0.030999558, 0.259209841, -0.0465492047, 0.0529589504, 0.0611805804, -0.0262979791, 0.0688910112, -7.44911667E-4, 0.167459026, -0.26001966], [0.255778253, -0.24220413, 0.350433469, 0.350896806, -0.244471326, 0.178872496, -0.100021079, 0.0284363627, 0.096491307, -0.193851605], [0.0302060489, -0.179981887, 0.0229005795, -0.280266136, 0.180455044, -0.0580929406, 0.355026275, 0.125420481, 0.255031645, -0.131591201], [-0.275648683, -0.0157170743, 0.260199428, -0.224597305, -0.303664744, 0.0232193582, 0.229699343, 0.366713554, -0.0230590478, -0.232098207], [0.141025662, -0.127063408, -0.18927443, 0.0753805339, -0.223815992, -0.0688226222, 0.171983212, 0.208703697, -0.0607432127, 0.26896134], [0.00368685625, 0.0151929613, 0.0179557707, 0.0680684745, -0.308445036, -0.0548807904, -0.0591526218, 0.15423733, 0.235452116, 0.145921558], [0.0204494651, -0.0814779475, 0.00520399632, 0.0311975423, 0.0860458388, 0.343610466, -0.118007861, 0.0334062725, -0.0875458344, -0.0238407608], [-0.0935574844, -0.194300473, -0.166375577, 0.0796527639, 0.0147504658, -0.0735810548, -0.0172830094, 0.168246791, 0.0553085096, -0.0942590088], [-0.238010228, 0.111086741, -0.146693811, -0.0474509485, -1.254460e-01, 0.079645589, -0.0469389521, 0.0198243875, -0.0817721113, -0.272092968], [-0.396736801, 0.332016051, -0.0264706276, 0.283282727, -0.2508744, 0.104432434, 0.00444465084, -0.0408442765, 0.254120976, -0.0482885838], [-0.09970849, 0.331348449, 0.00756065454, -9.879130e-02, -0.0698768348, 0.295237154, -0.216183186, 0.0113916555, 3.391220e-02, 0.232303575], [0.0236333422, -0.0779071822, 0.0288420524, 0.119856723, -0.169540137, -0.028769955, -0.0591493063, 0.276199877, -0.136728957, -0.200735331], [-0.201988384, 0.137718067, 0.00461096223, -0.176960647, 0.295218349, -0.14878659, -0.195739403, -0.124464899, -0.247669652, 0.331599027], [-0.0809967219, -0.0614988767, 0.0474593081, -0.0996292084, 0.292685807, 0.224856764, -0.0335971639, 0.0651911348, 0.0482374281, 0.0931988508], [-0.084696129, 0.178495869, -0.242035404, -0.146512449, 0.220621809, 0.247746885, 0.0751499459, 0.177417561, -0.163688406, -0.0584881492], [-0.111735344, 0.296827227, -0.0607453175, 0.00291904272, -0.162802532, -0.237988725, -0.245114595, 0.14962557, 0.0833254978, 0.282257915], [0.129119486, 0.00415245071, 0.221430361, -0.128097311, -0.291034788, -0.201655984, 0.240159303, -0.111073785, 0.156223252, 0.18907699], [0.0207739267, 0.202450186, 0.184913442, 0.0671677887, -0.15945904, 0.100802258, 0.204368159, 0.0663899928, 0.11844565, 0.00254672393], [0.147628516, -0.108610041, -0.120382845, 0.0630499422, 0.148190275, -0.173440784, -0.21185419, 0.116352729, -0.141718119, -4.59181669E-4], [6.394150e-02, -0.214126274, -0.262781709, -0.195635393, 0.10030704, 0.0192587748, 0.2180603, 0.276796103, -0.19745253, 0.250166684], [-0.264964819, -0.172969192, -0.320235193, 8.573610e-02, 0.251054466, 0.349534839, -0.0533160381, 0.215639099, -0.112659849, -0.117045388], [0.0715036839, 0.032501433, 0.290438265, -0.247630075, -0.144847691, -0.0191427749, 2.999250e-01, -0.257535666, -0.189960018, 0.130332768], [-0.203149259, 0.272928387, -7.304530e-02, 0.0234719831, -0.170207545, -0.11776901, -0.0428751446, -0.120133057, -0.110699892, -0.260757744], [-0.1397838, -0.126450539, 0.30517146, 0.184304431, -0.0930630564, -7.3810824E-4, 0.0160566941, 0.0380945243, -0.0847687795, -2.037490e-01], [-0.0283999704, -0.162262782, -0.0509804338, -0.0250009838, 0.205954537, 0.0246407222, -0.15305832, 0.01729705, 0.00185124797, 0.145579904], [0.107952237, 0.17434299, 0.0141776204, -0.128442273, 0.120245196, 0.0513597503, -0.0178853422, -0.165091217, 0.228567258, -0.205565557], [-0.272574723, 0.110321201, -0.249659762, 0.102074012, 0.214858428, 0.104048625, 0.260062635, -0.246641651, -0.162898168, -0.085429348], [0.0347707979, 0.121450476, 0.0601244941, -0.036875464, 0.0601903833, 0.0391768366, 0.177384406, 0.127554491, -0.00152025023, -0.0243941341], [0.182021648, -0.141452819, 0.0396866649, -0.0725521743, 0.0052056904, 6.541660e-02, -0.158098981, 0.368424326, -0.246160388, 0.0400460847], [-0.0415195115, -0.0794856697, 0.256927609, -0.0828218385, -0.333121389, 0.0690186173, 0.00953093916, -0.125028566, 0.0978005602, -0.0218165014], [0.254078567, -0.273427039, -0.0916603953, 0.284437299, -6.798040e-02, -0.186892554, 0.0270918794, 0.323385864, -0.13483052, -0.0614349432], [-0.0134747522, 0.229401737, 0.290741056, 0.134622812, 0.150844321, 0.0588404089, -0.0920286179, 0.243959576, 0.0415463932, -0.364745617], [0.0721129328, 0.335333854, -0.181168884, -0.0742811784, 0.202567935, -0.0618376844, -0.144345105, -0.109949991, -0.224407122, 0.046522852], [0.0943903848, 0.308363646, 0.20886232, -0.267283678, 0.0870875865, -0.266921341, -0.0927265435, -0.02150652, 0.0339837074, 2.529730e-01], [-0.202570871, -0.115333825, -0.0360322334, 0.0286047366, -0.061448779, 0.194968984, 0.439014375, -0.0769344866, -0.203588605, -0.392715514], [0.134818882, 0.0152576296, -0.10848958, -0.263930678, -0.0335265473, -0.0154081546, -0.152287707, 0.25470081, 0.0869387611, 0.104704529], [0.261753291, -0.241538271, -0.151260406, -0.0338756777, -0.26997748, 0.305188417, 0.0146879423, -0.280027121, 0.284972101, -0.0740737915], [-0.0162396524, -0.224762663, 0.0944506824, -0.119866684, -0.099188067, -0.0926269814, -0.168556571, 0.151416615, -0.148968905, -0.139955819], [0.102667071, 0.326664209, 0.0275067855, 0.244041294, -0.171143144, -0.274285197, 0.327633411, 0.179012597, -0.370987713, 0.0510097407], [-0.00375827751, -0.00335882837, 0.117868364, 0.126431495, -0.154993251, -0.133509174, 0.102136597, -0.116545178, -0.0755984187, 0.301457614], [0.298711061, -0.0935267061, -5.403840e-02, -0.380565524, 0.199013397, 0.402721375, -0.119996756, -0.236993909, 0.159043133, 0.0585093945], [-0.399404407, -0.0206954628, 0.176582664, 0.357191801, 0.255237281, 0.178168833, -0.0550820492, -0.18484962, -0.212347254, 0.0684524626], [-0.153790683, -0.262719393, 0.0698453411, 0.0273735207, 0.103013918, -0.169419676, 0.144809633, 0.140440971, 0.163974196, 0.267165035], [0.135177806, -0.14423652, 0.0656731352, 0.181822658, 0.0578323714, 7.627260e-02, -0.0565378331, 0.32406491, -0.211514086, -0.303026915], [0.124107487, 0.0760767832, -0.0458727963, 0.126206547, 0.167545691, -0.0626680106, -0.0209572129, 0.180526376, -0.0798619166, -0.175614953], [-0.046498958, -0.0247157011, -0.262536258, 0.260433704, -0.168176442, 0.218313709, -0.296603799, -0.208868355, -0.0718513504, 0.173542261], [0.0661545843, -0.153439388, 0.0171819553, -0.215688378, 0.0260388013, 0.222951055, -0.330462664, 0.361601323, -0.274192423, 0.140660316], [0.223756596, 0.158161566, -0.255715221, 0.0161067527, -0.227913514, -0.0569394939, 0.270599872, 0.09462893, 0.0185249355, -0.0796072855], [-0.292885542, 0.19097206, -3.517100e-02, -0.175302714, 0.135310337, -0.1766157, -0.286993176, 0.0181622561, 3.830600e-02, 0.0484179817], [-0.141062513, -0.277427912, 0.349744588, -0.174416408, 0.162776902, -0.23186703, -0.0472403578, 0.0647463575, -0.366720527, 0.143775359], [-0.259507775, 0.119444966, -0.228743598, -0.21865353, 0.255828887, 0.031033745, 0.198043287, 0.112054668, 0.289750516, 0.101838917], [0.0101497006, -0.153242037, -0.274101019, 0.098690778, -0.160164535, -0.0547989979, -0.317405343, 0.305983931, -0.121285819, 0.31579864], [-0.145706415, -0.127123475, 0.204059973, -0.160934925, -0.0418121815, 0.267757356, -0.0834247842, 0.210071325, 0.0511884503, -0.120828666], [-0.175574556, -0.207353354, 0.0341571234, -0.0626117662, 9.453990e-02, 0.351277143, -0.188232571, -0.0355442725, 0.104938827, -0.12084689], [-0.129702508, 0.307411969, 0.0600315407, 0.167721972, 0.159753412, 0.127536565, -0.267242491, 0.261240542, -0.202292293, -0.292796612], [-0.295962691, 0.114182763, 0.209769711, -0.229083464, 0.195489138, 0.307449818, 0.0942512751, -0.134523198, 0.189630061, -0.0974499732], [-6.004620e-02, -0.137981221, 0.0733050704, -0.281068265, -0.0486596823, 0.306077898, 0.30778569, -0.392285049, 0.109688483, -0.226047099], [-0.0333333872, 0.0985616743, -0.00378937298, 6.560670e-02, -0.0436274335, 0.19267787, 0.0276476815, 0.038748417, 0.0321611241, 0.238686085], [0.0775437951, 0.00480031688, 0.0890703275, 0.0813173428, 0.156091303, 0.207606182, 0.335114539, -0.150621563, -0.394196957, 0.268333822], [0.153643906, -0.335132509, -0.312138796, 5.368770e-02, 0.235048816, 0.0310786515, 0.15642716, 0.208705202, 0.215524375, -0.134175345], [0.10188897, -0.0959656313, 0.367264211, 0.222671688, -0.266171932, 0.110148534, 0.0319485664, 0.0366752297, 0.123669371, -0.175061852], [-0.097238183, -0.0620822273, 0.0445226021, -0.088823609, -0.187241077, -4.567820e-02, -0.202858582, -0.139873311, 0.146043435, 0.124500103], [-0.0526548438, -0.198502213, -0.145166814, -0.0782446712, -0.146434411, 0.185514018, 0.184155926, 0.161838412, -0.0683488697, 0.159353316], [-0.271061063, 0.371537626, 0.267237723, 0.377635479, 0.0745741948, -0.265771866, -0.263954222, 0.269931138, 0.189870864, -0.260571092], [0.123850107, -1.806440e-01, 0.321234286, 0.0514725558, -0.0060727438, -0.395113736, -0.251065284, 0.272852391, 0.201696262, 0.234759703], [0.265518636, 0.211273089, -5.142330e-02, -7.88406352E-4, -0.175895303, -0.24738647, 0.330014527, -0.2292009, -0.0545187145, -0.35968858], [-0.157047808, 0.277920961, 0.273503274, -0.0130148269, -0.326813549, 0.143175527, -0.00963719096, 0.167440012, 0.210747063, -0.0891537964], [0.0310225263, -0.0776267573, 0.276278019, -0.139116824, 0.121790454, -0.395071626, -2.326950e-01, 0.257626921, -0.127569184, 0.264735043], [0.0237917807, -0.278158903, -0.0585371666, 0.0991684943, 0.145461574, -0.0225479398, 0.320570618, -0.0800962671, -4.982980e-02, -0.0771938786], [0.107598737, 0.0567134842, 0.0050173765, 0.135508671, -0.242831439, 0.206493735, -0.257223487, 0.25564608, 0.13105084, -0.0108935023], [-0.0815954431, -0.0600086078, 0.0493025444, -0.241900951, 0.0250020418, 0.35276854, -0.313097537, 0.270228475, -0.270950615, 0.253449827], [0.114727132, -0.222702235, -0.151071653, -0.208600566, 0.314997226, -0.174035355, 0.344284892, -0.143412217, 0.188850269, 0.124864407], [-0.190904632, 0.0299436115, -0.191528931, -0.381732821, 0.175428227, 0.0987710803, 0.192728072, 0.17655462, 0.216957316, 0.232865736], [0.303614467, 0.23164387, -0.26379016, 0.217460141, -0.259905845, 0.298513204, -0.184439868, 0.232353494, -0.381244898, -0.227710232], [0.0208108909, 0.00839824323, -0.126433596, 0.192780793, 0.0649723336, -0.0253005717, -0.0480954498, 0.0768189952, 0.209024802, 0.00325355236], [0.161162391, 0.30670771, -0.0654156655, -0.227617249, 0.066418536, -0.0750855356, 0.159480572, -0.00124128303, -0.0957149043, -0.116955064], [-0.211400643, -0.0276241545, -0.0924634262, 0.395741016, 0.252569556, -0.28579244, -0.236819223, 0.046667546, 0.264011323, 0.311949909], [0.0855956227, -0.110965215, 0.111237064, -0.131076068, 0.0891130492, -0.155705988, -0.130113155, 0.254974216, 0.153850585, -0.0406821221]]> : tensor<128x10xf32>
     %8 = "xla_hlo.dot"(%7, %cst_2) {name = "dot.13", precision_config = ["DEFAULT", "DEFAULT"]} : (tensor<1x128xf32>, tensor<128x10xf32>) -> tensor<1x10xf32>
     %cst_3 = constant  {name = "constant.14"} dense<[-0.0990044474, 0.106343128, 0.00506669516, -0.0421980098, 0.021251943, 0.200870886, 9.79547156E-4, 0.0746576563, -0.234273568, -0.0336938724]> : tensor<10xf32>
@@ -33,11 +33,11 @@
     %cst_4 = constant  {name = "constant.17"} dense<0xFF800000> : tensor<f32>
     %11 = "xla_hlo.reduce"(%10, %cst_4) ( {
     ^bb0(%arg1: tensor<f32>, %arg2: tensor<f32>):   // no predecessors
-      %20 = "xla_hlo.max"(%arg1, %arg2) {name = "maximum.21"} : (tensor<f32>, tensor<f32>) -> tensor<f32>
+      %20 = "xla_hlo.maximum"(%arg1, %arg2) {name = "maximum.21"} : (tensor<f32>, tensor<f32>) -> tensor<f32>
       "xla_hlo.return"(%20) : (tensor<f32>) -> ()
     }) {dimensions = dense<1> : tensor<1xi64>} : (tensor<1x10xf32>, tensor<f32>) -> tensor<1xf32>
     %12 = "xla_hlo.broadcast_in_dim"(%11) {broadcast_dimensions = dense<0> : tensor<1xi64>, name = "broadcast.23"} : (tensor<1xf32>) -> tensor<1x10xf32>
-    %13 = "xla_hlo.sub"(%10, %12) {name = "subtract.24"} : (tensor<1x10xf32>, tensor<1x10xf32>) -> tensor<1x10xf32>
+    %13 = "xla_hlo.subtract"(%10, %12) {name = "subtract.24"} : (tensor<1x10xf32>, tensor<1x10xf32>) -> tensor<1x10xf32>
     %14 = "xla_hlo.exp"(%13) {name = "exponential.25"} : (tensor<1x10xf32>) -> tensor<1x10xf32>
     %cst_5 = constant  {name = "constant.27"} dense<0.000000e+00> : tensor<f32>
     %15 = "xla_hlo.reduce"(%14, %cst_5) ( {
@@ -46,7 +46,7 @@
       "xla_hlo.return"(%21) : (tensor<f32>) -> ()
     }) {dimensions = dense<1> : tensor<1xi64>} : (tensor<1x10xf32>, tensor<f32>) -> tensor<1xf32>
     %16 = "xla_hlo.broadcast_in_dim"(%15) {broadcast_dimensions = dense<0> : tensor<1xi64>, name = "broadcast.34"} : (tensor<1xf32>) -> tensor<1x10xf32>
-    %17 = "xla_hlo.div"(%14, %16) {name = "divide.35"} : (tensor<1x10xf32>, tensor<1x10xf32>) -> tensor<1x10xf32>
+    %17 = "xla_hlo.divide"(%14, %16) {name = "divide.35"} : (tensor<1x10xf32>, tensor<1x10xf32>) -> tensor<1x10xf32>
     %18 = "xla_hlo.reshape"(%17) {name = "reshape.36"} : (tensor<1x10xf32>) -> tensor<1x10xf32>
     %19 = "xla_hlo.tuple"(%18) {name = "tuple.37"} : (tensor<1x10xf32>) -> tuple<tensor<1x10xf32>>
     return %19 : tuple<tensor<1x10xf32>>