[LLVMCPU][ArmSME] Rework how Arm streaming mode is set on dispatches (#17646)

Previously, when the `+sme` feature flag was set, Armv9 streaming SVE
mode was enabled for all dispatch regions lowered with the following
experts:

- `CPUBufferOpsTileAndVectorize`
- `CPUConvTileAndDecomposeExpert`
- `CPUDoubleTilingExpert`

This was not ideal, as it meant streaming mode could be added to
dispatch regions that made no use of scalable vectors, where the
(possibly) larger streaming vector length provides no benefit and may
incur a cost due to other overheads.

There was also a flag, `--iree-experimental-llvmcpu-arm-force-ssve`,
which, contrary to its name, _did not_ force streaming SVE mode. What
this flag actually did was disable tiling for 2D scalable ArmSME
operations and then rely on something else later in the pipeline to set
streaming mode (which it did not control).

This patch adds clearer and more direct ways to enable streaming mode.

First, streaming mode is no longer set in any lowering expert (it is a
fairly low-level concept that does not need to be configured early in
the pipeline). Second, the old
`--iree-experimental-llvmcpu-arm-force-ssve` flag is removed.

There are now two new flags to control ArmSME tiling and streaming
mode.

`iree-llvmcpu-disable-arm-sme-tiling`:

This disables tiling for ArmSME (i.e., using 2D scalable tile sizes),
even when the `+sme` feature flag is set. Operations are then tiled for
SVE or Neon instead (depending on the configuration).
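
For example, a sketch of one possible invocation (the target-selection
flags and `model.mlir` are illustrative context, not part of this
change; only the last flag is new):

```
iree-compile model.mlir -o model.vmfb \
  --iree-hal-target-backends=llvm-cpu \
  --iree-llvmcpu-target-triple=aarch64-none-elf \
  --iree-llvmcpu-target-cpu-features=+sve,+sme \
  --iree-llvmcpu-enable-scalable-vectorization=true \
  --iree-llvmcpu-disable-arm-sme-tiling
```

With this, operations that would have picked 2D scalable (ArmSME) tile
sizes fall back to the SVE tiling heuristics instead.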

`iree-llvmcpu-force-arm-streaming`:

This enables Arm streaming mode for any dispatch region that contains
scalable vectors. Dispatches that don't contain scalable vectors are
ignored, as enabling streaming mode for them would provide no benefit.
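
Similarly, a sketch of opting scalable dispatches into SSVE (same
illustrative context as above; only the last flag is new):

```
iree-compile model.mlir -o model.vmfb \
  --iree-hal-target-backends=llvm-cpu \
  --iree-llvmcpu-target-triple=aarch64-none-elf \
  --iree-llvmcpu-target-cpu-features=+sve,+sme \
  --iree-llvmcpu-enable-scalable-vectorization=true \
  --iree-llvmcpu-force-arm-streaming
```

Note that dispatches containing ArmSME operations get streaming mode
(and ZA) enabled regardless of this flag, since those operations
require it.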

ci-extra: build_test_all_arm64

---------

Signed-off-by: Benjamin Maxwell <benjamin.maxwell@arm.com>
diff --git a/compiler/src/iree/compiler/Codegen/LLVMCPU/KernelDispatch.cpp b/compiler/src/iree/compiler/Codegen/LLVMCPU/KernelDispatch.cpp
index 175c1ae..9ce86e9 100644
--- a/compiler/src/iree/compiler/Codegen/LLVMCPU/KernelDispatch.cpp
+++ b/compiler/src/iree/compiler/Codegen/LLVMCPU/KernelDispatch.cpp
@@ -92,6 +92,12 @@
                    "target (e.g., +sve, +sve2 and/or +sme feature flags)"),
     llvm::cl::init(false));
 
+static llvm::cl::opt<bool> clDisableArmSMETiling(
+    "iree-llvmcpu-disable-arm-sme-tiling",
+    llvm::cl::desc("Disables tiling for SME even if it is supported by the "
+                   "target (i.e., when the +sme feature flag is present)"),
+    llvm::cl::init(false));
+
 // Non-static options are used in other places.
 llvm::cl::opt<bool> clEnableTransformDialectJit(
     "iree-llvmcpu-enable-transform-dialect-jit",
@@ -114,17 +120,6 @@
   None
 };
 
-// NOTE: This flag is meant for testing + experimentation and should not be
-// used in deployment.
-static llvm::cl::opt<bool> clExperimentalArmForceSSVE(
-    "iree-experimental-llvmcpu-arm-force-ssve",
-    llvm::cl::desc(
-        "Controls whether to disable SME tiling when SME+SSVE are enabled "
-        "with +sme. As a result, IREE will effectively target SSVE "
-        "instead of SME. This flag is experimental and should only be "
-        "used for testing."),
-    llvm::cl::init(false));
-
 // Use this flag to override IREE's heuristics for selecting the pre-processing
 // strategy.
 static llvm::cl::opt<VectorPreProcStrategy> clPProcStrategy(
@@ -1326,7 +1321,7 @@
   // TODO: Compute vector tile sizes using heuristics.
 
   if (isAArch64(targetAttr)) {
-    if (clEnableScalableVectorization && !clExperimentalArmForceSSVE &&
+    if (clEnableScalableVectorization && !clDisableArmSMETiling &&
         hasSMEFeature(targetAttr)) {
       // Note: This may not pick any sizes (which will fallback to the scalable
       // vectorization heuristics below).
@@ -2038,7 +2033,8 @@
   if (failed(elementType))
     return;
 
-  if (hasSMEFeature(targetAttr) && clEnableScalableVectorization) {
+  if (hasSMEFeature(targetAttr) && clEnableScalableVectorization &&
+      !clDisableArmSMETiling) {
     if (elementType->isF32()) {
       sizes.append({4, 4});
     } else if (elementType->isF64()) {
diff --git a/compiler/src/iree/compiler/Codegen/LLVMCPU/Passes.cpp b/compiler/src/iree/compiler/Codegen/LLVMCPU/Passes.cpp
index 6a4363f..10bbc3b 100644
--- a/compiler/src/iree/compiler/Codegen/LLVMCPU/Passes.cpp
+++ b/compiler/src/iree/compiler/Codegen/LLVMCPU/Passes.cpp
@@ -91,6 +91,19 @@
                    "LLVMCPUMmt4dVectorLowering pass."),
     llvm::cl::init(false));
 
+// By default, IREE does not enable the Armv9-A streaming SVE mode in the
+// presence of scalable vectors (even when using `+sme`), as there is currently
+// no cost model for when it could be beneficial. This flag will effectively make
+// IREE/LLVM switch from SVE to SSVE in dispatch regions with supported
+// scalable vector operations.
+static llvm::cl::opt<bool> clForceArmStreaming(
+    "iree-llvmcpu-force-arm-streaming",
+    llvm::cl::desc(
+        "Enables Armv9-A streaming SVE mode for any dispatch region that "
+        "contains supported scalable vector operations (i.e., use SSVE rather "
+        "than SVE). Requires the +sme feature flag."),
+    llvm::cl::init(false));
+
 static void addTileAndDistributePasses(OpPassManager &funcPassManager) {
   funcPassManager.addPass(createTileAndDistributeToWorkgroupsPass());
   funcPassManager.addPass(createConvertToDestinationPassingStylePass());
@@ -340,10 +353,6 @@
     options.enableArmI8mm = pipelineOpt.enableAArch64I8mm;
     buildLLVMCPUVectorLoweringPipeline(funcPassManager, options);
   }
-
-  if (pipelineOpt.enableAArch64SSVE)
-    funcPassManager.addPass(mlir::arm_sme::createEnableArmStreamingPass(
-        mlir::arm_sme::ArmStreamingMode::StreamingLocally));
 }
 
 void addMultiTilingExpertPassPipeline(OpPassManager &funcPassManager,
@@ -422,10 +431,6 @@
     options.enableArmI8mm = pipelineOpt.enableAArch64I8mm;
     buildLLVMCPUVectorLoweringPipeline(funcPassManager, options);
   }
-
-  if (pipelineOpt.enableAArch64SSVE)
-    funcPassManager.addPass(mlir::arm_sme::createEnableArmStreamingPass(
-        mlir::arm_sme::ArmStreamingMode::StreamingLocally));
 }
 
 void addConvTileAndDecomposeExpertPassPipeline(
@@ -487,10 +492,6 @@
     options.enableArmI8mm = pipelineOpt.enableAArch64I8mm;
     buildLLVMCPUVectorLoweringPipeline(funcPassManager, options);
   }
-
-  if (pipelineOpt.enableAArch64SSVE)
-    funcPassManager.addPass(mlir::arm_sme::createEnableArmStreamingPass(
-        mlir::arm_sme::ArmStreamingMode::StreamingLocally));
 }
 
 void addMmt4dTilingExpertPassPipeline(OpPassManager &funcPassManager,
@@ -693,15 +694,29 @@
   if (enableAArch64SME) {
     modulePassManager.addPass(mlir::arm_sme::createVectorLegalizationPass());
     FunctionLikeNest(modulePassManager)
+        .addPredicatedPass(
+            clForceArmStreaming,
+            [] {
+              // 1. Enable Armv9-A streaming mode without ZA (i.e., SSVE) for
+              // dispatch regions that contain scalable vectors when forced via
+              // the --iree-llvmcpu-force-arm-streaming flag.
+              return mlir::arm_sme::createEnableArmStreamingPass(
+                  mlir::arm_sme::ArmStreamingMode::StreamingLocally,
+                  mlir::arm_sme::ArmZaMode::Disabled,
+                  /*ifRequiredByOps=*/false,
+                  /*ifContainsScalableVectors=*/true);
+            })
         .addPass(createCanonicalizerPass)
         .addPass(createCSEPass)
         .addPass(mlir::createArithToArmSMEConversionPass)
         .addPass(mlir::createConvertVectorToArmSMEPass)
-        .addPass([]() {
+        .addPass([] {
+          // 2. Enable ZA for dispatch regions that contain ArmSME ops (which
+          // all make use of the ZA state).
           return mlir::arm_sme::createEnableArmStreamingPass(
               mlir::arm_sme::ArmStreamingMode::StreamingLocally,
               mlir::arm_sme::ArmZaMode::NewZA,
-              /*onlyIfRequiredByOps=*/true);
+              /*ifRequiredByOps=*/true);
         })
         .addPass(mlir::createConvertArmSMEToSCFPass);
   }
@@ -850,12 +865,22 @@
         buildLLVMCPUVectorLoweringPipeline(funcPassManager, options);
       });
 
-  static PassPipelineRegistration<> LinalgLLVMPipeline(
-      "iree-codegen-linalg-to-llvm-pipeline",
-      "Runs the progressive lowering pipeline from Linalg to LLVM",
-      [](OpPassManager &variantPassManager) {
-        buildLLVMCPUCodegenPassPipeline(variantPassManager);
-      });
+  struct LinalgToLLVMPipelineOptions
+      : public PassPipelineOptions<LinalgToLLVMPipelineOptions> {
+    Option<bool> enableArmSME{
+        *this, "enable-arm-sme",
+        llvm::cl::desc("Enable the ArmSME lowering pipeline.")};
+  };
+
+  static PassPipelineRegistration<LinalgToLLVMPipelineOptions>
+      LinalgLLVMPipeline(
+          "iree-codegen-linalg-to-llvm-pipeline",
+          "Runs the progressive lowering pipeline from Linalg to LLVM",
+          [](OpPassManager &variantPassManager,
+             LinalgToLLVMPipelineOptions const &options) {
+            buildLLVMCPUCodegenPassPipeline(variantPassManager,
+                                            options.enableArmSME);
+          });
 
   static PassPipelineRegistration<> LLVMCPULinkingPipeline(
       "iree-codegen-llvmcpu-linking-pipeline",
diff --git a/compiler/src/iree/compiler/Codegen/LLVMCPU/test/BUILD.bazel b/compiler/src/iree/compiler/Codegen/LLVMCPU/test/BUILD.bazel
index 4c4c735..cd9c4e9 100644
--- a/compiler/src/iree/compiler/Codegen/LLVMCPU/test/BUILD.bazel
+++ b/compiler/src/iree/compiler/Codegen/LLVMCPU/test/BUILD.bazel
@@ -36,12 +36,12 @@
             "hal_interface_workgroup_info.mlir",
             "illegal_configuration.mlir",
             "peel.mlir",
+            "pipeline_arm_sme_streaming_mode_tests.mlir",
             "pipeline_pack_unpack_tests.mlir",
             "pipeline_pad_conv_tests.mlir",
             "pipeline_pad_tests.mlir",
             "pipeline_peel_and_vectorize_tests.mlir",
             "pipeline_split_reduction_tests.mlir",
-            "pipeline_ssve_tests.mlir",
             "pipeline_tests.mlir",
             "pipeline_transpose_avx2_tests.mlir",
             "pipeline_vector_masking_tests.mlir",
diff --git a/compiler/src/iree/compiler/Codegen/LLVMCPU/test/CMakeLists.txt b/compiler/src/iree/compiler/Codegen/LLVMCPU/test/CMakeLists.txt
index 4c66fb4..de22f62 100644
--- a/compiler/src/iree/compiler/Codegen/LLVMCPU/test/CMakeLists.txt
+++ b/compiler/src/iree/compiler/Codegen/LLVMCPU/test/CMakeLists.txt
@@ -31,12 +31,12 @@
     "hal_interface_workgroup_info.mlir"
     "illegal_configuration.mlir"
     "peel.mlir"
+    "pipeline_arm_sme_streaming_mode_tests.mlir"
     "pipeline_pack_unpack_tests.mlir"
     "pipeline_pad_conv_tests.mlir"
     "pipeline_pad_tests.mlir"
     "pipeline_peel_and_vectorize_tests.mlir"
     "pipeline_split_reduction_tests.mlir"
-    "pipeline_ssve_tests.mlir"
     "pipeline_tests.mlir"
     "pipeline_transpose_avx2_tests.mlir"
     "pipeline_vector_masking_tests.mlir"
diff --git a/compiler/src/iree/compiler/Codegen/LLVMCPU/test/pipeline_arm_sme_streaming_mode_tests.mlir b/compiler/src/iree/compiler/Codegen/LLVMCPU/test/pipeline_arm_sme_streaming_mode_tests.mlir
new file mode 100644
index 0000000..3ff496a
--- /dev/null
+++ b/compiler/src/iree/compiler/Codegen/LLVMCPU/test/pipeline_arm_sme_streaming_mode_tests.mlir
@@ -0,0 +1,94 @@
+// RUN: iree-opt --iree-codegen-linalg-to-llvm-pipeline=enable-arm-sme --split-input-file %s | FileCheck %s
+// RUN: iree-opt --iree-codegen-linalg-to-llvm-pipeline=enable-arm-sme --iree-llvmcpu-force-arm-streaming --split-input-file %s | FileCheck %s -check-prefixes=FORCE-ARM-STREAMING
+
+module {
+module {
+  func.func @fixed_size_dispatch() attributes {hal.executable.target = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {cpu_features = "+sve,+sme", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>,
+      translation_info = #iree_codegen.translation_info<CPUDoubleTilingExpert>} {
+    %c0 = arith.constant 0 : index
+    %c1 = arith.constant 1 : index
+    %cst = arith.constant 0.000000e+00 : f32
+    %0 = hal.interface.constant.load[0] : i32
+    %1 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<readwrite:tensor<1xf32>>
+    %2 = tensor.empty() : tensor<1xf32>
+    %3 = linalg.fill {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[0], [1], [0], [0]]>}
+        ins(%cst : f32) outs(%2 : tensor<1xf32>) -> tensor<1xf32>
+    flow.dispatch.tensor.store %3, %1, offsets = [0], sizes = [1], strides = [1] : tensor<1xf32> -> !flow.dispatch.tensor<readwrite:tensor<1xf32>>
+    return
+  }
+}
+}
+
+/// A dispatch region that only uses fixed-size vectors should never use
+/// streaming mode.
+
+// CHECK: @fixed_size_dispatch
+// CHECK-NOT: arm_locally_streaming
+// CHECK-NOT: arm_new_za
+
+// FORCE-ARM-STREAMING: @fixed_size_dispatch
+// FORCE-ARM-STREAMING-NOT: arm_locally_streaming
+// FORCE-ARM-STREAMING-NOT: arm_new_za
+
+// -----
+
+module {
+module {
+  func.func @scalable_dispatch() attributes {hal.executable.target = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {cpu_features = "+sve,+sme", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>,
+      translation_info = #iree_codegen.translation_info<CPUDoubleTilingExpert>} {
+    %c0 = arith.constant 0 : index
+    %c1 = arith.constant 1 : index
+    %cst = arith.constant 0.000000e+00 : f32
+    %0 = hal.interface.constant.load[0] : i32
+    %1 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<readwrite:tensor<1xf32>>
+    %2 = tensor.empty() : tensor<1xf32>
+    %3 = linalg.fill {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[0], [[1]], [0], [0]]>}
+        ins(%cst : f32) outs(%2 : tensor<1xf32>) -> tensor<1xf32>
+    flow.dispatch.tensor.store %3, %1, offsets = [0], sizes = [1], strides = [1] : tensor<1xf32> -> !flow.dispatch.tensor<readwrite:tensor<1xf32>>
+    return
+  }
+}
+}
+
+/// A dispatch region that uses scalable vectors (but not ArmSME dialect
+/// operations) should only use streaming mode if
+/// `--iree-llvmcpu-force-arm-streaming` is set.
+
+// CHECK: @scalable_dispatch
+// CHECK-NOT: arm_locally_streaming
+// CHECK-NOT: arm_new_za
+
+// FORCE-ARM-STREAMING: @scalable_dispatch
+// FORCE-ARM-STREAMING-NOT: arm_new_za
+// FORCE-ARM-STREAMING-SAME: arm_locally_streaming
+
+// -----
+
+module {
+module {
+  func.func @scalable_dispatch_using_za() attributes {hal.executable.target = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {cpu_features = "+sve,+sme", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>,
+      translation_info = #iree_codegen.translation_info<CPUDoubleTilingExpert>} {
+    %c0 = arith.constant 0 : index
+    %c1 = arith.constant 1 : index
+    %cst = arith.constant 0.000000e+00 : f32
+    %0 = hal.interface.constant.load[0] : i32
+    %1 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<readwrite:tensor<100x100xf32>>
+    %2 = tensor.empty() : tensor<100x100xf32>
+    %3 = linalg.fill {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[0, 0], [[4], [4]], [0, 0], [0, 0]]>}
+        ins(%cst : f32) outs(%2 : tensor<100x100xf32>) -> tensor<100x100xf32>
+    flow.dispatch.tensor.store %3, %1, offsets = [0, 0], sizes = [100, 100], strides = [100, 1] : tensor<100x100xf32> -> !flow.dispatch.tensor<readwrite:tensor<100x100xf32>>
+    return
+  }
+}
+}
+
+/// A dispatch region that uses ArmSME operations (which require the ZA state)
+/// should always have streaming mode and ZA enabled.
+
+// CHECK: @scalable_dispatch_using_za
+// CHECK-SAME: arm_locally_streaming
+// CHECK-SAME: arm_new_za
+
+// FORCE-ARM-STREAMING: @scalable_dispatch_using_za
+// FORCE-ARM-STREAMING-SAME: arm_locally_streaming
+// FORCE-ARM-STREAMING-SAME: arm_new_za
diff --git a/compiler/src/iree/compiler/Codegen/LLVMCPU/test/pipeline_ssve_tests.mlir b/compiler/src/iree/compiler/Codegen/LLVMCPU/test/pipeline_ssve_tests.mlir
deleted file mode 100644
index f0dae66..0000000
--- a/compiler/src/iree/compiler/Codegen/LLVMCPU/test/pipeline_ssve_tests.mlir
+++ /dev/null
@@ -1,90 +0,0 @@
-// RUN: iree-opt --pass-pipeline='builtin.module(iree-llvmcpu-select-lowering-strategy, func.func(iree-llvmcpu-lower-executable-target))' --split-input-file %s | FileCheck %s
-
-// Check Armv9 Streaming SVE mode is enabled for the following pipelines:
-//
-//   * CPUBufferOpsTileAndVectorize
-//   * CPUConvTileAndDecomposeExpert
-//   * CPUDoubleTilingExpert
-
-#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {cpu_features = "+sve,+sme", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
-module {
-  func.func @dispatch() attributes {hal.executable.target = #executable_target_embedded_elf_arm_64_,
-      translation_info = #iree_codegen.translation_info<CPUBufferOpsTileAndVectorize>} {
-    %c0 = arith.constant 0 : index
-    %c1 = arith.constant 1 : index
-    %cst = arith.constant 0.000000e+00 : f32
-    %0 = hal.interface.constant.load[0] : i32
-    %1 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<readwrite:tensor<1xf32>>
-    %2 = tensor.empty() : tensor<1xf32>
-    %3 = linalg.fill {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[0], [1], [0], [0]]>}
-        ins(%cst : f32) outs(%2 : tensor<1xf32>) -> tensor<1xf32>
-    flow.dispatch.tensor.store %3, %1, offsets = [0], sizes = [1], strides = [1] : tensor<1xf32> -> !flow.dispatch.tensor<readwrite:tensor<1xf32>>
-    return
-  }
-}
-
-// CHECK: func.func @dispatch()
-// CHECK-SAME: arm_locally_streaming
-
-// -----
-#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {cpu_features = "+sve,+sme", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
-module {
-  func.func @dispatch() attributes {hal.executable.target = #executable_target_embedded_elf_arm_64_,
-      translation_info = #iree_codegen.translation_info<CPUDoubleTilingExpert>} {
-    %c0 = arith.constant 0 : index
-    %c1 = arith.constant 1 : index
-    %cst = arith.constant 0.000000e+00 : f32
-    %0 = hal.interface.constant.load[0] : i32
-    %1 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<readwrite:tensor<1xf32>>
-    %2 = tensor.empty() : tensor<1xf32>
-    %3 = linalg.fill {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[0], [1], [0], [0]]>}
-        ins(%cst : f32) outs(%2 : tensor<1xf32>) -> tensor<1xf32>
-    flow.dispatch.tensor.store %3, %1, offsets = [0], sizes = [1], strides = [1] : tensor<1xf32> -> !flow.dispatch.tensor<readwrite:tensor<1xf32>>
-    return
-  }
-}
-
-// CHECK: func.func @dispatch()
-// CHECK-SAME: arm_locally_streaming
-
-// -----
-#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {cpu_features = "+sve,+sme", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
-module {
-  func.func @dispatch() attributes {hal.executable.target = #executable_target_embedded_elf_arm_64_,
-      translation_info = #iree_codegen.translation_info<CPUConvTileAndDecomposeExpert>} {
-    %c0 = arith.constant 0 : index
-    %c1 = arith.constant 1 : index
-    %cst = arith.constant 0.000000e+00 : f32
-    %0 = hal.interface.constant.load[0] : i32
-    %1 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<readwrite:tensor<1xf32>>
-    %2 = tensor.empty() : tensor<1xf32>
-    %3 = linalg.fill {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[0], [1], [0], [0]]>}
-        ins(%cst : f32) outs(%2 : tensor<1xf32>) -> tensor<1xf32>
-    flow.dispatch.tensor.store %3, %1, offsets = [0], sizes = [1], strides = [1] : tensor<1xf32> -> !flow.dispatch.tensor<readwrite:tensor<1xf32>>
-    return
-  }
-}
-
-// CHECK: func.func @dispatch()
-// CHECK-SAME: arm_locally_streaming
-
-// -----
-#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {cpu_features = "+sme", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
-module {
-  func.func @dispatch() attributes {hal.executable.target = #executable_target_embedded_elf_arm_64_,
-      translation_info = #iree_codegen.translation_info<CPUConvTileAndDecomposeExpert>} {
-    %c0 = arith.constant 0 : index
-    %c1 = arith.constant 1 : index
-    %cst = arith.constant 0.000000e+00 : f32
-    %0 = hal.interface.constant.load[0] : i32
-    %1 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<readwrite:tensor<1xf32>>
-    %2 = tensor.empty() : tensor<1xf32>
-    %3 = linalg.fill {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[0], [1], [0], [0]]>}
-        ins(%cst : f32) outs(%2 : tensor<1xf32>) -> tensor<1xf32>
-    flow.dispatch.tensor.store %3, %1, offsets = [0], sizes = [1], strides = [1] : tensor<1xf32> -> !flow.dispatch.tensor<readwrite:tensor<1xf32>>
-    return
-  }
-}
-
-// CHECK: func.func @dispatch()
-// CHECK-NOT: arm_locally_streaming
diff --git a/compiler/src/iree/compiler/Codegen/LLVMCPU/test/select_aarch64_sve_lowering_strategy.mlir b/compiler/src/iree/compiler/Codegen/LLVMCPU/test/select_aarch64_sve_lowering_strategy.mlir
index 64dc73d..4944337 100644
--- a/compiler/src/iree/compiler/Codegen/LLVMCPU/test/select_aarch64_sve_lowering_strategy.mlir
+++ b/compiler/src/iree/compiler/Codegen/LLVMCPU/test/select_aarch64_sve_lowering_strategy.mlir
@@ -1,7 +1,7 @@
 // RUN: iree-opt --pass-pipeline='builtin.module(iree-llvmcpu-select-lowering-strategy)' \
 // RUN:   --iree-llvmcpu-enable-scalable-vectorization=true --split-input-file %s | FileCheck %s --check-prefixes=CHECK,WITH-SME
 // RUN: iree-opt --pass-pipeline='builtin.module(iree-llvmcpu-select-lowering-strategy)' \
-// RUN:   --iree-llvmcpu-enable-scalable-vectorization=true --split-input-file  --iree-experimental-llvmcpu-arm-force-ssve=true %s | FileCheck %s --check-prefixes=CHECK,SSVE-WITHOUT-SME
+// RUN:   --iree-llvmcpu-enable-scalable-vectorization=true --split-input-file --iree-llvmcpu-disable-arm-sme-tiling %s | FileCheck %s --check-prefixes=CHECK,DISABLE-ARM-SME
 
 #executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {cpu_features = "+sve", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
 module {
@@ -101,12 +101,12 @@
   }
 }
 
-//  SSVE-WITHOUT-SME-DAG: #[[CONFIG:.+]] = #iree_codegen.lowering_config<tile_sizes = {{\[}}[64, 64, 0], [8, [16], 0], [0, 0, 1], [0, 0, 0]]>
-//  SSVE-WITHOUT-SME-DAG: #[[TRANSLATION:.+]] = #iree_codegen.translation_info<CPUDoubleTilingExpert>
-//      SSVE-WITHOUT-SME: func.func @matmul_tensors()
-//  SSVE-WITHOUT-SME-SAME:     translation_info = #[[TRANSLATION]]
-//       SSVE-WITHOUT-SME: linalg.matmul
-//  SSVE-WITHOUT-SME-SAME:     lowering_config = #[[CONFIG]]
+//  DISABLE-ARM-SME-DAG: #[[CONFIG:.+]] = #iree_codegen.lowering_config<tile_sizes = {{\[}}[64, 64, 0], [8, [16], 0], [0, 0, 1], [0, 0, 0]]>
+//  DISABLE-ARM-SME-DAG: #[[TRANSLATION:.+]] = #iree_codegen.translation_info<CPUDoubleTilingExpert>
+//      DISABLE-ARM-SME: func.func @matmul_tensors()
+//  DISABLE-ARM-SME-SAME:     translation_info = #[[TRANSLATION]]
+//       DISABLE-ARM-SME: linalg.matmul
+//  DISABLE-ARM-SME-SAME:     lowering_config = #[[CONFIG]]
 
 //   WITH-SME-DAG: #[[CONFIG:.+]] = #iree_codegen.lowering_config<tile_sizes = {{\[}}[64, 64, 0], {{\[}}[8], [8], 0], [0, 0, 1], [0, 0, 0]]>
 //   WITH-SME-DAG: #[[TRANSLATION:.+]] = #iree_codegen.translation_info<CPUDoubleTilingExpert>