Forking dynamic behavior from flow.tensor.constant. (#17034)

Needed this sooner than I expected: constant materialization requires the
ConstantLike trait on the constant op, and a ConstantLike op must fold to
its value attribute, so flow.tensor.constant can no longer erase shape
information. The special behavior is now moved to the new
flow.tensor.dynamic_constant op.

Follow-up to #17024.
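
For a quick before/after, a sketch mirroring the test updates in the diff
below:

```mlir
// Static constant: the result type now always matches the value type.
%cst = flow.tensor.constant dense<2> : tensor<2x4xi32>

// Dynamic behavior lives on the new op, keeping the `->` cast form; it
// canonicalizes into a static constant reshaped through barrier-guarded dims.
%dyn = flow.tensor.dynamic_constant dense<2> : tensor<2x4xi32> -> tensor<2x?xi32>
```
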
diff --git a/compiler/src/iree/compiler/Dialect/Flow/IR/FlowBase.td b/compiler/src/iree/compiler/Dialect/Flow/IR/FlowBase.td
index 1f2cc20..78b9036 100644
--- a/compiler/src/iree/compiler/Dialect/Flow/IR/FlowBase.td
+++ b/compiler/src/iree/compiler/Dialect/Flow/IR/FlowBase.td
@@ -11,6 +11,7 @@
 include "iree/compiler/Dialect/Util/IR/UtilBase.td"
 include "iree/compiler/Dialect/Util/IR/UtilTypes.td"
 include "mlir/IR/AttrTypeBase.td"
+include "mlir/IR/BuiltinAttributeInterfaces.td"
 
 //===----------------------------------------------------------------------===//
 // IREE execution flow dialect
@@ -233,4 +234,33 @@
   let cppNamespace = "mlir::iree_compiler::IREE::Flow";
 }
 
+//===----------------------------------------------------------------------===//
+// Parameter storage attributes
+//===----------------------------------------------------------------------===//
+
+def FLOW_NamedParameterAttr :
+    AttrDef<Flow_Dialect, "NamedParameter", [
+      TypedAttrInterface,
+    ]> {
+  let mnemonic = "parameter.named";
+  let summary = [{named parameter referenced by an optional scope and key}];
+  let description = [{
+    Specifies an externally-defined parameter that can be referenced by an
+    optional scope defining a set of parameters and a key uniquely identifying
+    the parameter within its scope.
+  }];
+  let parameters = (ins
+    AttributeSelfTypeParameter<"">:$type,
+    OptionalParameter<"StringAttr">:$scope,
+    AttrParameter<"StringAttr", "">:$key,
+    OptionalParameter<"DictionaryAttr">:$config
+  );
+  let assemblyFormat = [{
+    `<`
+    custom<ParameterReference>($scope, $key)
+    (`,` $config^)?
+    `>`
+  }];
+}
+
 #endif  // IREE_DIALECT_FLOW_BASE
diff --git a/compiler/src/iree/compiler/Dialect/Flow/IR/FlowDialect.cpp b/compiler/src/iree/compiler/Dialect/Flow/IR/FlowDialect.cpp
index 5b0d2e2..577ad48 100644
--- a/compiler/src/iree/compiler/Dialect/Flow/IR/FlowDialect.cpp
+++ b/compiler/src/iree/compiler/Dialect/Flow/IR/FlowDialect.cpp
@@ -74,8 +74,12 @@
 
 Operation *FlowDialect::materializeConstant(OpBuilder &builder, Attribute value,
                                             Type type, Location loc) {
-  if (arith::ConstantOp::isBuildableWith(value, type))
+  if (arith::ConstantOp::isBuildableWith(value, type)) {
     return builder.create<arith::ConstantOp>(loc, type, cast<TypedAttr>(value));
+  } else if (IREE::Flow::TensorConstantOp::isBuildableWith(value, type)) {
+    return builder.create<IREE::Flow::TensorConstantOp>(loc, type,
+                                                        cast<TypedAttr>(value));
+  }
   return nullptr;
 }
 
diff --git a/compiler/src/iree/compiler/Dialect/Flow/IR/FlowOpFolders.cpp b/compiler/src/iree/compiler/Dialect/Flow/IR/FlowOpFolders.cpp
index d37b28c..a2079c3 100644
--- a/compiler/src/iree/compiler/Dialect/Flow/IR/FlowOpFolders.cpp
+++ b/compiler/src/iree/compiler/Dialect/Flow/IR/FlowOpFolders.cpp
@@ -779,7 +779,13 @@
 // flow.tensor.constant
 //===----------------------------------------------------------------------===//
 
-OpFoldResult TensorConstantOp::fold(FoldAdaptor operands) {
+OpFoldResult TensorConstantOp::fold(FoldAdaptor operands) { return getValue(); }
+
+//===----------------------------------------------------------------------===//
+// flow.tensor.dynamic_constant
+//===----------------------------------------------------------------------===//
+
+OpFoldResult TensorDynamicConstantOp::fold(FoldAdaptor operands) {
   auto dynamicType = getType();
   if (dynamicType.getNumDynamicDims() == 0) {
     return getValue();
@@ -787,6 +793,46 @@
   return {};
 }
 
+namespace {
+
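+// Expands a flow.tensor.dynamic_constant into a flow.tensor.constant with a
+// flow.tensor.reshape through util.optimization_barrier'd dims so consumers
+// still treat the shape as dynamic (mainly useful in tests/benchmarks).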
+struct ExpandDynamicShapeConstant
+    : public OpRewritePattern<TensorDynamicConstantOp> {
+  using OpRewritePattern<TensorDynamicConstantOp>::OpRewritePattern;
+  LogicalResult matchAndRewrite(TensorDynamicConstantOp op,
+                                PatternRewriter &rewriter) const override {
+    auto constantOp = rewriter.create<IREE::Flow::TensorConstantOp>(
+        op.getLoc(), op.getValue());
+    auto dynamicType = op.getType();
+    auto staticType = cast<ShapedType>(op.getValue().getType());
+    SmallVector<Value> dynamicDims;
+    for (int64_t i = 0; i < dynamicType.getRank(); ++i) {
+      if (dynamicType.isDynamicDim(i)) {
+        auto dimValue = rewriter
+                            .create<arith::ConstantIndexOp>(
+                                op.getLoc(), staticType.getDimSize(i))
+                            .getResult();
+        dynamicDims.push_back(rewriter
+                                  .create<IREE::Util::OptimizationBarrierOp>(
+                                      op.getLoc(), dimValue)
+                                  .getResult(0));
+      }
+    }
+    rewriter.replaceOpWithNewOp<IREE::Flow::TensorReshapeOp>(
+        op, dynamicType, constantOp.getResult(), dynamicDims);
+    return success();
+  }
+};
+
+} // namespace
+
+void TensorDynamicConstantOp::getCanonicalizationPatterns(
+    RewritePatternSet &results, MLIRContext *context) {
+  results.insert<ExpandDynamicShapeConstant>(context);
+}
+
 //===----------------------------------------------------------------------===//
 // flow.tensor.tie_shape
 //===----------------------------------------------------------------------===//
@@ -973,26 +1016,6 @@
       return success();
     }
 
-    // Special handling of flow.tensor.constant which may be acting as a
-    // dynamically shaped value that we want to remove the tensor.dim of but
-    // still treat the shape as dynamic. We do this by inserting an optimization
-    // barrier between the constant and the consumers. Note that this use case
-    // is very specific and generally only applicable to tests/benchmarks.
-    if (auto constantOp = dyn_cast_if_present<IREE::Flow::TensorConstantOp>(
-            op.getShapedValue().getDefiningOp())) {
-      auto valueType = dyn_cast<ShapedType>(constantOp.getValue().getType());
-      if (valueType && valueType != constantOp.getType()) {
-        // Constant op is acting as a cast. If the dimension being queried was
-        // static it would have been resolved above so we know it's dynamic
-        // here.
-        Value staticValue = rewriter.create<arith::ConstantIndexOp>(
-            op.getLoc(), valueType.getDimSize(idx));
-        rewriter.replaceOpWithNewOp<IREE::Util::OptimizationBarrierOp>(
-            op, staticValue);
-        return success();
-      }
-    }
-
     return rewriter.notifyMatchFailure(op, "no dynamic dims found/usable");
   }
 };
diff --git a/compiler/src/iree/compiler/Dialect/Flow/IR/FlowOps.cpp b/compiler/src/iree/compiler/Dialect/Flow/IR/FlowOps.cpp
index 0cb7297..c90bbfd 100644
--- a/compiler/src/iree/compiler/Dialect/Flow/IR/FlowOps.cpp
+++ b/compiler/src/iree/compiler/Dialect/Flow/IR/FlowOps.cpp
@@ -1597,35 +1597,18 @@
 // flow.tensor.constant
 //===----------------------------------------------------------------------===//
 
-ParseResult TensorConstantOp::parse(OpAsmParser &parser,
-                                    OperationState &result) {
-  if (parser.parseOptionalAttrDict(result.attributes))
-    return failure();
-  TypedAttr valueAttr;
-  if (failed(parser.parseAttribute(valueAttr)))
-    return failure();
-  result.addAttribute("value", valueAttr);
-  if (succeeded(parser.parseOptionalArrow())) {
-    Type resultType;
-    if (failed(parser.parseType(resultType)))
-      return failure();
-    result.addTypes(resultType);
-  } else {
-    result.addTypes(valueAttr.getType());
-  }
-  return success();
+// static
+bool TensorConstantOp::isBuildableWith(Attribute value, Type type) {
+  return isa<RankedTensorType>(type);
 }
 
-void TensorConstantOp::print(OpAsmPrinter &p) {
-  p << " ";
-  p.printOptionalAttrDict((*this)->getAttrs(), {"value"});
-  p.printAttribute(getValue());
-  auto attrType = getValue().getType();
-  auto resultType = getType();
-  if (attrType != resultType) {
-    p << " -> ";
-    p.printType(resultType);
-  }
+//===----------------------------------------------------------------------===//
+// flow.tensor.dynamic_constant
+//===----------------------------------------------------------------------===//
+
+// static
+bool TensorDynamicConstantOp::isBuildableWith(Attribute value, Type type) {
+  return TensorConstantOp::isBuildableWith(value, type);
 }
 
 //===----------------------------------------------------------------------===//
diff --git a/compiler/src/iree/compiler/Dialect/Flow/IR/FlowOps.td b/compiler/src/iree/compiler/Dialect/Flow/IR/FlowOps.td
index c797c03..d488b02 100644
--- a/compiler/src/iree/compiler/Dialect/Flow/IR/FlowOps.td
+++ b/compiler/src/iree/compiler/Dialect/Flow/IR/FlowOps.td
@@ -1040,26 +1040,62 @@
 
 let opDocGroup = OpGroupTensorOps in {
 
-def FLOW_TensorConstantOp : FLOW_PureOp<"tensor.constant"> {
+def FLOW_TensorConstantOp : FLOW_PureOp<"tensor.constant", [
+  ConstantLike,
+  AllTypesMatch<["value", "result"]>,
+]> {
-  let summary = [{tensor constant that can have dynamic dimensions}];
+  let summary = [{tensor constant of IREE-specific types/attributes}];
   let description = [{
-    Allows specifying a tensor constant of IREE-specific types/attributes or
-    where the return value can erase shape information.
+    Allows specifying a tensor constant of IREE-specific types/attributes.
 
     ```mlir
     %cst = flow.tensor.constant #something_tensor_like : tensor<2x2xf32>
     %res = math.absf %cst : tensor<2x2xf32>
     ```
+  }];
+  let arguments = (ins TypedAttrInterface:$value);
+  let results = (outs AnyTensor:$result);
+
+  let assemblyFormat = [{
+    attr-dict $value
+  }];
+
+  let extraClassDeclaration = [{
+    // Returns true if the constant op can be built with the given attribute.
+    static bool isBuildableWith(Attribute value, Type type);
+  }];
+
+  let hasFolder = 1;
+}
+
+def FLOW_TensorDynamicConstantOp : FLOW_Op<"tensor.dynamic_constant"> {
+  let summary = [{tensor constant that can have dynamic dimensions}];
+  let description = [{
+    Allows specifying a tensor constant of IREE-specific types/attributes with
+    a dynamic shape that approximates a value as passed from the user. This
+    disables many optimizations and should only be used for testing or
+    benchmarking where dynamic dimension behavior needs to be
+    preserved.
 
     ```mlir
-    %cst = flow.tensor.constant dense<4.0> : tensor<2x2xf32> -> tensor<?x2xf32>
+    %cst = flow.tensor.dynamic_constant #something_tensor_like : tensor<2x2xf32> -> tensor<?x2xf32>
     %res = math.absf %cst : tensor<?x2xf32>
     ```
   }];
   let arguments = (ins TypedAttrInterface:$value);
   let results = (outs AnyTensor:$result);
 
+  let assemblyFormat = [{
+    attr-dict $value `->` type($result)
+  }];
+
+  let extraClassDeclaration = [{
+    // Returns true if the constant op can be built with the given attribute.
+    static bool isBuildableWith(Attribute value, Type type);
+  }];
+
   let hasFolder = 1;
+  let hasCanonicalizer = 1;
 }
 
 def FLOW_TensorTieShapeOp : FLOW_PureOp<"tensor.tie_shape", [
diff --git a/compiler/src/iree/compiler/Dialect/Flow/IR/FlowTypes.cpp b/compiler/src/iree/compiler/Dialect/Flow/IR/FlowTypes.cpp
index 4f93ea7..c772d63 100644
--- a/compiler/src/iree/compiler/Dialect/Flow/IR/FlowTypes.cpp
+++ b/compiler/src/iree/compiler/Dialect/Flow/IR/FlowTypes.cpp
@@ -276,4 +276,38 @@
                                                     *collectiveElemType);
 }
 
+//===----------------------------------------------------------------------===//
+// custom<ParameterReference>($scope, $key)
+//===----------------------------------------------------------------------===//
+
+ParseResult parseParameterReference(AsmParser &parser, StringAttr &scopeAttr,
+                                    StringAttr &keyAttr) {
+  auto builder = parser.getBuilder();
+  StringAttr firstAttr;
+  if (failed(parser.parseCustomAttributeWithFallback(firstAttr,
+                                                     builder.getNoneType()))) {
+    return failure();
+  }
+  if (failed(parser.parseOptionalColon())) {
+    keyAttr = firstAttr;
+    return success();
+  }
+  scopeAttr = firstAttr;
+  if (failed(parser.parseColon()) ||
+      failed(parser.parseCustomAttributeWithFallback(keyAttr,
+                                                     builder.getNoneType()))) {
+    return failure();
+  }
+  return success();
+}
+
+void printParameterReference(AsmPrinter &p, StringAttr scopeAttr,
+                             StringAttr keyAttr) {
+  if (scopeAttr) {
+    p << "\"" << scopeAttr.getValue() << "\"";
+    p << "::";
+  }
+  p << "\"" << keyAttr.getValue() << "\"";
+}
+
 } // namespace mlir::iree_compiler::IREE::Flow
diff --git a/compiler/src/iree/compiler/Dialect/Flow/IR/FlowTypes.h b/compiler/src/iree/compiler/Dialect/Flow/IR/FlowTypes.h
index 3cc34ff..a1605c7 100644
--- a/compiler/src/iree/compiler/Dialect/Flow/IR/FlowTypes.h
+++ b/compiler/src/iree/compiler/Dialect/Flow/IR/FlowTypes.h
@@ -183,6 +183,20 @@
 std::optional<IREE::Flow::CollectiveElementType>
 convertToFlowCollectiveElementType(Type type);
 
+//===----------------------------------------------------------------------===//
+// custom<ParameterReference>($scope, $key)
+//===----------------------------------------------------------------------===//
+
+ParseResult parseParameterReference(AsmParser &parser, StringAttr &scopeAttr,
+                                    StringAttr &keyAttr);
+void printParameterReference(AsmPrinter &p, StringAttr scopeAttr,
+                             StringAttr keyAttr);
+static inline void printParameterReference(AsmPrinter &p, Operation *op,
+                                           StringAttr scopeAttr,
+                                           StringAttr keyAttr) {
+  printParameterReference(p, scopeAttr, keyAttr);
+}
+
 } // namespace mlir::iree_compiler::IREE::Flow
 
 #endif // IREE_COMPILER_DIALECT_FLOW_IR_FLOWTYPES_H_
diff --git a/compiler/src/iree/compiler/Dialect/Flow/IR/test/tensor_folding.mlir b/compiler/src/iree/compiler/Dialect/Flow/IR/test/tensor_folding.mlir
index de5a8d4..bcb1dbb 100644
--- a/compiler/src/iree/compiler/Dialect/Flow/IR/test/tensor_folding.mlir
+++ b/compiler/src/iree/compiler/Dialect/Flow/IR/test/tensor_folding.mlir
@@ -4,8 +4,8 @@
 util.func public @expandStaticShapeConstant() -> (tensor<2x4xi32>, index, index) {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
-  // CHECK-DAG: %[[CST:.+]] = arith.constant dense<2> : tensor<2x4xi32>
-  %0 = flow.tensor.constant dense<2> : tensor<2x4xi32> -> tensor<2x4xi32>
+  // CHECK-DAG: %[[CST:.+]] = flow.tensor.constant dense<2> : tensor<2x4xi32>
+  %0 = flow.tensor.constant dense<2> : tensor<2x4xi32>
   // CHECK-DAG: %[[C2:.+]] = arith.constant 2 : index
   %d0 = tensor.dim %0, %c0 : tensor<2x4xi32>
   // CHECK-DAG: %[[C4:.+]] = arith.constant 4 : index
@@ -16,19 +16,20 @@
 
 // -----
 
-// CHECK-LABEL: @tensorDimOfDynamicConstant
-util.func public @tensorDimOfDynamicConstant() -> (index, index) {
+// CHECK-LABEL: @expandDynamicShapeConstant
+util.func public @expandDynamicShapeConstant() -> (tensor<2x?xi32>, index, index) {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
-  // CHECK-NOT: flow.tensor.constant
-  %0 = flow.tensor.constant dense<2> : tensor<2x4xi32> -> tensor<2x?xi32>
+  // CHECK-DAG: %[[CST:.+]] = flow.tensor.constant dense<2> : tensor<2x4xi32>
+  %0 = flow.tensor.dynamic_constant dense<2> : tensor<2x4xi32> -> tensor<2x?xi32>
   // CHECK-DAG: %[[C2:.+]] = arith.constant 2 : index
-  %d0 = tensor.dim %0, %c0 : tensor<2x?xi32>
   // CHECK-DAG: %[[C4:.+]] = arith.constant 4 : index
-  // CHECK-DAG: %[[C4_DYNAMIC:.+]] = util.optimization_barrier %[[C4]]
+  // CHECK-DAG: %[[D1:.+]] = util.optimization_barrier %[[C4]] : index
+  // CHECK: %[[T:.+]] = flow.tensor.reshape %[[CST]] : tensor<2x4xi32> -> tensor<2x?xi32>{%[[D1]]}
+  %d0 = tensor.dim %0, %c0 : tensor<2x?xi32>
   %d1 = tensor.dim %0, %c1 : tensor<2x?xi32>
-  // CHECK: util.return %[[C2]], %[[C4_DYNAMIC]]
-  util.return %d0, %d1 : index, index
+  // CHECK: util.return %[[T]], %[[C2]], %[[D1]]
+  util.return %0, %d0, %d1 : tensor<2x?xi32>, index, index
 }
 
 // -----
diff --git a/compiler/src/iree/compiler/Dialect/Flow/Transforms/test/dispatch_linalg_on_tensors.mlir b/compiler/src/iree/compiler/Dialect/Flow/Transforms/test/dispatch_linalg_on_tensors.mlir
index 9a66945..f727f4e 100644
--- a/compiler/src/iree/compiler/Dialect/Flow/Transforms/test/dispatch_linalg_on_tensors.mlir
+++ b/compiler/src/iree/compiler/Dialect/Flow/Transforms/test/dispatch_linalg_on_tensors.mlir
@@ -794,8 +794,8 @@
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
   %cst = arith.constant 0.000000e+00 : f32
-  %0 = flow.tensor.constant dense<[[1.500000e+01, 1.400000e+01, 1.300000e+01], [1.200000e+01, 1.100000e+01, 1.000000e+01], [9.000000e+00, 8.000000e+00, 7.000000e+00], [6.000000e+00, 5.000000e+00, 4.000000e+00], [3.000000e+00, 2.000000e+00, 1.000000e+00]]> : tensor<5x3xf32> -> tensor<?x?xf32>
-  %1 = flow.tensor.constant dense<[[1.500000e+01, 1.400000e+01, 1.300000e+01, 1.200000e+01, 1.100000e+01], [1.000000e+01, 9.000000e+00, 8.000000e+00, 7.000000e+00, 6.000000e+00], [5.000000e+00, 4.000000e+00, 3.000000e+00, 2.000000e+00, 1.000000e+00]]> : tensor<3x5xf32> -> tensor<?x?xf32>
+  %0 = flow.tensor.dynamic_constant dense<[[1.500000e+01, 1.400000e+01, 1.300000e+01], [1.200000e+01, 1.100000e+01, 1.000000e+01], [9.000000e+00, 8.000000e+00, 7.000000e+00], [6.000000e+00, 5.000000e+00, 4.000000e+00], [3.000000e+00, 2.000000e+00, 1.000000e+00]]> : tensor<5x3xf32> -> tensor<?x?xf32>
+  %1 = flow.tensor.dynamic_constant dense<[[1.500000e+01, 1.400000e+01, 1.300000e+01, 1.200000e+01, 1.100000e+01], [1.000000e+01, 9.000000e+00, 8.000000e+00, 7.000000e+00, 6.000000e+00], [5.000000e+00, 4.000000e+00, 3.000000e+00, 2.000000e+00, 1.000000e+00]]> : tensor<3x5xf32> -> tensor<?x?xf32>
   %2 = tensor.dim %0, %c0 : tensor<?x?xf32>
   %3 = tensor.dim %1, %c1 : tensor<?x?xf32>
   %4 = tensor.empty(%2, %3) : tensor<?x?xf32>
diff --git a/compiler/src/iree/compiler/Dialect/Stream/Conversion/FlowToStream/Patterns.cpp b/compiler/src/iree/compiler/Dialect/Stream/Conversion/FlowToStream/Patterns.cpp
index b80b75f..93f1aef 100644
--- a/compiler/src/iree/compiler/Dialect/Stream/Conversion/FlowToStream/Patterns.cpp
+++ b/compiler/src/iree/compiler/Dialect/Stream/Conversion/FlowToStream/Patterns.cpp
@@ -41,25 +41,51 @@
   LogicalResult
   matchAndRewrite(IREE::Flow::TensorConstantOp constantOp, OpAdaptor adaptor,
                   ConversionPatternRewriter &rewriter) const override {
+    // Capture the tensor constant as a strongly-typed constant resource.
+    Type constantType = IREE::Stream::ResourceType::get(
+        getContext(), IREE::Stream::Lifetime::Constant);
+    auto affinityAttr = IREE::Stream::AffinityAttr::lookup(constantOp);
+    auto newOp = rewriter.create<IREE::Stream::TensorConstantOp>(
+        constantOp.getLoc(), constantType, constantOp.getValue(),
+        TypeAttr::get(constantOp.getType()), ValueRange{}, affinityAttr);
+
+    // Transfer to unknown lifetime.
+    Type unknownType = IREE::Stream::ResourceType::get(getContext());
+    auto constantSize = rewriter.createOrFold<IREE::Stream::ResourceSizeOp>(
+        constantOp.getLoc(), rewriter.getIndexType(), newOp.getResult());
+    rewriter.replaceOpWithNewOp<IREE::Stream::AsyncTransferOp>(
+        constantOp, unknownType, newOp.getResult(), constantSize, constantSize,
+        /*source_affinity=*/affinityAttr,
+        /*result_affinity=*/affinityAttr);
+    return success();
+  }
+};
+
+struct ConvertTensorDynamicConstantOp
+    : public OpConversionPattern<IREE::Flow::TensorDynamicConstantOp> {
+public:
+  using OpConversionPattern::OpConversionPattern;
+  LogicalResult
+  matchAndRewrite(IREE::Flow::TensorDynamicConstantOp constantOp,
+                  OpAdaptor adaptor,
+                  ConversionPatternRewriter &rewriter) const override {
     auto attrType = dyn_cast<RankedTensorType>(constantOp.getValue().getType());
     if (!attrType)
       return failure();
     auto resultType = constantOp.getType();
 
     // If the op is acting as a dynamic value then preserve that behavior by
-    // calculation the shape through optimization barriers.
+    // calculating the shape through optimization barriers.
     SmallVector<Value> dynamicDims;
-    if (!resultType.hasStaticShape()) {
-      for (unsigned i = 0; i < resultType.getRank(); ++i) {
-        if (resultType.isDynamicDim(i)) {
-          Value staticDim = rewriter.create<arith::ConstantIndexOp>(
-              constantOp.getLoc(), attrType.getDimSize(i));
-          Value dynamicDim = rewriter
-                                 .create<IREE::Util::OptimizationBarrierOp>(
-                                     constantOp.getLoc(), staticDim)
-                                 .getResult(0);
-          dynamicDims.push_back(dynamicDim);
-        }
+    for (unsigned i = 0; i < resultType.getRank(); ++i) {
+      if (resultType.isDynamicDim(i)) {
+        Value staticDim = rewriter.create<arith::ConstantIndexOp>(
+            constantOp.getLoc(), attrType.getDimSize(i));
+        Value dynamicDim = rewriter
+                               .create<IREE::Util::OptimizationBarrierOp>(
+                                   constantOp.getLoc(), staticDim)
+                               .getResult(0);
+        dynamicDims.push_back(dynamicDim);
       }
     }
 
@@ -937,7 +963,7 @@
                                             TypeConverter &typeConverter,
                                             RewritePatternSet &patterns) {
   patterns
-      .insert<ConvertTensorConstantOp,
+      .insert<ConvertTensorConstantOp, ConvertTensorDynamicConstantOp,
               ConvertTensorCastLikeOp<IREE::Flow::TensorReshapeOp>,
               ConvertTensorCastLikeOp<IREE::Flow::TensorBitCastOp>,
               ConvertTensorAllocaOp, ConvertTensorEmptyOp, ConvertTensorSplatOp,
diff --git a/compiler/src/iree/compiler/Dialect/Stream/Conversion/FlowToStream/test/tensor_ops.mlir b/compiler/src/iree/compiler/Dialect/Stream/Conversion/FlowToStream/test/tensor_ops.mlir
index 756f319..029423b 100644
--- a/compiler/src/iree/compiler/Dialect/Stream/Conversion/FlowToStream/test/tensor_ops.mlir
+++ b/compiler/src/iree/compiler/Dialect/Stream/Conversion/FlowToStream/test/tensor_ops.mlir
@@ -12,6 +12,18 @@
 
 // -----
 
+// CHECK-LABEL: @tensorConstantParameter
+util.func public @tensorConstantParameter() -> tensor<4x2xi32> {
+  // CHECK-DAG: %[[CST:.+]] = stream.tensor.constant : tensor<4x2xi32> in !stream.resource<constant> = #stream.parameter.named<"scope"::"key"> : tensor<4x2xi32>
+  // CHECK-DAG: %[[SIZE:.+]] = stream.resource.size %[[CST]] : !stream.resource<constant>
+  // CHECK-DAG: %[[TRANSFER:.+]] = stream.async.transfer %[[CST]] : !stream.resource<constant>{%[[SIZE]]} -> !stream.resource<*>{%[[SIZE]]}
+  %cst = flow.tensor.constant #stream.parameter.named<"scope"::"key"> : tensor<4x2xi32>
+  // CHECK: util.return %[[TRANSFER]], %[[SIZE]]
+  util.return %cst : tensor<4x2xi32>
+}
+
+// -----
+
 // CHECK-LABEL: @tensorConstantDynamic
 util.func public @tensorConstantDynamic() -> tensor<?x?xi32> {
   // CHECK-DAG: %[[C2:.+]] = arith.constant 2 : index
@@ -21,25 +33,13 @@
   // CHECK-DAG: %[[CST:.+]] = stream.tensor.constant : tensor<?x?xi32>{%[[D0]], %[[D1]]} in !stream.resource<constant> = dense<2> : tensor<2x4xi32>
   // CHECK-DAG: %[[SIZE:.+]] = stream.resource.size %[[CST]] : !stream.resource<constant>
   // CHECK-DAG: %[[TRANSFER:.+]] = stream.async.transfer %[[CST]] : !stream.resource<constant>{%[[SIZE]]} -> !stream.resource<*>{%[[SIZE]]}
-  %cst = flow.tensor.constant dense<2> : tensor<2x4xi32> -> tensor<?x?xi32>
+  %cst = flow.tensor.dynamic_constant dense<2> : tensor<2x4xi32> -> tensor<?x?xi32>
   // CHECK: util.return %[[TRANSFER]], %[[SIZE]]
   util.return %cst : tensor<?x?xi32>
 }
 
 // -----
 
-// CHECK-LABEL: @tensorConstantParameter
-util.func public @tensorConstantParameter() -> tensor<4x2xi32> {
-  // CHECK-DAG: %[[CST:.+]] = stream.tensor.constant : tensor<4x2xi32> in !stream.resource<constant> = #stream.parameter.named<"scope"::"key"> : tensor<4x2xi32>
-  // CHECK-DAG: %[[SIZE:.+]] = stream.resource.size %[[CST]] : !stream.resource<constant>
-  // CHECK-DAG: %[[TRANSFER:.+]] = stream.async.transfer %[[CST]] : !stream.resource<constant>{%[[SIZE]]} -> !stream.resource<*>{%[[SIZE]]}
-  %cst = flow.tensor.constant #stream.parameter.named<"scope"::"key"> : tensor<4x2xi32>
-  // CHECK: util.return %[[TRANSFER]], %[[SIZE]]
-  util.return %cst : tensor<4x2xi32>
-}
-
-// -----
-
 // CHECK-LABEL: @tensorReshapePassThrough
 //  CHECK-SAME: (%[[INPUT:.+]]: !stream.resource<*>, %[[INPUT_SIZE:.+]]: index)
 util.func public @tensorReshapePassThrough(%input: tensor<5x24x48xf32>) -> tensor<30x2x96xf32> {
diff --git a/tests/e2e/regression/dynamic_abs.mlir b/tests/e2e/regression/dynamic_abs.mlir
index 5749a86..95d0720 100644
--- a/tests/e2e/regression/dynamic_abs.mlir
+++ b/tests/e2e/regression/dynamic_abs.mlir
@@ -1,5 +1,5 @@
 func.func @dynamic_tensor() {
-  %input = flow.tensor.constant dense<[[-1.0, 2.0, -3.0], [4.0, -5.0, 6.0]]> : tensor<2x3xf32> -> tensor<?x?xf32>
+  %input = flow.tensor.dynamic_constant dense<[[-1.0, 2.0, -3.0], [4.0, -5.0, 6.0]]> : tensor<2x3xf32> -> tensor<?x?xf32>
   %res = stablehlo.abs %input : tensor<?x?xf32>
   %dshape = util.optimization_barrier %res : tensor<?x?xf32>
   %result = tensor.cast %dshape : tensor<?x?xf32> to tensor<2x3xf32>
diff --git a/tests/e2e/regression/dynamic_add.mlir b/tests/e2e/regression/dynamic_add.mlir
index 318513b..a0fad0d 100644
--- a/tests/e2e/regression/dynamic_add.mlir
+++ b/tests/e2e/regression/dynamic_add.mlir
@@ -1,6 +1,6 @@
 func.func @main() {
-  %lhs = flow.tensor.constant dense<[[1.0,2.0,3.0,4.0],[-1.0,-2.0,-3.0,-4.0]]> : tensor<2x4xf32> -> tensor<?x4xf32>
-  %rhs = flow.tensor.constant dense<[[5.0,6.0,7.0,8.0],[-5.0,-6.0,-7.0,-8.0]]> : tensor<2x4xf32> -> tensor<?x4xf32>
+  %lhs = flow.tensor.dynamic_constant dense<[[1.0,2.0,3.0,4.0],[-1.0,-2.0,-3.0,-4.0]]> : tensor<2x4xf32> -> tensor<?x4xf32>
+  %rhs = flow.tensor.dynamic_constant dense<[[5.0,6.0,7.0,8.0],[-5.0,-6.0,-7.0,-8.0]]> : tensor<2x4xf32> -> tensor<?x4xf32>
   %2 = stablehlo.add %lhs, %rhs : tensor<?x4xf32>
   %3 = util.optimization_barrier %2 : tensor<?x4xf32>
   %result = tensor.cast %3 : tensor<?x4xf32> to tensor<2x4xf32>
diff --git a/tests/e2e/regression/dynamic_dot.mlir b/tests/e2e/regression/dynamic_dot.mlir
index 041993e..6b7bd20 100644
--- a/tests/e2e/regression/dynamic_dot.mlir
+++ b/tests/e2e/regression/dynamic_dot.mlir
@@ -1,11 +1,11 @@
 func.func @dynamic_dot() {
-  %lhs = flow.tensor.constant dense<[
+  %lhs = flow.tensor.dynamic_constant dense<[
     [15.0, 14.0, 13.0],
     [12.0, 11.0, 10.0],
     [09.0, 08.0, 07.0],
     [06.0, 05.0, 04.0],
     [03.0, 02.0, 01.0]]> : tensor<5x3xf32> -> tensor<?x?xf32>
-  %rhs = flow.tensor.constant dense<[
+  %rhs = flow.tensor.dynamic_constant dense<[
     [15.0, 14.0, 13.0, 12.0, 11.0],
     [10.0, 09.0, 08.0, 07.0, 06.0],
     [05.0, 04.0, 03.0, 02.0, 01.0]]> : tensor<3x5xf32> -> tensor<?x?xf32>
diff --git a/tests/e2e/regression/dynamic_reduce_min.mlir b/tests/e2e/regression/dynamic_reduce_min.mlir
index 382d268..f484cec 100644
--- a/tests/e2e/regression/dynamic_reduce_min.mlir
+++ b/tests/e2e/regression/dynamic_reduce_min.mlir
@@ -1,5 +1,5 @@
 func.func @reduce_min() {
-  %input = flow.tensor.constant
+  %input = flow.tensor.dynamic_constant
     dense<[[1.0, 2.0, 3.0, 4.0],[-1.0 ,-2.0 ,-3.0 ,-4.0]]> : tensor<2x4xf32> -> tensor<?x?xf32>
   %0 = stablehlo.constant dense<0x7F800000> : tensor<f32>
   %1 = "stablehlo.reduce"(%input, %0) ( {
diff --git a/tests/e2e/regression/dynamic_torch_index_select_high_rank.mlir b/tests/e2e/regression/dynamic_torch_index_select_high_rank.mlir
index 501b242..ccfed09 100644
--- a/tests/e2e/regression/dynamic_torch_index_select_high_rank.mlir
+++ b/tests/e2e/regression/dynamic_torch_index_select_high_rank.mlir
@@ -1,6 +1,6 @@
 func.func @torch_index_select1() {
-  %lhs = flow.tensor.constant dense<[[6,7],[8,9]]> : tensor<2x2xi32> -> tensor<?x?xi32>
-  %rhs = flow.tensor.constant dense<[[[[0,1],[1,0]],[[0,0],[1,1]]],[[[1,1],[0,0]],[[0,1],[1,0]]]]> : tensor<2x2x2x2xi32> -> tensor<?x?x?x?xi32>
+  %lhs = flow.tensor.dynamic_constant dense<[[6,7],[8,9]]> : tensor<2x2xi32> -> tensor<?x?xi32>
+  %rhs = flow.tensor.dynamic_constant dense<[[[[0,1],[1,0]],[[0,0],[1,1]]],[[[1,1],[0,0]],[[0,1],[1,0]]]]> : tensor<2x2x2x2xi32> -> tensor<?x?x?x?xi32>
   %0 = "stablehlo.torch_index_select"(%lhs, %rhs) {batch_dims = 1 : i64, dim = 1 : i64} : (tensor<?x?xi32>, tensor<?x?x?x?xi32>) -> tensor<?x?x?x?xi32>
   %dshape = util.optimization_barrier %0 : tensor<?x?x?x?xi32>
   %result = tensor.cast %dshape : tensor<?x?x?x?xi32> to tensor<2x2x2x2xi32>
@@ -13,8 +13,8 @@
 }
 
 func.func @torch_index_select2() {
-  %lhs = flow.tensor.constant dense<[[6,7],[8,9]]> : tensor<2x2xi32> -> tensor<?x?xi32>
-  %rhs = flow.tensor.constant dense<[[[[0,1],[1,0]],[[0,0],[1,1]]],[[[1,1],[0,0]],[[0,1],[1,0]]]]> : tensor<2x2x2x2xi32> -> tensor<?x?x?x?xi32>
+  %lhs = flow.tensor.dynamic_constant dense<[[6,7],[8,9]]> : tensor<2x2xi32> -> tensor<?x?xi32>
+  %rhs = flow.tensor.dynamic_constant dense<[[[[0,1],[1,0]],[[0,0],[1,1]]],[[[1,1],[0,0]],[[0,1],[1,0]]]]> : tensor<2x2x2x2xi32> -> tensor<?x?x?x?xi32>
   %0 = "stablehlo.torch_index_select"(%lhs, %rhs) {batch_dims = 0 : i64, dim = 0 : i64} : (tensor<?x?xi32>, tensor<?x?x?x?xi32>) -> tensor<?x?x?x?x?xi32>
   %dshape = util.optimization_barrier %0 : tensor<?x?x?x?x?xi32>
   %result = tensor.cast %dshape : tensor<?x?x?x?x?xi32> to tensor<2x2x2x2x2xi32>
diff --git a/tests/e2e/regression/dynamic_torch_index_select_negative.mlir b/tests/e2e/regression/dynamic_torch_index_select_negative.mlir
index c2a4a60..9e4e6d5 100644
--- a/tests/e2e/regression/dynamic_torch_index_select_negative.mlir
+++ b/tests/e2e/regression/dynamic_torch_index_select_negative.mlir
@@ -1,6 +1,6 @@
 func.func @torch_index_select1() {
-  %lhs = flow.tensor.constant dense<[[[100, 101],[110, 111]],[[200, 201],[210, 211]]]> : tensor<2x2x2xi32> -> tensor<?x?x?xi32>
-  %rhs = flow.tensor.constant dense<[[[0, 1],[1, 0]],[[0, 0],[1, 1]]]> : tensor<2x2x2xi32> -> tensor<?x?x?xi32>
+  %lhs = flow.tensor.dynamic_constant dense<[[[100, 101],[110, 111]],[[200, 201],[210, 211]]]> : tensor<2x2x2xi32> -> tensor<?x?x?xi32>
+  %rhs = flow.tensor.dynamic_constant dense<[[[0, 1],[1, 0]],[[0, 0],[1, 1]]]> : tensor<2x2x2xi32> -> tensor<?x?x?xi32>
   %0 = "stablehlo.torch_index_select"(%lhs, %rhs) {batch_dims = -1 : i64, dim = -1 : i64} : (tensor<?x?x?xi32>, tensor<?x?x?xi32>) -> tensor<?x?x?xi32>
   %dshape = util.optimization_barrier %0 : tensor<?x?x?xi32>
   %result = tensor.cast %dshape : tensor<?x?x?xi32> to tensor<2x2x2xi32>
diff --git a/tests/e2e/regression/dynamic_torch_index_select_scalar.mlir b/tests/e2e/regression/dynamic_torch_index_select_scalar.mlir
index 06c5540..b77cc51 100644
--- a/tests/e2e/regression/dynamic_torch_index_select_scalar.mlir
+++ b/tests/e2e/regression/dynamic_torch_index_select_scalar.mlir
@@ -1,5 +1,5 @@
 func.func @torch_index_select1() {
-  %lhs = flow.tensor.constant
+  %lhs = flow.tensor.dynamic_constant
     dense<[[[1,2,3,4,5]],
            [[6,7,8,9,10]],
            [[11,12,13,14,15]],
@@ -15,7 +15,7 @@
 }
 
 func.func @torch_index_select2() {
-   %lhs = flow.tensor.constant
+   %lhs = flow.tensor.dynamic_constant
     dense<[[[1,2,3,4,5]],
            [[6,7,8,9,10]],
            [[11,12,13,14,15]],
diff --git a/tests/e2e/regression/dynamic_torch_index_select_vector.mlir b/tests/e2e/regression/dynamic_torch_index_select_vector.mlir
index c0662c4..57cfa28 100644
--- a/tests/e2e/regression/dynamic_torch_index_select_vector.mlir
+++ b/tests/e2e/regression/dynamic_torch_index_select_vector.mlir
@@ -1,7 +1,7 @@
 func.func @torch_index_select1() {
-  %lhs = flow.tensor.constant
+  %lhs = flow.tensor.dynamic_constant
     dense<[[[1, 2],[3, 4]],[[5, 6],[7, 8]],[[9, 10],[11, 12]]]> : tensor<3x2x2xi32> -> tensor<?x?x?xi32>
-  %rhs = flow.tensor.constant dense<[0, 1]> : tensor<2xi32> -> tensor<?xi32>
+  %rhs = flow.tensor.dynamic_constant dense<[0, 1]> : tensor<2xi32> -> tensor<?xi32>
   %0 = "stablehlo.torch_index_select"(%lhs, %rhs) {batch_dims = 0 : i64, dim = 1 : i64} : (tensor<?x?x?xi32>, tensor<?xi32>) -> tensor<?x?x?xi32>
   %dshape = util.optimization_barrier %0 : tensor<?x?x?xi32>
   %result = tensor.cast %dshape : tensor<?x?x?xi32> to tensor<3x2x2xi32>
@@ -13,9 +13,9 @@
 }
 
 func.func @torch_index_select2() {
-  %lhs = flow.tensor.constant
+  %lhs = flow.tensor.dynamic_constant
     dense<[[[1, 2],[3, 4]],[[5, 6],[7, 8]],[[9, 10],[11, 12]]]> : tensor<3x2x2xi32> -> tensor<?x?x?xi32>
-  %rhs = flow.tensor.constant dense<[0, 1]> : tensor<2xi32> -> tensor<?xi32>
+  %rhs = flow.tensor.dynamic_constant dense<[0, 1]> : tensor<2xi32> -> tensor<?xi32>
   %0 = "stablehlo.torch_index_select"(%lhs, %rhs) {batch_dims = 0 : i64, dim = 0 : i64} : (tensor<?x?x?xi32>, tensor<?xi32>) -> tensor<?x?x?xi32>
   %dshape = util.optimization_barrier %0 : tensor<?x?x?xi32>
   %result = tensor.cast %dshape : tensor<?x?x?xi32> to tensor<2x2x2xi32>
diff --git a/tests/e2e/regression/layernorm.mlir b/tests/e2e/regression/layernorm.mlir
index 5cbc0df..15f02dec 100644
--- a/tests/e2e/regression/layernorm.mlir
+++ b/tests/e2e/regression/layernorm.mlir
@@ -76,9 +76,9 @@
   %cst = arith.constant 1.000000e+00 : f32
   %cst_0 = arith.constant 0.000000e+00 : f32
   %cst_1 = arith.constant dense<0.000000e+00> : tensor<128x384xf32>
-  %cst_2 = flow.tensor.constant dense<9.99999996E-13> : tensor<128x1xf32> -> tensor<?x1xf32>
-  %cst_3 = flow.tensor.constant dense<3.840000e+02> : tensor<128x1xf32> -> tensor<?x1xf32>
-  %cst_4 = flow.tensor.constant dense<5.000000e+00> : tensor<128x384xf32> -> tensor<?x?xf32>
+  %cst_2 = flow.tensor.dynamic_constant dense<9.99999996E-13> : tensor<128x1xf32> -> tensor<?x1xf32>
+  %cst_3 = flow.tensor.dynamic_constant dense<3.840000e+02> : tensor<128x1xf32> -> tensor<?x1xf32>
+  %cst_4 = flow.tensor.dynamic_constant dense<5.000000e+00> : tensor<128x384xf32> -> tensor<?x?xf32>
   %c_0_index = arith.constant 0 : index
   %c_1_index = arith.constant 1 : index
   %dim_0 = tensor.dim %cst_4, %c_0_index : tensor<?x?xf32>
diff --git a/tests/e2e/regression/reduction_broadcast_elementwise.mlir b/tests/e2e/regression/reduction_broadcast_elementwise.mlir
index 1ce6bd5..38c108c 100644
--- a/tests/e2e/regression/reduction_broadcast_elementwise.mlir
+++ b/tests/e2e/regression/reduction_broadcast_elementwise.mlir
@@ -40,7 +40,7 @@
 func.func @max_sub_exp_dynamic() {
   %cst = arith.constant -3.40282347E+38 : f32
   %cst_0 = arith.constant dense<1.000000e+00> : tensor<12x128x128xf32>
-  %cst_1 = flow.tensor.constant dense<5.000000e+00> : tensor<12x128x128xf32> -> tensor<?x?x?xf32>
+  %cst_1 = flow.tensor.dynamic_constant dense<5.000000e+00> : tensor<12x128x128xf32> -> tensor<?x?x?xf32>
   %c_0_index = arith.constant 0 : index
   %c_1_index = arith.constant 1 : index
   %c_2_index = arith.constant 2 : index
diff --git a/tests/e2e/regression/softmax.mlir b/tests/e2e/regression/softmax.mlir
index fb77441..19fb5bc 100644
--- a/tests/e2e/regression/softmax.mlir
+++ b/tests/e2e/regression/softmax.mlir
@@ -62,7 +62,7 @@
   %cst_0 = arith.constant 0.000000e+00 : f32
   %cst_1 = arith.constant -3.40282347E+38 : f32
   %cst_2 = arith.constant dense<7.812500e-03> : tensor<12x128x128xf32>
-  %cst_3 = flow.tensor.constant dense<5.000000e+00> : tensor<12x128x128xf32> -> tensor<?x?x?xf32>
+  %cst_3 = flow.tensor.dynamic_constant dense<5.000000e+00> : tensor<12x128x128xf32> -> tensor<?x?x?xf32>
   %c_0_index = arith.constant 0 : index
   %c_1_index = arith.constant 1 : index
   %c_2_index = arith.constant 2 : index
diff --git a/tests/e2e/stablehlo_ops/scatter_dynamic.mlir b/tests/e2e/stablehlo_ops/scatter_dynamic.mlir
index 50ae47c..0127585 100644
--- a/tests/e2e/stablehlo_ops/scatter_dynamic.mlir
+++ b/tests/e2e/stablehlo_ops/scatter_dynamic.mlir
@@ -1,7 +1,7 @@
 func.func @scatter_add_slice_2D_dynamic_num_updates() {
   %arg0 = util.unfoldable_constant dense<1> : tensor<6x3xi32>
-  %arg1 = flow.tensor.constant dense<[[2], [4]]> : tensor<2x1xi32> -> tensor<?x1xi32>
-  %arg2 = flow.tensor.constant dense<[[1, 2, 3],
+  %arg1 = flow.tensor.dynamic_constant dense<[[2], [4]]> : tensor<2x1xi32> -> tensor<?x1xi32>
+  %arg2 = flow.tensor.dynamic_constant dense<[[1, 2, 3],
                                              [4, 5, 6]]> : tensor<2x3xi32> -> tensor<?x3xi32>
   %0 = "stablehlo.scatter"(%arg0, %arg1, %arg2) ( {
   ^bb0(%arg3: tensor<i32>, %arg4: tensor<i32>):  // no predecessors
diff --git a/tests/e2e/tensor_ops/pack.mlir b/tests/e2e/tensor_ops/pack.mlir
index 3837d2e..c2aa676 100644
--- a/tests/e2e/tensor_ops/pack.mlir
+++ b/tests/e2e/tensor_ops/pack.mlir
@@ -28,7 +28,7 @@
 }
 
 func.func @dynamic_pack_simple() {
-  %iree_input = flow.tensor.constant dense<[
+  %iree_input = flow.tensor.dynamic_constant dense<[
     [0, 1, 2, 3],
     [4, 5, 6, 7],
     [8, 9, 10, 11],
@@ -69,7 +69,7 @@
 }
 
 func.func @dynamic_pack_simple_pad_mode() {
-  %iree_input = flow.tensor.constant dense<[
+  %iree_input = flow.tensor.dynamic_constant dense<[
     [0, 1, 2, 3],
     [4, 5, 6, 7],
     [8, 9, 10, 11],
diff --git a/tests/e2e/tensor_ops/pack_dynamic_inner_tiles.mlir b/tests/e2e/tensor_ops/pack_dynamic_inner_tiles.mlir
index a2b1b5c..d75eca5 100644
--- a/tests/e2e/tensor_ops/pack_dynamic_inner_tiles.mlir
+++ b/tests/e2e/tensor_ops/pack_dynamic_inner_tiles.mlir
@@ -19,7 +19,7 @@
 }
 
 func.func @fully_dynamic_pack_simple() {
-  %iree_input = flow.tensor.constant dense<[
+  %iree_input = flow.tensor.dynamic_constant dense<[
     [0, 1, 2, 3],
     [4, 5, 6, 7],
     [8, 9, 10, 11],
diff --git a/tests/e2e/tensor_ops/unpack.mlir b/tests/e2e/tensor_ops/unpack.mlir
index b187b4f..e5b0df4 100644
--- a/tests/e2e/tensor_ops/unpack.mlir
+++ b/tests/e2e/tensor_ops/unpack.mlir
@@ -31,7 +31,7 @@
 }
 
 func.func @dynamic_unpack_simple() {
-  %iree_input = flow.tensor.constant dense<[[[[0, 1], [4, 5]], [[2, 3], [6, 7]]], [[[8, 9], [12, 13]], [[10 ,11], [14, 15]]]]> : tensor<2x2x2x2xi32> -> tensor<?x?x2x2xi32>
+  %iree_input = flow.tensor.dynamic_constant dense<[[[[0, 1], [4, 5]], [[2, 3], [6, 7]]], [[[8, 9], [12, 13]], [[10 ,11], [14, 15]]]]> : tensor<2x2x2x2xi32> -> tensor<?x?x2x2xi32>
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
   %c2 = arith.constant 2 : index
@@ -60,7 +60,7 @@
 }
 
 func.func @dynamic_unpack_simple_extract_slice() {
-  %iree_input = flow.tensor.constant dense<[[[[0, 1, 2], [4, 5, 6], [8, 9, 10]],
+  %iree_input = flow.tensor.dynamic_constant dense<[[[[0, 1, 2], [4, 5, 6], [8, 9, 10]],
                                        [[3, 0, 0], [7, 0, 0], [11, 0, 0]]],
                                       [[[12, 13, 14], [0, 0, 0], [0, 0, 0]],
                                        [[15, 0, 0], [0, 0, 0], [0, 0, 0]]]]> : tensor<2x2x3x3xi32> -> tensor<?x?x3x3xi32>
diff --git a/tests/microbenchmarks/dynamic_shape_vectorization.mlir b/tests/microbenchmarks/dynamic_shape_vectorization.mlir
index f7ce096..be10d3a 100644
--- a/tests/microbenchmarks/dynamic_shape_vectorization.mlir
+++ b/tests/microbenchmarks/dynamic_shape_vectorization.mlir
@@ -14,9 +14,9 @@
   %dim1 = util.unfoldable_constant 513 : index
   %dim2 = util.unfoldable_constant 385 : index
 
-  %A = flow.tensor.constant dense<1.0> : tensor<513x257xf32> -> tensor<?x?xf32>
-  %B = flow.tensor.constant dense<2.0> : tensor<257x385xf32> -> tensor<?x?xf32>
-  %C = flow.tensor.constant dense<0.0> : tensor<513x385xf32> -> tensor<?x?xf32>
+  %A = flow.tensor.dynamic_constant dense<1.0> : tensor<513x257xf32> -> tensor<?x?xf32>
+  %B = flow.tensor.dynamic_constant dense<2.0> : tensor<257x385xf32> -> tensor<?x?xf32>
+  %C = flow.tensor.dynamic_constant dense<0.0> : tensor<513x385xf32> -> tensor<?x?xf32>
 
   %gemm = linalg.matmul
       ins(%A, %B : tensor<?x?xf32>, tensor<?x?xf32>)
@@ -26,9 +26,9 @@
 
 func.func @dynamic_elw() -> tensor<?x?xf32> {
   %c0 = arith.constant 0.000000e+00 : f32
-  %A = flow.tensor.constant dense<1.0> : tensor<513x1025xf32> -> tensor<?x?xf32>
-  %B = flow.tensor.constant dense<2.0> : tensor<513x1025xf32> -> tensor<?x?xf32>
-  %C = flow.tensor.constant dense<0.0> : tensor<513x1025xf32> -> tensor<?x?xf32>
+  %A = flow.tensor.dynamic_constant dense<1.0> : tensor<513x1025xf32> -> tensor<?x?xf32>
+  %B = flow.tensor.dynamic_constant dense<2.0> : tensor<513x1025xf32> -> tensor<?x?xf32>
+  %C = flow.tensor.dynamic_constant dense<0.0> : tensor<513x1025xf32> -> tensor<?x?xf32>
 
   %gen = linalg.generic {
       indexing_maps = [
diff --git a/tools/test/iree-run-module-outputs.mlir b/tools/test/iree-run-module-outputs.mlir
index fc3157f..66765cf 100644
--- a/tools/test/iree-run-module-outputs.mlir
+++ b/tools/test/iree-run-module-outputs.mlir
@@ -24,7 +24,7 @@
   %1 = arith.constant dense<4.0> : tensor<f32>
   // OUTPUT-DEFAULT: result[2]: hal.buffer_view
   // OUTPUT-DEFAULT-NEXT: 2x4xi32=[0 1 2 3][4 5 6 7]
-  %2 = flow.tensor.constant dense<[[0,1,2,3],[4,5,6,7]]> : tensor<2x4xi32> -> tensor<?x4xi32>
+  %2 = flow.tensor.dynamic_constant dense<[[0,1,2,3],[4,5,6,7]]> : tensor<2x4xi32> -> tensor<?x4xi32>
   return %0, %1, %2 : i32, tensor<f32>, tensor<?x4xi32>
 }
 
@@ -48,7 +48,7 @@
   %1 = arith.constant dense<4.0> : tensor<f32>
   // OUTPUT-NUMPY-NEXT{LITERAL}: [[0 1 2 3]
   // OUTPUT-NUMPY-NEXT{LITERAL}:  [4 5 6 7]]
-  %2 = flow.tensor.constant dense<[[0,1,2,3],[4,5,6,7]]> : tensor<2x4xi32> -> tensor<?x4xi32>
+  %2 = flow.tensor.dynamic_constant dense<[[0,1,2,3],[4,5,6,7]]> : tensor<2x4xi32> -> tensor<?x4xi32>
   return %0, %1, %2 : i32, tensor<f32>, tensor<?x4xi32>
 }
 
@@ -90,7 +90,7 @@
 
 func.func @write_binary() -> (tensor<f32>, tensor<?x4xi32>) {
   %0 = arith.constant dense<4.0> : tensor<f32>
-  %1 = flow.tensor.constant dense<[[0,1,2,3],[4,5,6,7]]> : tensor<2x4xi32> -> tensor<?x4xi32>
+  %1 = flow.tensor.dynamic_constant dense<[[0,1,2,3],[4,5,6,7]]> : tensor<2x4xi32> -> tensor<?x4xi32>
   return %0, %1 : tensor<f32>, tensor<?x4xi32>
 }
 func.func @echo_binary(%arg0: tensor<f32>, %arg1: tensor<?x4xi32>) -> (tensor<f32>, tensor<?x4xi32>) {