Adds torch-mlir dequant ops to the lowering pipeline (#15128)
These passes are safe to add unconditionally: they are no-ops when the module contains no dequant ops.
diff --git a/compiler/plugins/input/Torch/torch-iree/InputConversion/Passes.cpp b/compiler/plugins/input/Torch/torch-iree/InputConversion/Passes.cpp
index b943851..fdafc92 100644
--- a/compiler/plugins/input/Torch/torch-iree/InputConversion/Passes.cpp
+++ b/compiler/plugins/input/Torch/torch-iree/InputConversion/Passes.cpp
@@ -42,7 +42,10 @@
// now be simplified.
pm.addNestedPass<func::FuncOp>(createCanonicalizerPass());
}
-
+ pm.addNestedPass<func::FuncOp>(
+ torch::TorchConversion::createUnpackQuantTensorPass());
+ pm.addNestedPass<func::FuncOp>(
+ mlir::torch::TorchConversion::createConvertCustomQuantOpPass());
pm.addNestedPass<func::FuncOp>(
torch::Torch::createDecomposeComplexOpsPass(emptyArrayRef));
pm.addNestedPass<func::FuncOp>(torch::createConvertTorchToTMTensorPass());