Integrate LLVM @ 7900daaa7ba57b5f9729bbbdb54f4e0599a45cd7 (#18773)

- Also bump torch-mlir to
[edd1bbec46fc08318163c9dc0eb45decee63ec5b](https://github.com/llvm/torch-mlir/tree/edd1bbec46fc08318163c9dc0eb45decee63ec5b).
- No local patches are carried.
- Most of the changes are needed because of
https://github.com/llvm/llvm-project/pull/110344, which reworked the MLIR
dataflow framework so that `ProgramPoint` is passed by pointer and denotes
a position between operations (see the sketch below).
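
For reviewers, the dataflow hunks below all follow the same migration pattern. A minimal sketch, assuming the post-#110344 MLIR `DataFlowAnalysis` API; `MyAnalysis` and its body are hypothetical stand-ins for the `PropagateLayout`/`EnforceLayout` classes this patch actually touches:

```cpp
#include "mlir/Analysis/DataFlowFramework.h"

using namespace mlir;

// Hypothetical analysis illustrating the new ProgramPoint-by-pointer API.
struct MyAnalysis : public DataFlowAnalysis {
  using DataFlowAnalysis::DataFlowAnalysis;

  LogicalResult initialize(Operation *top) override { return success(); }

  // Old signature: LogicalResult visit(ProgramPoint point), where the
  // operation was recovered via dyn_cast_or_null<Operation *>(point).
  LogicalResult visit(ProgramPoint *point) override {
    // A ProgramPoint now denotes a position between operations; a
    // block-start point has no preceding operation to visit.
    if (point->isBlockStart())
      return success();
    if (Operation *op = point->getPrevOp()) {
      visitOperation(op);
      return success();
    }
    return failure();
  }

  void visitOperation(Operation *op) {
    // Analysis-specific logic would go here.
  }
};

// Enqueuing and state erasure likewise go through solver-owned program
// points now, instead of passing the Operation * directly:
//   solver->enqueue({solver->getProgramPointAfter(user), analysis});
//   solver->eraseState(solver->getProgramPointAfter(op));
```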

---------

Signed-off-by: yzhang93 <zhyuhang88@gmail.com>
diff --git a/compiler/plugins/input/TOSA/InputConversion/Passes.cpp b/compiler/plugins/input/TOSA/InputConversion/Passes.cpp
index 65d5ed2..4763e44 100644
--- a/compiler/plugins/input/TOSA/InputConversion/Passes.cpp
+++ b/compiler/plugins/input/TOSA/InputConversion/Passes.cpp
@@ -52,8 +52,10 @@
 
   TosaToLinalgNamedOptions tosaToLinalgNamedOptions;
   tosaToLinalgNamedOptions.preferConv2DKernelLayoutHWCF = true;
+  tosa::TosaValidationOptions tosaValidationOptions;
+  tosaValidationOptions.profile = {"bi", "mi", "mt"};
   tosa::addTosaToLinalgPasses(passManager, TosaToLinalgOptions(),
-                              tosaToLinalgNamedOptions);
+                              tosaToLinalgNamedOptions, tosaValidationOptions);
   passManager.addNestedPass<func::FuncOp>(
       iree_compiler::createConverti48Toi64Pass());
 
diff --git a/compiler/src/iree/compiler/Codegen/Common/VectorLayoutAnalysis.cpp b/compiler/src/iree/compiler/Codegen/Common/VectorLayoutAnalysis.cpp
index 0263614..fa3786c 100644
--- a/compiler/src/iree/compiler/Codegen/Common/VectorLayoutAnalysis.cpp
+++ b/compiler/src/iree/compiler/Codegen/Common/VectorLayoutAnalysis.cpp
@@ -122,7 +122,7 @@
 
   LogicalResult initialize(Operation *root) override;
 
-  LogicalResult visit(ProgramPoint point) override;
+  LogicalResult visit(ProgramPoint *point) override;
 
   void registerNewValue(Value val, const VectorLayoutInterface &layout);
 
@@ -147,7 +147,7 @@
 
   LogicalResult initialize(Operation *root) override;
 
-  LogicalResult visit(ProgramPoint point) override;
+  LogicalResult visit(ProgramPoint *point) override;
 
   /// Register a new value to be part of the dataflow analysis. The value should
   /// not be part of the analysis already. This is used for new values that are
@@ -308,7 +308,7 @@
   if (propagation) {
     // Make propagation run again on all users of this value.
     for (Operation *user : value.getUsers()) {
-      solver->enqueue({user, propagation});
+      solver->enqueue({solver->getProgramPointAfter(user), propagation});
     }
     // TODO: Maybe we need to run it on the parent operation as well to give
     // layout to other results? Seems unlikely though as results usually
@@ -318,17 +318,19 @@
   if (enforcement) {
     // Make enforcement run on the parent.
     if (Operation *definingOp = value.getDefiningOp()) {
-      solver->enqueue({definingOp, enforcement});
+      solver->enqueue({solver->getProgramPointAfter(definingOp), enforcement});
     } else {
       // TODO: This is not always correct. Ideally, we should enqueue all
      // predecessors of these block arguments.
-      solver->enqueue({value.getParentBlock()->getParentOp(), enforcement});
+      solver->enqueue(
+          {solver->getProgramPointAfter(value.getParentBlock()->getParentOp()),
+           enforcement});
     }
 
     // Enforce users of this value also, as some other operands may need to
     // be updated.
     for (Operation *user : value.getUsers()) {
-      solver->enqueue({user, enforcement});
+      solver->enqueue({solver->getProgramPointAfter(user), enforcement});
     }
   }
 }
@@ -849,8 +851,11 @@
   return success();
 }
 
-LogicalResult PropagateLayout::visit(ProgramPoint point) {
-  if (Operation *op = dyn_cast_or_null<Operation *>(point)) {
+LogicalResult PropagateLayout::visit(ProgramPoint *point) {
+  if (point->isBlockStart())
+    return success();
+
+  if (auto op = point->getPrevOp()) {
     visitOperation(op);
     return success();
   }
@@ -969,8 +974,11 @@
   return success();
 }
 
-LogicalResult EnforceLayout::visit(ProgramPoint point) {
-  if (Operation *op = dyn_cast_or_null<Operation *>(point)) {
+LogicalResult EnforceLayout::visit(ProgramPoint *point) {
+  if (point->isBlockStart())
+    return success();
+
+  if (auto op = point->getPrevOp()) {
     visitOperation(op);
     return success();
   }
diff --git a/compiler/src/iree/compiler/Dialect/Util/Transforms/OptimizeIntArithmetic.cpp b/compiler/src/iree/compiler/Dialect/Util/Transforms/OptimizeIntArithmetic.cpp
index 9a588a4..79e61a1 100644
--- a/compiler/src/iree/compiler/Dialect/Util/Transforms/OptimizeIntArithmetic.cpp
+++ b/compiler/src/iree/compiler/Dialect/Util/Transforms/OptimizeIntArithmetic.cpp
@@ -216,7 +216,7 @@
 
 protected:
   void notifyOperationErased(Operation *op) override {
-    s.eraseState(op);
+    s.eraseState(s.getProgramPointAfter(op));
     for (Value res : op->getResults())
       flushValue(res);
   }
diff --git a/compiler/src/iree/compiler/GlobalOptimization/PropagateLinalgTranspose.cpp b/compiler/src/iree/compiler/GlobalOptimization/PropagateLinalgTranspose.cpp
index 8fe7ed2..8462338 100644
--- a/compiler/src/iree/compiler/GlobalOptimization/PropagateLinalgTranspose.cpp
+++ b/compiler/src/iree/compiler/GlobalOptimization/PropagateLinalgTranspose.cpp
@@ -110,7 +110,8 @@
 // single input.
 static void specializeGenericTransposeOp(RewriterBase &rewriter,
                                          linalg::GenericOp genericOp) {
-  if (!isaTransposeOpInterface(genericOp)) {
+  if (!mlir::iree_compiler::GlobalOptimization::isaTransposeOpInterface(
+          genericOp)) {
     return;
   }
 
diff --git a/third_party/llvm-project b/third_party/llvm-project
index f8b7a65..7900daa 160000
--- a/third_party/llvm-project
+++ b/third_party/llvm-project
@@ -1 +1 @@
-Subproject commit f8b7a65395a07073feff367145965214d95ba99a
+Subproject commit 7900daaa7ba57b5f9729bbbdb54f4e0599a45cd7
diff --git a/third_party/torch-mlir b/third_party/torch-mlir
index ab62f35..edd1bbe 160000
--- a/third_party/torch-mlir
+++ b/third_party/torch-mlir
@@ -1 +1 @@
-Subproject commit ab62f35373c3944b68e564214fd04fff39dd92fc
+Subproject commit edd1bbec46fc08318163c9dc0eb45decee63ec5b