Format issue TODOs to be consistent. (#7492)

diff --git a/integrations/tensorflow/e2e/keras/layers/BUILD b/integrations/tensorflow/e2e/keras/layers/BUILD
index 57b41eb..86950b3 100644
--- a/integrations/tensorflow/e2e/keras/layers/BUILD
+++ b/integrations/tensorflow/e2e/keras/layers/BUILD
@@ -155,7 +155,7 @@
             "AveragePooling3D",
             "Masking",
             "MaxPool3D",
-            "LocallyConnected1D",  # TODO(GH-5310): Enable the test.
+            "LocallyConnected1D",  # TODO(#5310): Enable the test.
         ],
         "target_backends": [
             "iree_llvmaot",
@@ -170,8 +170,8 @@
             "Lambda",
             "MaxPool1D",
             "MaxPool2D",
-            "Conv3D",  #TODO(GH-5150): Enable the test.
-            "Conv3DTranspose",  #TODO(GH-5150): Enable the test.
+            "Conv3D",  # TODO(#5150): Enable the test.
+            "Conv3DTranspose",  # TODO(#5150): Enable the test.
         ],
         "target_backends": "iree_vulkan",
     },
@@ -285,7 +285,7 @@
             "AveragePooling3D",
             "Conv1DTranspose",
             "Conv2DTranspose",
-            "Conv3D",  #TODO(GH-5150): Enable the test.
+            "Conv3D",  # TODO(#5150): Enable the test.
             "MaxPool1D",
             "MaxPool2D",
             "MaxPool3D",
diff --git a/integrations/tensorflow/iree_tf_compiler/TF/Passes.cpp b/integrations/tensorflow/iree_tf_compiler/TF/Passes.cpp
index e5d0f42..8cb4d3d 100644
--- a/integrations/tensorflow/iree_tf_compiler/TF/Passes.cpp
+++ b/integrations/tensorflow/iree_tf_compiler/TF/Passes.cpp
@@ -80,7 +80,7 @@
   // Lowering shape-related constructs.
   //----------------------------------------------------------------------------
   // pm.addPass(iree_compiler::Shape::createConvertHLOToShapePass());
-  // TODO(GH-2277): Lower HLO shape constraints instead of eliding them here.
+  // TODO(#2277): Lower HLO shape constraints instead of eliding them here.
   pm.addPass(createRemoveShapeConstraintsPass());
   pm.addPass(createCanonicalizerPass());
   // pm.addPass(iree_compiler::Shape::createConvertShapeToShapexPass());
diff --git a/iree/compiler/Dialect/Flow/IR/FlowOps.cpp b/iree/compiler/Dialect/Flow/IR/FlowOps.cpp
index 614093a..374c6c7 100644
--- a/iree/compiler/Dialect/Flow/IR/FlowOps.cpp
+++ b/iree/compiler/Dialect/Flow/IR/FlowOps.cpp
@@ -336,7 +336,7 @@
       return true;
     } else if (auto denseAttr =
                    constantValueAttr.dyn_cast<DenseElementsAttr>()) {
-      // TODO(GH-4897): Non-splat constants seems to have an issue on the LLVM
+      // TODO(#4897): Non-splat constants seem to have an issue on the LLVM
       // side. Uncomment after that is fixed.
       auto shapedType = constantOp.getType().cast<ShapedType>();
       uint64_t estimatedByteLength =
diff --git a/iree/compiler/Dialect/Flow/Transforms/FusionOfTensorOps.cpp b/iree/compiler/Dialect/Flow/Transforms/FusionOfTensorOps.cpp
index ba0869b..a969b19 100644
--- a/iree/compiler/Dialect/Flow/Transforms/FusionOfTensorOps.cpp
+++ b/iree/compiler/Dialect/Flow/Transforms/FusionOfTensorOps.cpp
@@ -55,10 +55,10 @@
           Operation *producer = producerResult.getOwner();
           Operation *consumer = consumerOperand.getOwner();
 
-          // TODO(GH-5611): Enable fusion with reduction consumer for all
-          // targets. Currently vectorization doesn't handle generic ops with
-          // reduction iterators we will disable for now to allow vectorizing
-          // producer pointwise ops to avoid performance regressions on CPU.
+          // TODO(#5611): Enable fusion with reduction consumer for all targets.
+          // Currently vectorization doesn't handle generic ops with reduction
+          // iterators we will disable for now to allow vectorizing producer
+          // pointwise ops to avoid performance regressions on CPU.
           if (!clEnableFusionWithReductionOps) {
             if (auto genericOp = dyn_cast<linalg::GenericOp>(consumer)) {
               if (genericOp.getNumReductionLoops()) return false;
diff --git a/iree/test/e2e/vulkan_specific/dot_general.mlir b/iree/test/e2e/vulkan_specific/dot_general.mlir
index f0ef4b0..a5098f4 100644
--- a/iree/test/e2e/vulkan_specific/dot_general.mlir
+++ b/iree/test/e2e/vulkan_specific/dot_general.mlir
@@ -48,7 +48,7 @@
   return
 }
 
-// TODO(GH-6070): Re-enable this after fixing wrong answer on Pixel 4.
+// TODO(#6070): Re-enable this after fixing wrong answer on Pixel 4.
 // func @large_dot_general2() {
 //   %lhs = util.unfoldable_constant dense<1.0> : tensor<4x32x1024xf32>
 //   %rhs = util.unfoldable_constant dense<0.4> : tensor<4x1024x64xf32>
diff --git a/iree/test/e2e/xla_ops/BUILD b/iree/test/e2e/xla_ops/BUILD
index 5d735f5..419658a 100644
--- a/iree/test/e2e/xla_ops/BUILD
+++ b/iree/test/e2e/xla_ops/BUILD
@@ -158,7 +158,7 @@
         ],
         include = ["*.mlir"],
         exclude = [
-            #TODO(GH-7415): Enable this after failures with bazel bot
+            # TODO(#7415): Enable this after failures with bazel bot
             #and cmake-swiftshared bot are addressed
             "pow.mlir",
             "round.mlir",
@@ -298,13 +298,13 @@
         ],
         include = ["*.mlir"],
         exclude = [
-            "fft.mlir",  # TODO(GH-6601): Eanble the test.
+            "fft.mlir",  # TODO(#6601): Enable the test.
             "round.mlir",
-            "scatter.mlir",  # TODO(GH-6601): Enable the test.
-            "scatter_dynamic.mlir",  # TODO(GH-6601): Enable the test.
+            "scatter.mlir",  # TODO(#6601): Enable the test.
+            "scatter_dynamic.mlir",  # TODO(#6601): Enable the test.
             "sort.mlir",
-            "rng_uniform.mlir",  # TODO(GH-6995): Enable the test.
-            "rng_normal.mlir",  # TODO(GH-6995): Enable the test.
+            "rng_uniform.mlir",  # TODO(#6995): Enable the test.
+            "rng_normal.mlir",  # TODO(#6995): Enable the test.
         ],
     ),
     compiler_flags = ["-iree-input-type=mhlo"],
diff --git a/iree/vm/test/call_ops.mlir b/iree/vm/test/call_ops.mlir
index 536a53b..c0ae804 100644
--- a/iree/vm/test/call_ops.mlir
+++ b/iree/vm/test/call_ops.mlir
@@ -15,7 +15,7 @@
     vm.return
   }
 
-  // TODO(GH-7487): Enable the test for emitc.
+  // TODO(#7487): Enable the test for emitc.
   vm.export @test_call_r_v attributes {emitc.exclude}
   vm.func private @test_call_r_v() {
     %ref = vm.const.ref.zero : !vm.ref<?>
@@ -24,7 +24,7 @@
   }
 
   // Check that reused ref argument slots are handled properly
-  // TODO(GH-7487): Enable the test for emitc.
+  // TODO(#7487): Enable the test for emitc.
   vm.export @test_call_r_v_reuse_reg attributes {emitc.exclude}
   vm.func private @test_call_r_v_reuse_reg() {
     %ref = vm.const.ref.zero : !vm.buffer
@@ -39,7 +39,7 @@
   // of the tests during the lattter. This means we would need to add a pattern
   // that inserts calls to `iree_vm_ref_retain` for operand/result pairs of the
   // do_not_optimize op.
-  // TODO(GH-7487): Enable the test for emitc.
+  // TODO(#7487): Enable the test for emitc.
   vm.export @test_call_r_v_preserve_ref attributes {emitc.exclude}
   vm.func private @test_call_r_v_preserve_ref() {
     %ref = vm.const.ref.zero : !vm.buffer
diff --git a/iree/vm/test/control_flow_ops.mlir b/iree/vm/test/control_flow_ops.mlir
index 4aeeb0c..2b6d146 100644
--- a/iree/vm/test/control_flow_ops.mlir
+++ b/iree/vm/test/control_flow_ops.mlir
@@ -71,7 +71,7 @@
     vm.fail %code, "unreachable!"
   }
 
-  // TODO(GH-7487): Enable the test for emitc.
+  // TODO(#7487): Enable the test for emitc.
   vm.export @test_cond_br_ref_arg attributes {emitc.exclude}
   vm.func private @test_cond_br_ref_arg() {
     %c1 = vm.const.i32 1 : i32