Standardize E2E module names. (#2872)

The class names of the E2E modules are used to index into their benchmarking artifacts, so they need to be distinct. This is currently untested, and there was a collision between `depth_conv_test` and `conv_test` since they both used `Conv2dModule`. This fixes that and gives other modules more specific names as a stopgap.

Additional NFC: use `os.makedirs(exist_ok=True)` instead of `try`/`except`.
diff --git a/integrations/tensorflow/bindings/python/pyiree/tf/support/tf_test_utils.py b/integrations/tensorflow/bindings/python/pyiree/tf/support/tf_test_utils.py
index c3442c1..2e0e09e 100644
--- a/integrations/tensorflow/bindings/python/pyiree/tf/support/tf_test_utils.py
+++ b/integrations/tensorflow/bindings/python/pyiree/tf/support/tf_test_utils.py
@@ -58,7 +58,7 @@
     parent_dir = os.path.join(tempfile.gettempdir(), "iree", "modules")
   artifacts_dir = os.path.join(parent_dir, module_name)
   logging.info("Saving compilation artifacts and traces to '%s'", artifacts_dir)
-  tf_utils._makedirs(artifacts_dir)
+  os.makedirs(artifacts_dir, exist_ok=True)
   return artifacts_dir
 
 
@@ -309,7 +309,7 @@
   def _get_trace_dir(self, artifacts_dir):
     trace_dir = os.path.join(artifacts_dir, self.backend, "traces",
                              self.function_name)
-    tf_utils._makedirs(trace_dir)
+    os.makedirs(trace_dir, exist_ok=True)
     return trace_dir
 
   def save_plaintext(self, artifacts_dir, summarize=True):
diff --git a/integrations/tensorflow/bindings/python/pyiree/tf/support/tf_utils.py b/integrations/tensorflow/bindings/python/pyiree/tf/support/tf_utils.py
index b93541a..53ff24f 100644
--- a/integrations/tensorflow/bindings/python/pyiree/tf/support/tf_utils.py
+++ b/integrations/tensorflow/bindings/python/pyiree/tf/support/tf_utils.py
@@ -102,22 +102,12 @@
   # Put the artifact in a directory if there's only one backend.
   if len(backend_infos) == 1:
     backend_dir = os.path.join(artifacts_dir, backends_string)
-    _makedirs(backend_dir)
+    os.makedirs(backend_dir, exist_ok=True)
     return os.path.join(artifacts_dir, backends_string, artifact_name)
   else:
     return os.path.join(artifacts_dir, f"{artifact_name}__{backends_string}")
 
 
-def _makedirs(path):
-  # If the artifacts already exist then we overwrite/update them.
-  try:
-    # Use try/except instead of os.path.exists to address any race conditions
-    # that might arise between multiple tests targets.
-    os.makedirs(path)
-  except IOError:
-    pass
-
-
 def compile_tf_module(tf_module,
                       backend_infos=(),
                       exported_names=(),
diff --git a/integrations/tensorflow/e2e/depth_conv_test.py b/integrations/tensorflow/e2e/depth_conv_test.py
index fc3143d..e3ba303 100644
--- a/integrations/tensorflow/e2e/depth_conv_test.py
+++ b/integrations/tensorflow/e2e/depth_conv_test.py
@@ -19,7 +19,7 @@
 import tensorflow.compat.v2 as tf
 
 
-class Conv2dModule(tf.Module):
+class DepthConv2dModule(tf.Module):
 
   # TODO(ataei): Add dilation and strided tests.
   @tf.function(input_signature=[
@@ -63,7 +63,7 @@
         img, kernel, [1, 1, 1, 1], "SAME", name="result")
 
 
-@tf_test_utils.compile_module(Conv2dModule)
+@tf_test_utils.compile_module(DepthConv2dModule)
 class ConvTest(tf_test_utils.TracedModuleTestCase):
 
   def test_batched_feature_unpadded(self):
diff --git a/integrations/tensorflow/e2e/dynamic_mlp_relu_test.py b/integrations/tensorflow/e2e/dynamic_mlp_relu_test.py
index eec6815..3742c6e 100644
--- a/integrations/tensorflow/e2e/dynamic_mlp_relu_test.py
+++ b/integrations/tensorflow/e2e/dynamic_mlp_relu_test.py
@@ -28,7 +28,7 @@
 CLASSES = 10
 
 
-class MlpRelu(tf.Module):
+class DynamicMlpReluModule(tf.Module):
 
   def __init__(self,
                hidden_1_dim=256,
@@ -65,7 +65,7 @@
     return tf.nn.softmax(self.mlp(x))
 
 
-@tf_test_utils.compile_module(MlpRelu, exported_names=["predict"])
+@tf_test_utils.compile_module(DynamicMlpReluModule, exported_names=["predict"])
 class DynamicMlpReluTest(tf_test_utils.TracedModuleTestCase):
 
   def test_dynamic_batch(self):
diff --git a/integrations/tensorflow/e2e/dynamic_mlp_test.py b/integrations/tensorflow/e2e/dynamic_mlp_test.py
index 0b70e84..ff905a5 100644
--- a/integrations/tensorflow/e2e/dynamic_mlp_test.py
+++ b/integrations/tensorflow/e2e/dynamic_mlp_test.py
@@ -24,7 +24,7 @@
 CLASSES = 10
 
 
-class Mlp(tf.Module):
+class DynamicMlpModule(tf.Module):
 
   def __init__(self,
                hidden_1_dim=256,
@@ -61,7 +61,7 @@
     return tf.nn.softmax(self.mlp(x))
 
 
-@tf_test_utils.compile_module(Mlp, exported_names=["predict"])
+@tf_test_utils.compile_module(DynamicMlpModule, exported_names=["predict"])
 class DynamicMlpTest(tf_test_utils.TracedModuleTestCase):
 
   def test_dynamic_batch(self):
diff --git a/integrations/tensorflow/e2e/keras/lstm_static_test.py b/integrations/tensorflow/e2e/keras/lstm_static_test.py
index 64db229..db8f86e 100644
--- a/integrations/tensorflow/e2e/keras/lstm_static_test.py
+++ b/integrations/tensorflow/e2e/keras/lstm_static_test.py
@@ -27,10 +27,10 @@
 INPUT_SHAPE = [NUM_BATCH, NUM_TIMESTEPS, NUM_UNITS]
 
 
-class LstmStatic(tf.Module):
+class LstmStaticModule(tf.Module):
 
   def __init__(self):
-    super(LstmStatic, self).__init__()
+    super(LstmStaticModule, self).__init__()
     tf_utils.set_random_seed()
     inputs = tf.keras.layers.Input(batch_size=NUM_BATCH, shape=INPUT_SHAPE[1:])
     outputs = tf.keras.layers.LSTM(
@@ -42,7 +42,7 @@
             self.m.call)
 
 
-@tf_test_utils.compile_module(LstmStatic, exported_names=["predict"])
+@tf_test_utils.compile_module(LstmStaticModule, exported_names=["predict"])
 class LstmStaticTest(tf_test_utils.TracedModuleTestCase):
 
   def test_lstm(self):
diff --git a/integrations/tensorflow/e2e/keras/lstm_test.py b/integrations/tensorflow/e2e/keras/lstm_test.py
index 8232d19..112e32a 100644
--- a/integrations/tensorflow/e2e/keras/lstm_test.py
+++ b/integrations/tensorflow/e2e/keras/lstm_test.py
@@ -25,10 +25,10 @@
 INPUT_SHAPE = [NUM_BATCH, NUM_TIMESTEPS, NUM_UNITS]
 
 
-class Lstm(tf.Module):
+class LstmModule(tf.Module):
 
   def __init__(self):
-    super(Lstm, self).__init__()
+    super(LstmModule, self).__init__()
     tf_utils.set_random_seed()
     inputs = tf.keras.layers.Input(batch_size=None, shape=DYNAMIC_SHAPE[1:])
     outputs = tf.keras.layers.LSTM(
@@ -40,7 +40,7 @@
             self.m.call)
 
 
-@tf_test_utils.compile_module(Lstm, exported_names=["predict"])
+@tf_test_utils.compile_module(LstmModule, exported_names=["predict"])
 class LstmTest(tf_test_utils.TracedModuleTestCase):
 
   def test_lstm(self):
diff --git a/integrations/tensorflow/e2e/simple_stateful_test.py b/integrations/tensorflow/e2e/simple_stateful_test.py
index eff49a8..1120a4f 100644
--- a/integrations/tensorflow/e2e/simple_stateful_test.py
+++ b/integrations/tensorflow/e2e/simple_stateful_test.py
@@ -18,10 +18,10 @@
 import tensorflow.compat.v2 as tf
 
 
-class Stateful(tf.Module):
+class SimpleStatefulModule(tf.Module):
 
   def __init__(self):
-    super(Stateful, self).__init__()
+    super(SimpleStatefulModule, self).__init__()
     self.counter = tf.Variable(0.0)
 
   @tf.function(input_signature=[tf.TensorSpec([], tf.float32)])
@@ -33,7 +33,7 @@
     return self.counter
 
 
-@tf_test_utils.compile_module(Stateful)
+@tf_test_utils.compile_module(SimpleStatefulModule)
 class StatefulTest(tf_test_utils.TracedModuleTestCase):
 
   def test_stateful(self):