Move regression suite artifacts to Azure Blob Storage (#17140)

Switch the pregenerated llama2 and ukernel regression test fixtures from
Google Cloud Storage (storage.googleapis.com/shark_tank) to Azure Blob
Storage (sharktank.blob.core.windows.net/sharktank).

ci-exactly: build_packages, regression_test_cpu,
regression_test_amdgpu_vulkan, regression_test_amdgpu_rocm,
regression_test_nvidiagpu_vulkan, regression_test_nvidiagpu_cuda
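
The change is a pure prefix swap: each fixture path under the old GCS bucket is kept verbatim under the new Azure container, only the host and container prefix differ. A minimal sketch of that mapping (the helper name `migrate_fixture_url` is hypothetical, not part of the regression suite):

```python
# Hypothetical helper illustrating the URL rewrite applied throughout this diff.
# The path below the old GCS bucket is preserved as-is under the new Azure
# container; only the prefix changes.
OLD_PREFIX = "https://storage.googleapis.com/shark_tank/"
NEW_PREFIX = "https://sharktank.blob.core.windows.net/sharktank/"


def migrate_fixture_url(url: str) -> str:
    """Rewrite a shark_tank GCS URL to its Azure Blob Storage equivalent."""
    if url.startswith(OLD_PREFIX):
        return NEW_PREFIX + url[len(OLD_PREFIX):]
    return url


# Example:
# migrate_fixture_url(
#     "https://storage.googleapis.com/shark_tank/llama_regression/llama2_7b_int4.mlir")
# -> "https://sharktank.blob.core.windows.net/sharktank/llama_regression/llama2_7b_int4.mlir"
```
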
diff --git a/experimental/regression_suite/tests/pregenerated/test_llama2.py b/experimental/regression_suite/tests/pregenerated/test_llama2.py
index dcde908..af9c8d2 100644
--- a/experimental/regression_suite/tests/pregenerated/test_llama2.py
+++ b/experimental/regression_suite/tests/pregenerated/test_llama2.py
@@ -19,12 +19,12 @@
 ]
 
 llama2_7b_f16qi4_stripped_source = fetch_source_fixture(
-    "https://storage.googleapis.com/shark_tank/llama_regression/09152023/llama2_7b_int4_stripped.mlir",
+    "https://sharktank.blob.core.windows.net/sharktank/llama_regression/09152023/llama2_7b_int4_stripped.mlir",
     group="llama2_7b_f16qi4_stripped",
 )
 
 llama2_7b_f16qi4_source = fetch_source_fixture(
-    "https://storage.googleapis.com/shark_tank/llama_regression/llama2_7b_int4.mlir",
+    "https://sharktank.blob.core.windows.net/sharktank/llama_regression/llama2_7b_int4.mlir",
     group="llama2_7b_f16qi4",
 )
 
@@ -246,22 +246,22 @@
 
 
 llama2_7b_f16qi4_first_input_cpu = fetch_source_fixture(
-    "https://storage.googleapis.com/shark_tank/llama_regression/llama2-7b-i4-golden-outputs/cpu/first_vicuna_forward_input.npy",
+    "https://sharktank.blob.core.windows.net/sharktank/llama_regression/llama2-7b-i4-golden-outputs/cpu/first_vicuna_forward_input.npy",
     group="llama2_7b_f16qi4_first_input_cpu",
 )
 
 llama2_7b_f16qi4_first_output_cpu = fetch_source_fixture(
-    "https://storage.googleapis.com/shark_tank/llama_regression/llama2-7b-i4-golden-outputs/cpu/first_vicuna_forward_output.npy",
+    "https://sharktank.blob.core.windows.net/sharktank/llama_regression/llama2-7b-i4-golden-outputs/cpu/first_vicuna_forward_output.npy",
     group="llama2_7b_f16qi4_first_output_cpu",
 )
 
 llama2_7b_f16qi4_second_input_cpu = fetch_source_fixture(
-    "https://storage.googleapis.com/shark_tank/llama_regression/llama2-7b-i4-golden-outputs/cpu/second_vicuna_forward_input.npy",
+    "https://sharktank.blob.core.windows.net/sharktank/llama_regression/llama2-7b-i4-golden-outputs/cpu/second_vicuna_forward_input.npy",
     group="llama2_7b_f16qi4_second_input_cpu",
 )
 
 llama2_7b_f16qi4_second_output_cpu = fetch_source_fixture(
-    "https://storage.googleapis.com/shark_tank/llama_regression/llama2-7b-i4-golden-outputs/cpu/second_vicuna_forward_output.npy",
+    "https://sharktank.blob.core.windows.net/sharktank/llama_regression/llama2-7b-i4-golden-outputs/cpu/second_vicuna_forward_output.npy",
     group="llama2_7b_f16qi4_second_output_cpu",
 )
 
@@ -297,22 +297,22 @@
 
 
 llama2_7b_f16qi4_first_input_cuda = fetch_source_fixture(
-    "https://storage.googleapis.com/shark_tank/llama_regression/llama2-7b-i4-golden-outputs/cuda/first_vicuna_forward_input.npy",
+    "https://sharktank.blob.core.windows.net/sharktank/llama_regression/llama2-7b-i4-golden-outputs/cuda/first_vicuna_forward_input.npy",
     group="llama2_7b_f16qi4_first_input_cuda",
 )
 
 llama2_7b_f16qi4_first_output_cuda = fetch_source_fixture(
-    "https://storage.googleapis.com/shark_tank/llama_regression/llama2-7b-i4-golden-outputs/cuda/first_vicuna_forward_output.npy",
+    "https://sharktank.blob.core.windows.net/sharktank/llama_regression/llama2-7b-i4-golden-outputs/cuda/first_vicuna_forward_output.npy",
     group="llama2_7b_f16qi4_first_output_cuda",
 )
 
 llama2_7b_f16qi4_second_input_cuda = fetch_source_fixture(
-    "https://storage.googleapis.com/shark_tank/llama_regression/llama2-7b-i4-golden-outputs/cuda/second_vicuna_forward_input.npy",
+    "https://sharktank.blob.core.windows.net/sharktank/llama_regression/llama2-7b-i4-golden-outputs/cuda/second_vicuna_forward_input.npy",
     group="llama2_7b_f16qi4_second_input_cuda",
 )
 
 llama2_7b_f16qi4_second_output_cuda = fetch_source_fixture(
-    "https://storage.googleapis.com/shark_tank/llama_regression/llama2-7b-i4-golden-outputs/cuda/second_vicuna_forward_output.npy",
+    "https://sharktank.blob.core.windows.net/sharktank/llama_regression/llama2-7b-i4-golden-outputs/cuda/second_vicuna_forward_output.npy",
     group="llama2_7b_f16qi4_second_output_cuda",
 )
 
diff --git a/experimental/regression_suite/tests/pregenerated/test_ukernel.py b/experimental/regression_suite/tests/pregenerated/test_ukernel.py
index f96225d..90e183a 100644
--- a/experimental/regression_suite/tests/pregenerated/test_ukernel.py
+++ b/experimental/regression_suite/tests/pregenerated/test_ukernel.py
@@ -16,7 +16,7 @@
 ]
 
 argmax_ukernel_source = fetch_source_fixture(
-    "https://storage.googleapis.com/shark_tank/ukernel_regression/20231217/argmax/argmax_3d_linalg.mlir",
+    "https://sharktank.blob.core.windows.net/sharktank/ukernel_regression/20231217/argmax/argmax_3d_linalg.mlir",
     group="argmax_ukernel_linalg",
 )
 
@@ -75,22 +75,22 @@
 #       cannot compare signless i64 from vmfb and by default si64 from npy.
 
 argmax_input_f16 = fetch_source_fixture(
-    "https://storage.googleapis.com/shark_tank/ukernel_regression/20231217/argmax/argmax_3d_input_f16.npy",
+    "https://sharktank.blob.core.windows.net/sharktank/ukernel_regression/20231217/argmax/argmax_3d_input_f16.npy",
     group="argmax_ukernel_input_f16",
 )
 
 argmax_output_f16 = fetch_source_fixture(
-    "https://storage.googleapis.com/shark_tank/ukernel_regression/20231217/argmax/argmax_3d_output_f16.npy",
+    "https://sharktank.blob.core.windows.net/sharktank/ukernel_regression/20231217/argmax/argmax_3d_output_f16.npy",
     group="argmax_ukernel_output_f16",
 )
 
 argmax_input_f32 = fetch_source_fixture(
-    "https://storage.googleapis.com/shark_tank/ukernel_regression/20231217/argmax/argmax_3d_input_f32.npy",
+    "https://sharktank.blob.core.windows.net/sharktank/ukernel_regression/20231217/argmax/argmax_3d_input_f32.npy",
     group="argmax_ukernel_input_f32",
 )
 
 argmax_output_f32 = fetch_source_fixture(
-    "https://storage.googleapis.com/shark_tank/ukernel_regression/20231217/argmax/argmax_3d_output_f32.npy",
+    "https://sharktank.blob.core.windows.net/sharktank/ukernel_regression/20231217/argmax/argmax_3d_output_f32.npy",
     group="argmax_ukernel_output_f32",
 )