Merge pull request #6708 from ThomasRaoux:main-to-google PiperOrigin-RevId: 389712115
diff --git a/.github/workflows/build_package.yml b/.github/workflows/build_package.yml index 8956818..7aa23d0 100644 --- a/.github/workflows/build_package.yml +++ b/.github/workflows/build_package.yml
@@ -97,7 +97,7 @@ CIBW_MANYLINUX_X86_64_IMAGE: stellaraccident/manylinux2014_x86_64-bazel-3.7.2:latest # CIBW_MANYLINUX_X86_64_IMAGE: manylinux2014 - CIBW_BUILD: "cp36-* cp37-* cp38-* cp39-*" + CIBW_BUILD: "cp37-* cp38-* cp39-*" CIBW_SKIP: "*-win32 *-manylinux_i686" steps:
diff --git a/CMakeLists.txt b/CMakeLists.txt index cb23a4d..d281b8a 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt
@@ -37,7 +37,6 @@ option(IREE_ENABLE_RUNTIME_TRACING "Enables instrumented runtime tracing." OFF) option(IREE_ENABLE_COMPILER_TRACING "Enables instrumented compiler tracing." OFF) -option(IREE_ENABLE_MLIR "Enables MLIR/LLVM dependencies." ON) option(IREE_ENABLE_EMITC "Enables MLIR EmitC dependencies." OFF) option(IREE_BUILD_COMPILER "Builds the IREE compiler." ON) @@ -96,22 +95,9 @@ # Derived flags based on primary options #------------------------------------------------------------------------------- -if(${IREE_BUILD_COMPILER}) - set(IREE_ENABLE_MLIR ON CACHE BOOL "Enable LLVM dependencies if the IREE compiler is build." FORCE) -endif() - -if(${IREE_ENABLE_MLIR}) - set(IREE_MLIR_DEP_MODE "BUNDLED" CACHE STRING "One of BUNDLED (default), DISABLED, INSTALLED") -endif() - -if(${IREE_ENABLE_EMITC}) - if(NOT ${IREE_ENABLE_MLIR}) - message(FATAL_ERROR "Enabling EmitC requires setting IREE_ENABLE_MLIR to ON.") - endif() - - string(TOUPPER "${IREE_MLIR_DEP_MODE}" uppercase_IREE_MLIR_DEP_MODE) - if(NOT uppercase_IREE_MLIR_DEP_MODE MATCHES "^(BUNDLED|INSTALLED)$") - message(FATAL_ERROR "Enabling EmitC requires IREE_MLIR_DEP_MODE set to BUNDELED or INSTALLED.") +if(IREE_ENABLE_EMITC) + if(NOT IREE_BUILD_COMPILER) + message(FATAL_ERROR "Enabling EmitC requires setting IREE_BUILD_COMPILER to ON.") endif() endif() @@ -273,6 +259,7 @@ include(iree_cc_test) include(iree_tablegen_library) include(iree_tablegen_doc) +include(iree_third_party_cmake_options) include(iree_c_embed_data) include(iree_bytecode_module) include(iree_c_module) @@ -341,102 +328,73 @@ #------------------------------------------------------------------------------- # MLIR/LLVM Dependency -# We treat the LLVM dependency specially because we support several different -# ways to use it: -# - Bundled (default): a source dependency directly on the -# third_party/llvm-project submodule. -# - External: An external (source or installed) dependency on LLVM. 
-# - Provided: When IREE is used as a sub-project, it is assumed that the LLVM -# dependency is added prior to including this configuration. #------------------------------------------------------------------------------- -# Disable LLVM's warnings. -set(LLVM_ENABLE_WARNINGS OFF CACHE BOOL "don't use global flags /facepalm") - -# Adds bundled projects that must be included after the LLVM directory has -# been added and within the scope of its settings (i.e. build type override, -# etc). -function(add_bundled_mlir_dependent_projects) - if(${IREE_BUILD_COMPILER}) - add_subdirectory(third_party/mlir-hlo EXCLUDE_FROM_ALL) +if(NOT IREE_BUILD_COMPILER) + message(STATUS "Not adding LLVM/MLIR because the configuration does not require it") +elseif(TARGET LLVMSupport) + message(STATUS "Not adding IREE bundled LLVM because it has already been included") + if(NOT TARGET MLIRIR) + message(FATAL_ERROR "Detected externally provided LLVM project but could not find MLIR projects (is it enabled/installed?)") endif() -endfunction() +else() + message(STATUS "Adding bundled LLVM source dependency") + iree_set_llvm_cmake_options() -function(add_iree_mlir_src_dep llvm_monorepo_path) + # Disable LLVM's warnings. + set(LLVM_ENABLE_WARNINGS OFF CACHE BOOL "don't use global flags /facepalm") + # Stash cmake build type in case LLVM messes with it. set(_CMAKE_BUILD_TYPE "${CMAKE_BUILD_TYPE}") - add_subdirectory("${llvm_monorepo_path}/llvm" "third_party/llvm-project/llvm" EXCLUDE_FROM_ALL) + add_subdirectory("third_party/llvm-project/llvm" EXCLUDE_FROM_ALL) # Reset CMAKE_BUILD_TYPE to its previous setting. 
set(CMAKE_BUILD_TYPE "${_CMAKE_BUILD_TYPE}" CACHE STRING "Build type (default ${DEFAULT_CMAKE_BUILD_TYPE})" FORCE) -endfunction() -if(${IREE_ENABLE_MLIR}) - if(${IREE_MLIR_DEP_MODE} STREQUAL "DISABLED") - message(STATUS "Not adding MLIR/LLVM dep due to IREE_MLIR_DEP_MODE=DISABLED") - elseif(${IREE_MLIR_DEP_MODE} STREQUAL "BUNDLED") - message(STATUS "Adding bundled LLVM source dependency") - add_iree_mlir_src_dep("third_party/llvm-project") + # Extend module path to allow submodules to use LLVM and MLIR CMake modules. + list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_BINARY_DIR}/lib/cmake/mlir") + list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_BINARY_DIR}/third_party/llvm-project/llvm/lib/cmake/llvm/") - # Extend module path to allow submodules to use LLVM and MLIR CMake modules. - list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_BINARY_DIR}/lib/cmake/mlir") - list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_BINARY_DIR}/third_party/llvm-project/llvm/lib/cmake/llvm/") + # Add the bundled include directories for cmake files looking for them. + list(APPEND LLVM_INCLUDE_DIRS + ${CMAKE_CURRENT_SOURCE_DIR}/third_party/llvm-project/llvm/include + ${CMAKE_CURRENT_BINARY_DIR}/third_party/llvm-project/llvm/include + ) + list(APPEND MLIR_INCLUDE_DIRS + ${CMAKE_CURRENT_SOURCE_DIR}/third_party/llvm-project/mlir/include + ${CMAKE_CURRENT_BINARY_DIR}/third_party/llvm-project/llvm/tools/mlir/include + ) - # Add the bundled include directories for cmake files looking for them. - list(APPEND LLVM_INCLUDE_DIRS - ${CMAKE_CURRENT_SOURCE_DIR}/third_party/llvm-project/llvm/include - ${CMAKE_CURRENT_BINARY_DIR}/third_party/llvm-project/llvm/include - ) - list(APPEND MLIR_INCLUDE_DIRS - ${CMAKE_CURRENT_SOURCE_DIR}/third_party/llvm-project/mlir/include - ${CMAKE_CURRENT_BINARY_DIR}/third_party/llvm-project/llvm/tools/mlir/include - ) + # Avoid globally modifying paths by instead adding the include paths to the + # rules that really should have them in the first place. 
+ target_include_directories(LLVMSupport PUBLIC + $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/third_party/llvm-project/llvm/include> + $<BUILD_INTERFACE:${CMAKE_CURRENT_BINARY_DIR}/third_party/llvm-project/llvm/include> + ) + target_include_directories(MLIRSupport PUBLIC + $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/third_party/llvm-project/mlir/include> + $<BUILD_INTERFACE:${CMAKE_CURRENT_BINARY_DIR}/third_party/llvm-project/llvm/tools/mlir/include> + ) +endif() - # Avoid globally modifying paths by instead adding the include paths to the - # rules that really should have them in the first place. - target_include_directories(LLVMSupport PUBLIC - $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/third_party/llvm-project/llvm/include> - $<BUILD_INTERFACE:${CMAKE_CURRENT_BINARY_DIR}/third_party/llvm-project/llvm/include> - ) - target_include_directories(MLIRSupport PUBLIC - $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/third_party/llvm-project/mlir/include> - $<BUILD_INTERFACE:${CMAKE_CURRENT_BINARY_DIR}/third_party/llvm-project/llvm/tools/mlir/include> - ) - - # Set build option to use MHLO alongside with bundled MLIR - set(MHLO_BUILD_EMBEDDED ON) - elseif(${IREE_MLIR_DEP_MODE} STREQUAL "INSTALLED") - # Deps of installed MLIR/LLVM packages. 
- find_package(ZLIB) # See: https://reviews.llvm.org/D79219 - message(STATUS "Looking for installed MLIR/LLVM packages (configure with MLIR_DIR variable)") - find_package(MLIR REQUIRED CONFIG) - message(STATUS "Using MLIRConfig.cmake in: ${MLIR_DIR}") - message(STATUS "Using LLVMConfig.cmake in: ${LLVM_DIR}") - list(APPEND CMAKE_MODULE_PATH "${MLIR_CMAKE_DIR}") - list(APPEND CMAKE_MODULE_PATH "${LLVM_CMAKE_DIR}") - include(TableGen) - include(AddLLVM) - include(AddMLIR) - include(HandleLLVMOptions) - - # Add include/link directories - include_directories(SYSTEM ${LLVM_INCLUDE_DIRS}) - include_directories(SYSTEM ${MLIR_INCLUDE_DIRS}) - link_directories(${LLVM_BUILD_LIBRARY_DIR}) - add_definitions(${LLVM_DEFINITIONS}) +# Add bundled mlir-hlo project unless if already provided. +if(IREE_BUILD_COMPILER) + if(TARGET MLIRMhloUtils) + message(STATUS "Not adding IREE bundled mlir-hlo project because it is already present") else() - message(FATAL "Unsupported IREE_MLIR_DEP_MODE=${IREE_MLIR_DEP_MODE}") + message(STATUS "Adding IREE bundled mlir-hlo project") + # TODO: Have better sub project detection upstream. + set(MHLO_BUILD_EMBEDDED ON) + add_subdirectory(third_party/mlir-hlo EXCLUDE_FROM_ALL) endif() - - add_bundled_mlir_dependent_projects() endif() #------------------------------------------------------------------------------- # Python bindings. #------------------------------------------------------------------------------- -if(${IREE_BUILD_PYTHON_BINDINGS}) +if(IREE_BUILD_PYTHON_BINDINGS) # Note: Optional because python libs can be manually specified. 
find_package(Python3 COMPONENTS Interpreter Development REQUIRED) endif() @@ -449,17 +407,24 @@ include(flatbuffer_c_library) add_subdirectory(build_tools/third_party/cuda EXCLUDE_FROM_ALL) -add_subdirectory(build_tools/third_party/flatcc EXCLUDE_FROM_ALL) add_subdirectory(build_tools/third_party/libyaml EXCLUDE_FROM_ALL) add_subdirectory(build_tools/third_party/stblib EXCLUDE_FROM_ALL) add_subdirectory(build_tools/third_party/vulkan_memory_allocator EXCLUDE_FROM_ALL) +iree_set_googletest_cmake_options() add_subdirectory(third_party/googletest EXCLUDE_FROM_ALL) -if(${IREE_ENABLE_THREADING}) + +if(IREE_ENABLE_THREADING) + iree_set_benchmark_cmake_options() add_subdirectory(third_party/benchmark EXCLUDE_FROM_ALL) + iree_set_cpuinfo_cmake_options() add_subdirectory(third_party/cpuinfo EXCLUDE_FROM_ALL) endif() + +iree_set_flatcc_cmake_options() +add_subdirectory(build_tools/third_party/flatcc EXCLUDE_FROM_ALL) add_subdirectory(third_party/flatcc EXCLUDE_FROM_ALL) + add_subdirectory(third_party/vulkan_headers EXCLUDE_FROM_ALL) # TODO(scotttodd): Iterate some more and find a better place for this. @@ -469,23 +434,24 @@ RUNTIME DESTINATION bin) endif() -if(${IREE_BUILD_COMPILER}) +if(IREE_BUILD_COMPILER) add_subdirectory(build_tools/third_party/mlir-hlo EXCLUDE_FROM_ALL) endif() -if(${IREE_ENABLE_EMITC}) +if(IREE_ENABLE_EMITC) add_subdirectory(build_tools/third_party/mlir-emitc EXCLUDE_FROM_ALL) endif() -if(${IREE_BUILD_TESTS}) +if(IREE_BUILD_TESTS) enable_testing(iree) endif() -if(${IREE_BUILD_PYTHON_BINDINGS}) +if(IREE_BUILD_PYTHON_BINDINGS) add_subdirectory(third_party/pybind11 EXCLUDE_FROM_ALL) endif() -if(${IREE_TARGET_BACKEND_METAL-SPIRV}) +if(IREE_TARGET_BACKEND_METAL-SPIRV) + iree_set_spirv_cross_cmake_options() # SPIRV-Cross is needed to cross compile SPIR-V into MSL source code. add_subdirectory(third_party/spirv_cross EXCLUDE_FROM_ALL) endif()
diff --git a/WORKSPACE b/WORKSPACE index cf71744..ee95cb5 100644 --- a/WORKSPACE +++ b/WORKSPACE
@@ -93,7 +93,7 @@ rbe_autoconfig( name = "rbe_default", base_container_digest = "sha256:1a8ed713f40267bb51fe17de012fa631a20c52df818ccb317aaed2ee068dfc61", - digest = "sha256:d69c260b98a97ad430d34c4591fb2399e00888750f5d47ede00c1e6f3e774e5a", + digest = "sha256:62b161e79413f0f59ae3845c377b10e60a4a639f3d32569a82b620f017837a68", registry = "gcr.io", repository = "iree-oss/rbe-toolchain", use_checked_in_confs = "Force",
diff --git a/benchmarks/TensorFlow/CMakeLists.txt b/benchmarks/TensorFlow/CMakeLists.txt index fb5c8e2..0c3f5dd 100644 --- a/benchmarks/TensorFlow/CMakeLists.txt +++ b/benchmarks/TensorFlow/CMakeLists.txt
@@ -21,7 +21,7 @@ "fp16" # MODULE_TAGS # This uses the same input MLIR source as fp32 to save download time. # It requires users to have "--iree-flow-demote-f32-to-f16". - "https://storage.googleapis.com/iree-model-artifacts/MobileBertSquad-810f6fdc.tar.gz" # MLIR_SOURCE + "https://storage.googleapis.com/iree-model-artifacts/MobileBertSquad-9e4b02e4b.tar.gz" # MLIR_SOURCE "serving_default" # ENTRY_FUNCTION # The conversion done by "--iree-flow-demote-f32-to-f16" won't change the # original input signature. @@ -31,7 +31,7 @@ set(MOBILEBERT_FP32_MODULE "MobileBertSquad" # MODULE_NAME "fp32" # MODULE_TAGS - "https://storage.googleapis.com/iree-model-artifacts/MobileBertSquad-810f6fdc.tar.gz" # MLIR_SOURCE + "https://storage.googleapis.com/iree-model-artifacts/MobileBertSquad-9e4b02e4b.tar.gz" # MLIR_SOURCE "serving_default" # ENTRY_FUNCTION "1x384xi32,1x384xi32,1x384xi32" # FUNCTION_INPUTS ) @@ -39,7 +39,7 @@ set(MOBILENET_V2_MODULE "MobileNetV2" # MODULE_NAME "fp32,imagenet" # MODULE_TAGS - "https://storage.googleapis.com/iree-model-artifacts/MobileNetV2-b0c5c584.tar.gz" # MLIR_SOURCE + "https://storage.googleapis.com/iree-model-artifacts/MobileNetV2-9e4b02e4b.tar.gz" # MLIR_SOURCE "call" # ENTRY_FUNCTION "1x224x224x3xf32" # FUNCTION_INPUTS ) @@ -47,7 +47,7 @@ set(MOBILENET_V3SMALL_MODULE "MobileNetV3Small" # MODULE_NAME "fp32,imagenet" # MODULE_TAGS - "https://storage.googleapis.com/iree-model-artifacts/MobileNetV3Small-b0c5c584.tar.gz" # MLIR_SOURCE + "https://storage.googleapis.com/iree-model-artifacts/MobileNetV3Small-9e4b02e4b.tar.gz" # MLIR_SOURCE "call" # ENTRY_FUNCTION "1x224x224x3xf32" # FUNCTION_INPUTS )
diff --git a/bindings/python/iree/compiler/setup.py.in b/bindings/python/iree/compiler/setup.py.in index 71c1942..c2c605b 100644 --- a/bindings/python/iree/compiler/setup.py.in +++ b/bindings/python/iree/compiler/setup.py.in
@@ -68,7 +68,7 @@ "Operating System :: OS Independent", "Development Status :: 3 - Alpha", ], - python_requires=">=3.6", + python_requires=">=3.7", packages=find_namespace_packages( include=["iree.compiler", "iree.compiler.*", "iree.tools.core"]), package_data={
diff --git a/bindings/python/iree/compiler/tf.py b/bindings/python/iree/compiler/tf.py index e8a3b55..65c81b1 100644 --- a/bindings/python/iree/compiler/tf.py +++ b/bindings/python/iree/compiler/tf.py
@@ -84,6 +84,7 @@ save_temp_tf_input: Optional[str] = None, save_temp_mid_level_input: Optional[str] = None, save_temp_iree_input: Optional[str] = None, + use_tosa: bool = False, **kwargs): """Initialize options from keywords. @@ -116,6 +117,7 @@ self.save_temp_tf_input = save_temp_tf_input self.save_temp_mid_level_input = save_temp_mid_level_input self.save_temp_iree_input = save_temp_iree_input + self.use_tosa = use_tosa def build_import_command_line(input_path: str, tfs: TempFileSaver, @@ -164,6 +166,9 @@ if save_iree_input: cl.append(f"--save-temp-iree-input={save_iree_input}") + if options.use_tosa: + cl.append(f"--use-tosa") + # Crash reproducer (locally qualified). requested_crash_reproducer_path = options.crash_reproducer_path if requested_crash_reproducer_path:
diff --git a/bindings/python/iree/jax/setup.py.in b/bindings/python/iree/jax/setup.py.in index 59ad912..db31229 100644 --- a/bindings/python/iree/jax/setup.py.in +++ b/bindings/python/iree/jax/setup.py.in
@@ -32,7 +32,7 @@ "Operating System :: OS Independent", "Development Status :: 3 - Alpha", ], - python_requires=">=3.6", + python_requires=">=3.7", packages=find_namespace_packages(include=["iree.jax"]), zip_safe=True, install_requires = [
diff --git a/bindings/python/iree/runtime/CMakeLists.txt b/bindings/python/iree/runtime/CMakeLists.txt index d501783..0b9d574 100644 --- a/bindings/python/iree/runtime/CMakeLists.txt +++ b/bindings/python/iree/runtime/CMakeLists.txt
@@ -43,6 +43,13 @@ iree_py_test( NAME + function_test + SRCS + "function_test.py" +) + +iree_py_test( + NAME hal_test SRCS "hal_test.py"
diff --git a/bindings/python/iree/runtime/function.py b/bindings/python/iree/runtime/function.py index e46b29f..7fef1c6 100644 --- a/bindings/python/iree/runtime/function.py +++ b/bindings/python/iree/runtime/function.py
@@ -5,7 +5,7 @@ # See https://llvm.org/LICENSE.txt for license information. # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -from typing import Optional +from typing import Dict, Optional import json import logging @@ -66,7 +66,9 @@ "_abi_dict", "_arg_descs", "_ret_descs", - "_has_kwargs", + "_named_arg_indices", + "_max_named_arg_index", + "_has_inlined_results", "_tracer", ] @@ -83,7 +85,9 @@ self._abi_dict = None self._arg_descs = None self._ret_descs = None - self._has_kwargs = False + self._has_inlined_results = False + self._named_arg_indices: Dict[str, int] = {} + self._max_named_arg_index: int = -1 self._parse_abi_dict(vm_function) @property @@ -101,16 +105,21 @@ inv = Invocation(self._device) ret_descs = self._ret_descs - # If kwargs are present, we treat those more as kwarg-only parameters (i.e. - # you cannot just arbitrarily use them to override positional arguments - # by name in the current implementation). If the backing ABI metadata - # declares support for kwargs, this will be done by having a final - # 'kwargs_sdict' arg descriptor, and we rewrite into this form. - # So we just append the kwargs dict to the args list and let decoding - # happen normally. - if self._has_kwargs: + # Merge keyword args in by name->position mapping. 
+ if kwargs: args = list(args) - args.append(kwargs if kwargs else dict()) + len_delta = self._max_named_arg_index - len(args) + 1 + if len_delta > 0: + args.extend([NotImplemented] * len_delta) + for kwarg_key, kwarg_value in kwargs.items(): + try: + kwarg_index = self._named_arg_indices[kwarg_key] + except KeyError: + raise ArgumentError(f"specified kwarg '{kwarg_key}' is unknown") + len_delta = kwarg_index - len(args) + 1 + if len_delta <= 0: + args.extend([NotImplemented] * len_delta) + args[kwarg_index] = kwarg_value arg_list = VmVariantList(len(args)) ret_list = VmVariantList(len(ret_descs) if ret_descs is not None else 1) @@ -120,7 +129,14 @@ self._vm_context.invoke(self._vm_function, arg_list, ret_list) if call_trace: call_trace.add_vm_list(ret_list, "results") - returns = _extract_vm_sequence_to_python(inv, ret_list, ret_descs) + + # Un-inline the results to align with reflection, as needed. + reflection_aligned_ret_list = ret_list + if self._has_inlined_results: + reflection_aligned_ret_list = VmVariantList(1) + reflection_aligned_ret_list.push_list(ret_list) + returns = _extract_vm_sequence_to_python(inv, reflection_aligned_ret_list, + ret_descs) if call_trace: call_trace.end_call() return_arity = len(returns) @@ -157,11 +173,23 @@ raise RuntimeError( f"Malformed function reflection metadata structure: {reflection}") - # See if kwargs are expected. - if self._arg_descs: - maybe_kwargs_desc = self._arg_descs[-1] - if maybe_kwargs_desc and maybe_kwargs_desc[0] == "sdict_kwargs": - self._has_kwargs = True + # Post-process the arg descs to transform "named" records to just their + # type, stashing the index. 
+ for i in range(len(self._arg_descs)): + maybe_named_desc = self._arg_descs[i] + if maybe_named_desc and maybe_named_desc[0] == "named": + arg_name, arg_type_desc = maybe_named_desc[1:] + self._arg_descs[i] = arg_type_desc + self._named_arg_indices[arg_name] = i + if i > self._max_named_arg_index: + self._max_named_arg_index = i + + # Detect whether the results are a slist/stuple/sdict, which indicates + # that they are inlined with the function's results. + if len(self._ret_descs) == 1: + maybe_inlined = self._ret_descs[0] + if maybe_inlined and maybe_inlined[0] in ["slist", "stuple", "sdict"]: + self._has_inlined_results = True def __repr__(self): return repr(self._vm_function) @@ -174,6 +202,10 @@ # desc: The ABI descriptor list (or None if in dynamic mode). +def _missing_argument(inv: Invocation, t: VmVariantList, x, desc): + _raise_argument_error(inv, f"a required argument was not specified") + + def _bool_to_vm(inv: Invocation, t: VmVariantList, x, desc): _int_to_vm(inv, t, int(x), desc) @@ -216,7 +248,7 @@ def _dict_to_vm(inv: Invocation, t: VmVariantList, x, desc): desc_type = desc[0] - if desc_type != "sdict" and desc_type != "sdict_kwargs": + if desc_type != "sdict": _raise_argument_error(inv, f"passed a dict but expected {desc_type}") # When decoding a dict, the desc object is like: # ['sdict', ['key0', [...value_type_0...]], ['key1', [...value_type_1...]]]] @@ -276,6 +308,7 @@ PYTHON_TO_VM_CONVERTERS = { + NotImplemented.__class__: _missing_argument, bool: _bool_to_vm, int: _int_to_vm, float: _float_to_vm, @@ -325,11 +358,33 @@ return tuple(_vm_to_slist(inv, vm_list, vm_index, desc)) +def _vm_to_scalar(type_bound: type): + + def convert(inv: Invocation, vm_list: VmVariantList, vm_index: int, desc): + value = vm_list.get_variant(vm_index) + if not isinstance(value, type_bound): + raise ReturnError( + f"expected an {type_bound} value but got {value.__class__}") + return value + + return convert + + VM_TO_PYTHON_CONVERTERS = { "ndarray": _vm_to_ndarray, 
"sdict": _vm_to_sdict, "slist": _vm_to_slist, "stuple": _vm_to_stuple, + + # Scalars. + "i8": _vm_to_scalar(int), + "i16": _vm_to_scalar(int), + "i32": _vm_to_scalar(int), + "i64": _vm_to_scalar(int), + "f16": _vm_to_scalar(float), + "f32": _vm_to_scalar(float), + "f64": _vm_to_scalar(float), + "bf16": _vm_to_scalar(float), } ABI_TYPE_TO_DTYPE = { @@ -381,11 +436,20 @@ return dtype(x) +class ArgumentError(ValueError): + pass + + +class ReturnError(ValueError): + pass + + def _raise_argument_error(inv: Invocation, summary: str, e: Optional[Exception] = None): - new_e = ValueError(f"Error passing argument: {summary} " - f"(while encoding argument {inv.summarize_arg_error()})") + new_e = ArgumentError( + f"Error passing argument: {summary} " + f"(while encoding argument {inv.summarize_arg_error()})") if e: raise new_e from e else: @@ -395,8 +459,8 @@ def _raise_return_error(inv: Invocation, summary: str, e: Optional[Exception] = None): - new_e = ValueError(f"Error processing function return: {summary} " - f"(while decoding return {inv.summarize_return_error()})") + new_e = ReturnError(f"Error processing function return: {summary} " + f"(while decoding return {inv.summarize_return_error()})") if e: raise new_e from e else: @@ -429,6 +493,8 @@ f" (for desc {desc})") try: converter(inv, vm_list, py_value, desc) + except ArgumentError: + raise except Exception as e: _raise_argument_error(inv, f"exception converting from Python type to VM", e) @@ -451,13 +517,15 @@ converted = vm_list.get_variant(vm_index) else: # Known type descriptor. - vm_type = desc[0] + vm_type = desc if isinstance(desc, str) else desc[0] try: converter = VM_TO_PYTHON_CONVERTERS[vm_type] except KeyError: _raise_return_error(inv, f"cannot map VM type to Python: {vm_type}") try: converted = converter(inv, vm_list, vm_index, desc) + except ReturnError: + raise except Exception as e: _raise_return_error(inv, f"exception converting from VM type to Python", e)
diff --git a/bindings/python/iree/runtime/function_test.py b/bindings/python/iree/runtime/function_test.py new file mode 100644 index 0000000..3db2671 --- /dev/null +++ b/bindings/python/iree/runtime/function_test.py
@@ -0,0 +1,202 @@ +# Lint as: python3 +# Copyright 2019 The IREE Authors +# +# Licensed under the Apache License v2.0 with LLVM Exceptions. +# See https://llvm.org/LICENSE.txt for license information. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + +import json + +from absl.testing import absltest + +from iree import runtime as rt +from iree.runtime.function import FunctionInvoker +from iree.runtime.binding import VmVariantList + + +class MockVmContext: + + def __init__(self, invoke_callback): + self._invoke_callback = invoke_callback + self.invocations = [] + + def invoke(self, vm_function, arg_list, ret_list): + self._invoke_callback(arg_list, ret_list) + self.invocations.append((vm_function, arg_list, ret_list)) + print(f"INVOKE: {arg_list} -> {ret_list}") + + @property + def mock_arg_reprs(self): + return repr([arg_list for _, arg_list, _ in self.invocations]) + + +class MockVmFunction: + + def __init__(self, reflection): + self.reflection = reflection + + +class FunctionTest(absltest.TestCase): + + def setUp(self): + # Doesn't matter what device. We just need one. 
+ config = rt.Config("vmvx") + self.device = config.device + + def testNoReflectionScalars(self): + + def invoke(arg_list, ret_list): + ret_list.push_int(3) + ret_list.push_int(4) + + vm_context = MockVmContext(invoke) + vm_function = MockVmFunction(reflection={}) + invoker = FunctionInvoker(vm_context, self.device, vm_function, tracer=None) + result = invoker(1, 2) + self.assertEqual("[<VmVariantList(2): [1, 2]>]", vm_context.mock_arg_reprs) + self.assertEqual((3, 4), result) + + def testKeywordArgs(self): + + def invoke(arg_list, ret_list): + ret_list.push_int(3) + + vm_context = MockVmContext(invoke) + vm_function = MockVmFunction( + reflection={ + "iree.abi": + json.dumps({ + "a": [ + "i32", + ["named", "a", "i32"], + ["named", "b", "i32"], + ], + "r": ["i32",], + }) + }) + invoker = FunctionInvoker(vm_context, self.device, vm_function, tracer=None) + result = invoker(-1, a=1, b=2) + self.assertEqual("[<VmVariantList(3): [-1, 1, 2]>]", + vm_context.mock_arg_reprs) + self.assertEqual(3, result) + + def testInlinedResults(self): + + def invoke(arg_list, ret_list): + ret_list.push_int(3) + ret_list.push_int(4) + + vm_context = MockVmContext(invoke) + vm_function = MockVmFunction(reflection={ + "iree.abi": json.dumps({ + "a": [], + "r": [["slist", "i32", "i32"]], + }) + }) + invoker = FunctionInvoker(vm_context, self.device, vm_function, tracer=None) + result = invoker() + self.assertEqual([3, 4], result) + + def testNestedResults(self): + + def invoke(arg_list, ret_list): + ret_list.push_int(3) + sub_list = VmVariantList(2) + sub_dict = VmVariantList(2) + sub_dict.push_int(100) + sub_dict.push_int(200) + sub_list.push_list(sub_dict) + sub_list.push_int(6) + ret_list.push_list(sub_list) + + vm_context = MockVmContext(invoke) + vm_function = MockVmFunction( + reflection={ + "iree.abi": + json.dumps({ + "a": [], + "r": [ + "i32", + [ + "slist", + ["sdict", ["bar", "i32"], ["foo", "i32"]], + "i64", + ] + ], + }) + }) + invoker = FunctionInvoker(vm_context, 
self.device, vm_function, tracer=None) + result = invoker() + self.assertEqual((3, [{'bar': 100, 'foo': 200}, 6]), result) + + def testMissingPositional(self): + + def invoke(arg_list, ret_list): + ret_list.push_int(3) + + vm_context = MockVmContext(invoke) + vm_function = MockVmFunction( + reflection={ + "iree.abi": + json.dumps({ + "a": [ + "i32", + ["named", "a", "i32"], + ["named", "b", "i32"], + ], + "r": ["i32",], + }) + }) + invoker = FunctionInvoker(vm_context, self.device, vm_function, tracer=None) + with self.assertRaisesRegexp(ValueError, + "a required argument was not specified"): + result = invoker(a=1, b=2) + + def testMissingKeyword(self): + + def invoke(arg_list, ret_list): + ret_list.push_int(3) + + vm_context = MockVmContext(invoke) + vm_function = MockVmFunction( + reflection={ + "iree.abi": + json.dumps({ + "a": [ + "i32", + ["named", "a", "i32"], + ["named", "b", "i32"], + ], + "r": ["i32",], + }) + }) + invoker = FunctionInvoker(vm_context, self.device, vm_function, tracer=None) + with self.assertRaisesRegexp(ValueError, + "a required argument was not specified"): + result = invoker(-1, a=1) + + def testExtraKeyword(self): + + def invoke(arg_list, ret_list): + ret_list.push_int(3) + + vm_context = MockVmContext(invoke) + vm_function = MockVmFunction( + reflection={ + "iree.abi": + json.dumps({ + "a": [ + "i32", + ["named", "a", "i32"], + ["named", "b", "i32"], + ], + "r": ["i32",], + }) + }) + invoker = FunctionInvoker(vm_context, self.device, vm_function, tracer=None) + with self.assertRaisesRegexp(ValueError, "specified kwarg 'c' is unknown"): + result = invoker(-1, a=1, b=2, c=3) + + +if __name__ == "__main__": + absltest.main()
diff --git a/bindings/python/iree/runtime/setup.py.in b/bindings/python/iree/runtime/setup.py.in index 4c84cc2..a20d22f 100644 --- a/bindings/python/iree/runtime/setup.py.in +++ b/bindings/python/iree/runtime/setup.py.in
@@ -30,7 +30,7 @@ "Operating System :: OS Independent", "Development Status :: 3 - Alpha", ], - python_requires=">=3.6", + python_requires=">=3.7", packages=find_namespace_packages( include=["iree.runtime", "iree.runtime.*"]), ext_modules=[
diff --git a/build_tools/buildkite/cmake/android/arm64-v8a/benchmark.yml b/build_tools/buildkite/cmake/android/arm64-v8a/benchmark.yml index 35181d8..6f908ac 100644 --- a/build_tools/buildkite/cmake/android/arm64-v8a/benchmark.yml +++ b/build_tools/buildkite/cmake/android/arm64-v8a/benchmark.yml
@@ -7,9 +7,9 @@ steps: - label: "build" commands: - - "docker run --user=$(id -u):$(id -g) --volume=\\$PWD:\\$IREE_DOCKER_WORKDIR --workdir=\\$IREE_DOCKER_WORKDIR --rm gcr.io/iree-oss/cmake-android@sha256:a152f0d006e237105f8ed9a7e041a6a235c1a69dc1e209383c81087c574b39bf build_tools/kokoro/gcp_ubuntu/cmake/android/build.sh arm64-v8a" + - "docker run --user=$(id -u):$(id -g) --volume=\\$PWD:\\$IREE_DOCKER_WORKDIR --workdir=\\$IREE_DOCKER_WORKDIR --rm gcr.io/iree-oss/cmake-android@sha256:7d780787608474301e74e1b5cc2a1bfd1304a79ed9e0774c7ed422c0e4a38625 build_tools/kokoro/gcp_ubuntu/cmake/android/build.sh arm64-v8a" - "python3 build_tools/mako/prepare_benchmark_files.py" - - "docker run --user=$(id -u):$(id -g) --volume=\\$PWD:\\$IREE_DOCKER_WORKDIR --workdir=\\$IREE_DOCKER_WORKDIR --rm gcr.io/iree-oss/cmake-android@sha256:a152f0d006e237105f8ed9a7e041a6a235c1a69dc1e209383c81087c574b39bf python3 build_tools/mako/compile_android_modules.py" + - "docker run --user=$(id -u):$(id -g) --volume=\\$PWD:\\$IREE_DOCKER_WORKDIR --workdir=\\$IREE_DOCKER_WORKDIR --rm gcr.io/iree-oss/cmake-android@sha256:7d780787608474301e74e1b5cc2a1bfd1304a79ed9e0774c7ed422c0e4a38625 python3 build_tools/mako/compile_android_modules.py" - "tar -czvf model-Pixel4-artifacts.tgz build-android/iree/tools/iree-benchmark-module *Pixel4*.vmfb" - "tar -czvf model-S20-artifacts.tgz build-android/iree/tools/iree-benchmark-module *S20*.vmfb" - "tar -czvf flagfiles.tgz *_flagfile"
diff --git a/build_tools/buildkite/cmake/android/arm64-v8a/benchmark2.yml b/build_tools/buildkite/cmake/android/arm64-v8a/benchmark2.yml index b8793d0..7c1b864 100644 --- a/build_tools/buildkite/cmake/android/arm64-v8a/benchmark2.yml +++ b/build_tools/buildkite/cmake/android/arm64-v8a/benchmark2.yml
@@ -9,7 +9,7 @@ steps: - label: "Build" commands: - - "docker run --user=$(id -u):$(id -g) --volume=\\$PWD:\\$IREE_DOCKER_WORKDIR --workdir=\\$IREE_DOCKER_WORKDIR --rm gcr.io/iree-oss/cmake-android@sha256:eb72a4864f30ba53868cf23caf44e60aca35a36682ed1b1a5fa37cd9fda8855c build_tools/kokoro/gcp_ubuntu/cmake/android/build.sh arm64-v8a" + - "docker run --user=$(id -u):$(id -g) --volume=\\$PWD:\\$IREE_DOCKER_WORKDIR --workdir=\\$IREE_DOCKER_WORKDIR --rm gcr.io/iree-oss/cmake-android@sha256:7d780787608474301e74e1b5cc2a1bfd1304a79ed9e0774c7ed422c0e4a38625 build_tools/kokoro/gcp_ubuntu/cmake/android/build.sh arm64-v8a" - "tar --exclude='*.tar.gz' --exclude='*.tgz' --exclude='*.mlir' -czvf benchmark-suites.tgz build-host/benchmark_suites" - "tar -czvf iree-android-tools.tgz build-android/iree/tools/iree-*-module" if: "build.pull_request.id == null || (build.pull_request.labels includes 'buildkite:benchmark')"
diff --git a/build_tools/buildkite/cmake/android/arm64-v8a/pipeline.yml b/build_tools/buildkite/cmake/android/arm64-v8a/pipeline.yml index 98808a2..e10fb90 100644 --- a/build_tools/buildkite/cmake/android/arm64-v8a/pipeline.yml +++ b/build_tools/buildkite/cmake/android/arm64-v8a/pipeline.yml
@@ -7,7 +7,7 @@ steps: - label: "build" commands: - - "docker run --user=$(id -u):$(id -g) --volume=\\$PWD:\\$IREE_DOCKER_WORKDIR --workdir=\\$IREE_DOCKER_WORKDIR --rm gcr.io/iree-oss/cmake-android@sha256:a152f0d006e237105f8ed9a7e041a6a235c1a69dc1e209383c81087c574b39bf build_tools/kokoro/gcp_ubuntu/cmake/android/build.sh arm64-v8a" + - "docker run --user=$(id -u):$(id -g) --volume=\\$PWD:\\$IREE_DOCKER_WORKDIR --workdir=\\$IREE_DOCKER_WORKDIR --rm gcr.io/iree-oss/cmake-android@sha256:7d780787608474301e74e1b5cc2a1bfd1304a79ed9e0774c7ed422c0e4a38625 build_tools/kokoro/gcp_ubuntu/cmake/android/build.sh arm64-v8a" - "tar --exclude='*.o' --exclude='*.a' -czvf build-artifacts.tgz build-android" agents: - "queue=build" @@ -23,7 +23,7 @@ - "tar xzf build-artifacts.tgz" - "find build-android/ -name '*.cmake' -exec sed -i \"s!\\$IREE_DOCKER_WORKDIR/!\\$PWD/!g\" {} \\;" - "cd build-android/" - - "ctest --output-on-failure" + - "ctest --timeout 900 --output-on-failure" agents: - "android-soc=exynos-990" - "queue=test-android" @@ -37,7 +37,7 @@ - "tar xzf build-artifacts.tgz" - "find build-android/ -name '*.cmake' -exec sed -i \"s!\\$IREE_DOCKER_WORKDIR/!\\$PWD/!g\" {} \\;" - "cd build-android/" - - "ctest --output-on-failure" + - "ctest --timeout 900 --output-on-failure" agents: - "android-soc=exynos-9820" - "queue=test-android" @@ -53,7 +53,7 @@ - "cd build-android/" # vulkan tests using khr_shader_float16_int8 are failing on pixel4. # Disabling it until we identify the root cause. - - "ctest --output-on-failure --label-exclude \"^vulkan_uses_vk_khr_shader_float16_int8\\$\"" + - "ctest --timeout 900 --output-on-failure --label-exclude \"^vulkan_uses_vk_khr_shader_float16_int8\\$\"" agents: - "android-soc=snapdragon-855" - "queue=test-android"
diff --git a/build_tools/buildkite/samples.yml b/build_tools/buildkite/samples.yml index 44a05a3..6844965 100644 --- a/build_tools/buildkite/samples.yml +++ b/build_tools/buildkite/samples.yml
@@ -7,7 +7,7 @@ steps: - label: "Test Colab notebooks" commands: - - "docker run --user=$(id -u):$(id -g) --volume=\\$PWD:\\$IREE_DOCKER_WORKDIR --workdir=\\$IREE_DOCKER_WORKDIR --rm gcr.io/iree-oss/samples@sha256:be5465585706b620d6c722caa6237eafdfaa8dd11ce20db0981b979f2d3387b3 python3 colab/test_notebooks.py" + - "docker run --user=$(id -u):$(id -g) --volume=\\$PWD:\\$IREE_DOCKER_WORKDIR --workdir=\\$IREE_DOCKER_WORKDIR --rm gcr.io/iree-oss/samples@sha256:e4099b5ed2a2b9292402efb4c8537e5fb465099c5fc329df67d8dbe54761471e python3 colab/test_notebooks.py" env: IREE_DOCKER_WORKDIR: "/usr/src/github/iree" agents: @@ -15,7 +15,7 @@ - label: "Test Samples" commands: - - "docker run --user=$(id -u):$(id -g) --volume=\\$PWD:\\$IREE_DOCKER_WORKDIR --workdir=\\$IREE_DOCKER_WORKDIR --rm gcr.io/iree-oss/samples@sha256:be5465585706b620d6c722caa6237eafdfaa8dd11ce20db0981b979f2d3387b3 build_tools/testing/test_samples.sh" + - "docker run --user=$(id -u):$(id -g) --volume=\\$PWD:\\$IREE_DOCKER_WORKDIR --workdir=\\$IREE_DOCKER_WORKDIR --rm gcr.io/iree-oss/samples@sha256:e4099b5ed2a2b9292402efb4c8537e5fb465099c5fc329df67d8dbe54761471e build_tools/testing/test_samples.sh" env: IREE_DOCKER_WORKDIR: "/usr/src/github/iree" agents:
diff --git a/build_tools/cmake/iree_bytecode_module.cmake b/build_tools/cmake/iree_bytecode_module.cmake index cc31780..f500a9e 100644 --- a/build_tools/cmake/iree_bytecode_module.cmake +++ b/build_tools/cmake/iree_bytecode_module.cmake
@@ -13,8 +13,7 @@ # Parameters: # NAME: Name of target (see Note). # SRC: Source file to compile into a bytecode module. -# FLAGS: Flags to pass to the translation tool (list of strings). The -# default flag set is "-iree-mlir-to-vm-bytecode-module". +# FLAGS: Flags to pass to the translation tool (list of strings). # TRANSLATE_TOOL: Translation tool to invoke (CMake target). The default # tool is "iree-translate". # C_IDENTIFIER: Identifier to use for generate c embed code. @@ -42,12 +41,7 @@ return() endif() - # Set defaults for FLAGS and TRANSLATE_TOOL - if(DEFINED _RULE_FLAGS) - set(_FLAGS ${_RULE_FLAGS}) - else() - set(_FLAGS "-iree-mlir-to-vm-bytecode-module") - endif() + # Set default for TRANSLATE_TOOL. if(DEFINED _RULE_TRANSLATE_TOOL) set(_TRANSLATE_TOOL ${_RULE_TRANSLATE_TOOL}) else() @@ -57,7 +51,7 @@ iree_get_executable_path(_TRANSLATE_TOOL_EXECUTABLE ${_TRANSLATE_TOOL}) iree_get_executable_path(_EMBEDDED_LINKER_TOOL_EXECUTABLE "lld") - set(_ARGS "${_FLAGS}") + set(_ARGS "${_RULE_FLAGS}") list(APPEND _ARGS "${CMAKE_CURRENT_SOURCE_DIR}/${_RULE_SRC}") list(APPEND _ARGS "-o") list(APPEND _ARGS "${_RULE_NAME}.vmfb")
diff --git a/build_tools/cmake/iree_cc_test.cmake b/build_tools/cmake/iree_cc_test.cmake index 4651837..dfc8bc6 100644 --- a/build_tools/cmake/iree_cc_test.cmake +++ b/build_tools/cmake/iree_cc_test.cmake
@@ -5,7 +5,6 @@ # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception include(CMakeParseArguments) -include(iree_installed_test) # iree_cc_test() # @@ -126,8 +125,6 @@ string(REPLACE "::" "/" _PACKAGE_PATH ${_PACKAGE_NS}) set(_TEST_NAME "${_PACKAGE_PATH}/${_RULE_NAME}") - list(APPEND _RULE_LABELS "${_PACKAGE_PATH}") - # Case for cross-compiling towards Android. if(ANDROID) set(_ANDROID_REL_DIR "${_PACKAGE_PATH}/${_RULE_NAME}") @@ -152,24 +149,22 @@ TEST_TMPDIR=${_ANDROID_ABS_DIR}/test_tmpdir ) set_property(TEST ${_TEST_NAME} PROPERTY ENVIRONMENT ${_ENVIRONMENT_VARS}) - set_property(TEST ${_TEST_NAME} PROPERTY LABELS "${_RULE_LABELS}") else(ANDROID) - iree_add_installed_test( - TEST_NAME "${_TEST_NAME}" - LABELS "${_RULE_LABELS}" + add_test( + NAME + ${_TEST_NAME} COMMAND # We run all our tests through a custom test runner to allow temp # directory cleanup upon test completion. "${CMAKE_SOURCE_DIR}/build_tools/cmake/run_test.${IREE_HOST_SCRIPT_EXT}" "$<TARGET_FILE:${_NAME}>" - INSTALLED_COMMAND - # Must match install destination below. - "${_PACKAGE_PATH}/$<TARGET_FILE_NAME:${_NAME}>" - ) + WORKING_DIRECTORY + "${CMAKE_BINARY_DIR}" + ) + set_property(TEST ${_TEST_NAME} PROPERTY ENVIRONMENT "TEST_TMPDIR=${CMAKE_BINARY_DIR}/${_NAME}_test_tmpdir") + iree_add_test_environment_properties(${_TEST_NAME}) endif(ANDROID) - install(TARGETS ${_NAME} - DESTINATION "tests/${_PACKAGE_PATH}" - COMPONENT Tests - ) + list(APPEND _RULE_LABELS "${_PACKAGE_PATH}") + set_property(TEST ${_TEST_NAME} PROPERTY LABELS "${_RULE_LABELS}") endfunction()
diff --git a/build_tools/cmake/iree_copts.cmake b/build_tools/cmake/iree_copts.cmake index a3a4d42..2a6e541 100644 --- a/build_tools/cmake/iree_copts.cmake +++ b/build_tools/cmake/iree_copts.cmake
@@ -317,6 +317,11 @@ # TODO(#898): add a dedicated size-constrained configuration. if(${IREE_SIZE_OPTIMIZED}) iree_select_compiler_opts(IREE_SIZE_OPTIMIZED_DEFAULT_COPTS + CLANG_OR_GCC + "-DIREE_STATUS_MODE=0" + "-DIREE_HAL_MODULE_STRING_UTIL_ENABLE=0" + "-DIREE_VM_EXT_I64_ENABLE=0" + "-DIREE_VM_EXT_F32_ENABLE=0" MSVC_OR_CLANG_CL "/GS-" "/GL" @@ -367,61 +372,9 @@ # TODO(benvanik): MSVC options. #------------------------------------------------------------------------------- -# Third party: benchmark +# Third party: llvm-project #------------------------------------------------------------------------------- -set(BENCHMARK_ENABLE_TESTING OFF CACHE BOOL "" FORCE) -set(BENCHMARK_ENABLE_INSTALL OFF CACHE BOOL "" FORCE) - -#------------------------------------------------------------------------------- -# Third party: cpuinfo -#------------------------------------------------------------------------------- - -set(CPUINFO_BUILD_TOOLS ON CACHE BOOL "" FORCE) - -set(CPUINFO_BUILD_BENCHMARKS OFF CACHE BOOL "" FORCE) -set(CPUINFO_BUILD_UNIT_TESTS OFF CACHE BOOL "" FORCE) -set(CPUINFO_BUILD_MOCK_TESTS OFF CACHE BOOL "" FORCE) - -#------------------------------------------------------------------------------- -# Third party: flatcc -#------------------------------------------------------------------------------- - -set(FLATCC_TEST OFF CACHE BOOL "" FORCE) -set(FLATCC_CXX_TEST OFF CACHE BOOL "" FORCE) -set(FLATCC_REFLECTION OFF CACHE BOOL "" FORCE) -set(FLATCC_ALLOW_WERROR OFF CACHE BOOL "" FORCE) - -if(CMAKE_CROSSCOMPILING) - set(FLATCC_RTONLY ON CACHE BOOL "" FORCE) -else() - set(FLATCC_RTONLY OFF CACHE BOOL "" FORCE) -endif() - -#------------------------------------------------------------------------------- -# Third party: gtest -#------------------------------------------------------------------------------- - -set(INSTALL_GTEST OFF CACHE BOOL "" FORCE) -set(gtest_force_shared_crt ON CACHE BOOL "" FORCE) - 
-#------------------------------------------------------------------------------- -# Third party: llvm/mlir -#------------------------------------------------------------------------------- - -set(LLVM_INCLUDE_EXAMPLES OFF CACHE BOOL "" FORCE) -set(LLVM_INCLUDE_TESTS OFF CACHE BOOL "" FORCE) -set(LLVM_INCLUDE_BENCHMARKS OFF CACHE BOOL "" FORCE) -set(LLVM_APPEND_VC_REV OFF CACHE BOOL "" FORCE) -set(LLVM_ENABLE_IDE ON CACHE BOOL "" FORCE) - -# TODO(ataei): Use optional build time targets selection for LLVMAOT. -set(LLVM_TARGETS_TO_BUILD "WebAssembly;X86;ARM;AArch64;RISCV;NVPTX;AMDGPU" - CACHE STRING "" FORCE) - -set(LLVM_ENABLE_PROJECTS "mlir;lld" CACHE STRING "" FORCE) -set(LLVM_ENABLE_BINDINGS OFF CACHE BOOL "" FORCE) - set(MLIR_TABLEGEN_EXE mlir-tblgen) # iree-tblgen is not defined using the add_tablegen mechanism as other TableGen # tools in LLVM. @@ -434,23 +387,3 @@ if(IREE_ENABLE_EMITC) add_definitions(-DIREE_HAVE_EMITC_DIALECT) endif() - -#------------------------------------------------------------------------------- -# Third party: SPIRV-Cross -#------------------------------------------------------------------------------- - -if(${IREE_TARGET_BACKEND_METAL-SPIRV}) - set(SPIRV_CROSS_ENABLE_MSL ON CACHE BOOL "" FORCE) - set(SPIRV_CROSS_ENABLE_GLSL ON CACHE BOOL "" FORCE) # Required to enable MSL - - set(SPIRV_CROSS_EXCEPTIONS_TO_ASSERTIONS OFF CACHE BOOL "" FORCE) - set(SPIRV_CROSS_CLI OFF CACHE BOOL "" FORCE) - set(SPIRV_CROSS_ENABLE_TESTS OFF CACHE BOOL "" FORCE) - set(SPIRV_CROSS_SKIP_INSTALL ON CACHE BOOL "" FORCE) - - set(SPIRV_CROSS_ENABLE_HLSL OFF CACHE BOOL "" FORCE) - set(SPIRV_CROSS_ENABLE_CPP OFF CACHE BOOL "" FORCE) - set(SPIRV_CROSS_ENABLE_REFLECT OFF CACHE BOOL "" FORCE) - set(SPIRV_CROSS_ENABLE_C_API OFF CACHE BOOL "" FORCE) - set(SPIRV_CROSS_ENABLE_UTIL OFF CACHE BOOL "" FORCE) -endif()
diff --git a/build_tools/cmake/iree_installed_test.cmake b/build_tools/cmake/iree_installed_test.cmake deleted file mode 100644 index 1f99997..0000000 --- a/build_tools/cmake/iree_installed_test.cmake +++ /dev/null
@@ -1,92 +0,0 @@ -# Copyright 2020 The IREE Authors -# -# Licensed under the Apache License v2.0 with LLVM Exceptions. -# See https://llvm.org/LICENSE.txt for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception - -# iree_add_installed_test() -# -# Creates a build-time and exported install-time test. All tests are installed -# into the tests/ tree. Calling code must arrange to install dependencies of the -# test into that tree. -# -# Parameters: -# TEST_NAME: Name of the test (as in "some/path/to/test"). -# COMMAND: Passed to add_test() as is. -# ENVIRONMENT: Set as the ENVIRONMENT property of the build-time test. -# INSTALLED_COMMAND: Corrollary to the 'COMMAND' argument but added to the -# install time definition. -# WORKING_DIRECTORY: Passed to add_test() as is. Note that in the install tree -# all tests run in the tests/ directory. -# LABELS: Labels to pass to add_test() and installed tests. -function(iree_add_installed_test) - cmake_parse_arguments( - _RULE - "" - "TEST_NAME" - "COMMAND;ENVIRONMENT;INSTALLED_COMMAND;WORKING_DIRECTORY;LABELS" - ${ARGN} - ) - - - add_test( - NAME - ${_RULE_TEST_NAME} - COMMAND - ${_RULE_COMMAND} - ) - if (DEFINED _RULE_WORKING_DIRECTORY) - set_property( - TEST - ${_RULE_TEST_NAME} - PROPERTY WORKING_DIRECTORY - "${_RULE_WORKING_DIRECTORY}" - ) - endif() - set_property( - TEST - ${_RULE_TEST_NAME} - PROPERTY LABELS - "${_RULE_LABELS}" - ) - set_property( - TEST - ${_RULE_TEST_NAME} - PROPERTY ENVIRONMENT - "TEST_TMPDIR=${CMAKE_BINARY_DIR}/${_RULE_TEST_NAME}_test_tmpdir" - ${_RULE_ENVIRONMENT} - ) - iree_add_test_environment_properties(${_RULE_TEST_NAME}) - - # Write the to the installed ctest file template. - set(_installed_ctest_input_file - "${CMAKE_BINARY_DIR}/iree_installed_tests.cmake.in") - get_property(_has_tests GLOBAL PROPERTY IREE_HAS_INSTALLED_TESTS) - if(NOT _has_tests) - # First time. - file(WRITE "${_installed_ctest_input_file}") # Truncate. 
- set_property(GLOBAL PROPERTY IREE_HAS_INSTALLED_TESTS ON) - endif() - - # Now write directives to the installed tests cmake file. - file(APPEND "${_installed_ctest_input_file}" - "add_test(${_RULE_TEST_NAME} ${_RULE_INSTALLED_COMMAND})\n" - "set_tests_properties(${_RULE_TEST_NAME} PROPERTIES LABELS \"${_RULE_LABELS}\")\n" - ) - - # First time generation and setup to install. Note that since this all runs - # at the generate phase, it doesn't matter that we trigger it before all - # tests accumulate. - if(NOT _has_tests) - set(_installed_ctest_output_file "${CMAKE_BINARY_DIR}/iree_installed_tests.cmake") - file(GENERATE - OUTPUT "${_installed_ctest_output_file}" - INPUT "${_installed_ctest_input_file}" - ) - install(FILES "${_installed_ctest_output_file}" - DESTINATION tests - RENAME "CTestTestfile.cmake" - COMPONENT Tests - ) - endif() -endfunction()
diff --git a/build_tools/cmake/iree_lit_test.cmake b/build_tools/cmake/iree_lit_test.cmake index 9f54ec9..e898800 100644 --- a/build_tools/cmake/iree_lit_test.cmake +++ b/build_tools/cmake/iree_lit_test.cmake
@@ -5,7 +5,6 @@ # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception include(CMakeParseArguments) -include(iree_installed_test) # iree_lit_test() # @@ -61,11 +60,9 @@ iree_package_ns(_PACKAGE_NS) string(REPLACE "::" "/" _PACKAGE_PATH ${_PACKAGE_NS}) set(_NAME_PATH "${_PACKAGE_PATH}/${_RULE_NAME}") - list(APPEND _RULE_LABELS "${_PACKAGE_PATH}") - - iree_add_installed_test( - TEST_NAME "${_NAME_PATH}" - LABELS "${_RULE_LABELS}" + add_test( + NAME + ${_NAME_PATH} COMMAND # We run all our tests through a custom test runner to allow setup # and teardown. @@ -73,18 +70,16 @@ "${CMAKE_SOURCE_DIR}/iree/tools/run_lit.${IREE_HOST_SCRIPT_EXT}" ${_TEST_FILE_PATH} ${_DATA_DEP_PATHS} - INSTALLED_COMMAND - # TODO: Make the lit runner be not a shell script and more cross-platform. - # Note that the data deps are not bundled: must be externally on the path. - bin/run_lit.${IREE_HOST_SCRIPT_EXT} - ${_TEST_FILE_PATH} + WORKING_DIRECTORY + "${CMAKE_CURRENT_BINARY_DIR}" ) - set_property(TEST ${_NAME_PATH} PROPERTY REQUIRED_FILES "${_TEST_FILE_PATH}") - install(FILES ${_TEST_FILE_PATH} - DESTINATION "tests/${_PACKAGE_PATH}" - COMPONENT Tests - ) + list(APPEND _RULE_LABELS "${_PACKAGE_PATH}") + set_property(TEST ${_NAME_PATH} PROPERTY LABELS "${_RULE_LABELS}") + set_property(TEST ${_NAME_PATH} PROPERTY REQUIRED_FILES "${_TEST_FILE_PATH}") + set_property(TEST ${_NAME_PATH} PROPERTY ENVIRONMENT "TEST_TMPDIR=${_NAME}_test_tmpdir") + iree_add_test_environment_properties(${_NAME_PATH}) + # TODO(gcmn): Figure out how to indicate a dependency on _RULE_DATA being built endfunction()
diff --git a/build_tools/cmake/iree_python.cmake b/build_tools/cmake/iree_python.cmake index 6adcb34..f86f4e9 100644 --- a/build_tools/cmake/iree_python.cmake +++ b/build_tools/cmake/iree_python.cmake
@@ -5,7 +5,6 @@ # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception include(CMakeParseArguments) -include(iree_installed_test) ############################################################################### # Main user rules @@ -257,6 +256,7 @@ # NAME: name of test # SRCS: Test source file # ARGS: Command line arguments to the Python source file. +# DEPS: List of deps the test requires # LABELS: Additional labels to apply to the test. The package path is added # automatically. # GENERATED_IN_BINARY_DIR: If present, indicates that the srcs have been @@ -269,8 +269,8 @@ cmake_parse_arguments( _RULE "GENERATED_IN_BINARY_DIR" - "NAME;SRCS" - "ARGS;LABELS" + "NAME" + "ARGS;DEPS;LABELS;SRCS" ${ARGN} ) @@ -288,26 +288,17 @@ set(_NAME_PATH "${_PACKAGE_PATH}/${_RULE_NAME}") list(APPEND _RULE_LABELS "${_PACKAGE_PATH}") - iree_add_installed_test( - TEST_NAME "${_NAME_PATH}" - LABELS "${_RULE_LABELS}" - ENVIRONMENT - "PYTHONPATH=${CMAKE_BINARY_DIR}/bindings/python:$ENV{PYTHONPATH}" + add_test( + NAME ${_NAME} COMMAND "${CMAKE_SOURCE_DIR}/build_tools/cmake/run_test.${IREE_HOST_SCRIPT_EXT}" "${Python3_EXECUTABLE}" - "${_SRC_DIR}/${_RULE_SRCS}" - ${_RULE_ARGS} - INSTALLED_COMMAND - python - "${_PACKAGE_PATH}/${_RULE_SRCS}" + "${CMAKE_CURRENT_SOURCE_DIR}/${_RULE_SRCS}" + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} ) - install(FILES ${_RULE_SRCS} - DESTINATION "tests/${_PACKAGE_PATH}" - COMPONENT Tests - ) - + set_property(TEST ${_NAME} PROPERTY LABELS "${_RULE_LABELS}") + set_property(TEST ${_NAME} PROPERTY ENVIRONMENT "PYTHONPATH=${CMAKE_BINARY_DIR}/bindings/python:$ENV{PYTHONPATH};TEST_TMPDIR=${_NAME}_${V}_test_tmpdir") # TODO(marbre): Find out how to add deps to tests. # Similar to _RULE_DATA in iree_lit_test(). endfunction()
diff --git a/build_tools/cmake/iree_third_party_cmake_options.cmake b/build_tools/cmake/iree_third_party_cmake_options.cmake new file mode 100644 index 0000000..e960589 --- /dev/null +++ b/build_tools/cmake/iree_third_party_cmake_options.cmake
@@ -0,0 +1,67 @@ +# Copyright 2021 The IREE Authors +# +# Licensed under the Apache License v2.0 with LLVM Exceptions. +# See https://llvm.org/LICENSE.txt for license information. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + +macro(iree_set_benchmark_cmake_options) + set(BENCHMARK_ENABLE_TESTING OFF CACHE BOOL "" FORCE) + set(BENCHMARK_ENABLE_INSTALL OFF CACHE BOOL "" FORCE) +endmacro() + +macro(iree_set_cpuinfo_cmake_options) + set(CPUINFO_BUILD_TOOLS ON CACHE BOOL "" FORCE) + + set(CPUINFO_BUILD_BENCHMARKS OFF CACHE BOOL "" FORCE) + set(CPUINFO_BUILD_UNIT_TESTS OFF CACHE BOOL "" FORCE) + set(CPUINFO_BUILD_MOCK_TESTS OFF CACHE BOOL "" FORCE) +endmacro() + +macro(iree_set_flatcc_cmake_options) + set(FLATCC_TEST OFF CACHE BOOL "" FORCE) + set(FLATCC_CXX_TEST OFF CACHE BOOL "" FORCE) + set(FLATCC_REFLECTION OFF CACHE BOOL "" FORCE) + set(FLATCC_ALLOW_WERROR OFF CACHE BOOL "" FORCE) + + if(CMAKE_CROSSCOMPILING) + set(FLATCC_RTONLY ON CACHE BOOL "" FORCE) + else() + set(FLATCC_RTONLY OFF CACHE BOOL "" FORCE) + endif() +endmacro() + +macro(iree_set_googletest_cmake_options) + set(INSTALL_GTEST OFF CACHE BOOL "" FORCE) + set(gtest_force_shared_crt ON CACHE BOOL "" FORCE) +endmacro() + +macro(iree_set_llvm_cmake_options) + set(LLVM_INCLUDE_EXAMPLES OFF CACHE BOOL "" FORCE) + set(LLVM_INCLUDE_TESTS OFF CACHE BOOL "" FORCE) + set(LLVM_INCLUDE_BENCHMARKS OFF CACHE BOOL "" FORCE) + set(LLVM_APPEND_VC_REV OFF CACHE BOOL "" FORCE) + set(LLVM_ENABLE_IDE ON CACHE BOOL "" FORCE) + + # TODO(ataei): Use optional build time targets selection for LLVMAOT. 
+ set(LLVM_TARGETS_TO_BUILD "WebAssembly;X86;ARM;AArch64;RISCV;NVPTX;AMDGPU" + CACHE STRING "" FORCE) + + set(LLVM_ENABLE_PROJECTS "mlir;lld" CACHE STRING "" FORCE) + set(LLVM_ENABLE_BINDINGS OFF CACHE BOOL "" FORCE) +endmacro() + +macro(iree_set_spirv_cross_cmake_options) + set(SPIRV_CROSS_ENABLE_MSL ON CACHE BOOL "" FORCE) + set(SPIRV_CROSS_ENABLE_GLSL ON CACHE BOOL "" FORCE) # Required to enable MSL + + set(SPIRV_CROSS_EXCEPTIONS_TO_ASSERTIONS OFF CACHE BOOL "" FORCE) + set(SPIRV_CROSS_CLI OFF CACHE BOOL "" FORCE) + set(SPIRV_CROSS_ENABLE_TESTS OFF CACHE BOOL "" FORCE) + set(SPIRV_CROSS_SKIP_INSTALL ON CACHE BOOL "" FORCE) + + set(SPIRV_CROSS_ENABLE_HLSL OFF CACHE BOOL "" FORCE) + set(SPIRV_CROSS_ENABLE_CPP OFF CACHE BOOL "" FORCE) + set(SPIRV_CROSS_ENABLE_REFLECT OFF CACHE BOOL "" FORCE) + set(SPIRV_CROSS_ENABLE_C_API OFF CACHE BOOL "" FORCE) + set(SPIRV_CROSS_ENABLE_UTIL OFF CACHE BOOL "" FORCE) +endmacro()
diff --git a/build_tools/cmake/test.sh b/build_tools/cmake/test.sh index da602bb..120d391 100755 --- a/build_tools/cmake/test.sh +++ b/build_tools/cmake/test.sh
@@ -67,4 +67,4 @@ label_exclude_regex="($(IFS="|" ; echo "${label_exclude_args[*]?}"))" cd "$BUILD_DIR" -ctest --output-on-failure --label-exclude "${label_exclude_regex?}" +ctest --timeout 900 --output-on-failure --label-exclude "${label_exclude_regex?}"
diff --git a/build_tools/docker/cmake-android/Dockerfile b/build_tools/docker/cmake-android/Dockerfile index 605903e..c5d8b77 100644 --- a/build_tools/docker/cmake-android/Dockerfile +++ b/build_tools/docker/cmake-android/Dockerfile
@@ -16,7 +16,7 @@ RUN unzip "android-ndk-${NDK_VERSION?}-linux-x86_64.zip" -d /usr/src/ -FROM gcr.io/iree-oss/cmake-python@sha256:24892ba1d23b8aaf6e4b9c700f524e7de1f19cff903fa6a5dc87579d97077453 AS final +FROM gcr.io/iree-oss/cmake-python@sha256:51817f1a98f9ed9237577133b4c674b163280fd747c1745d6d0d93f0f2b01fb3 AS final ARG NDK_VERSION COPY --from=install-ndk "/usr/src/android-ndk-${NDK_VERSION}" "/usr/src/android-ndk-${NDK_VERSION}" ENV ANDROID_NDK "/usr/src/android-ndk-${NDK_VERSION}"
diff --git a/build_tools/docker/cmake-bazel-frontends-android/Dockerfile b/build_tools/docker/cmake-bazel-frontends-android/Dockerfile index 3e67f03..84a1644 100644 --- a/build_tools/docker/cmake-bazel-frontends-android/Dockerfile +++ b/build_tools/docker/cmake-bazel-frontends-android/Dockerfile
@@ -4,12 +4,12 @@ # See https://llvm.org/LICENSE.txt for license information. # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -FROM gcr.io/iree-oss/cmake-bazel-frontends@sha256:8974ee20d855ecfc8b9e511f4fb4a25d67888952738d2c457d9fd91116b6b67f AS final +FROM gcr.io/iree-oss/cmake-bazel-frontends@sha256:7633ff2b483a07b6c786ffab40cca7cae64d6a211ad0e95ff55d3f1cd2dd1ea9 AS final # Note that NDK_VERSION should be kept in sync with the one in cmake-android. # That actually means this isn't really an ARG because we cannot build this # image with a different NDK version. ARG NDK_VERSION=r21d -COPY --from=gcr.io/iree-oss/cmake-android@sha256:a152f0d006e237105f8ed9a7e041a6a235c1a69dc1e209383c81087c574b39bf "/usr/src/android-ndk-${NDK_VERSION}" "/usr/src/android-ndk-${NDK_VERSION}" +COPY --from=gcr.io/iree-oss/cmake-android@sha256:7d780787608474301e74e1b5cc2a1bfd1304a79ed9e0774c7ed422c0e4a38625 "/usr/src/android-ndk-${NDK_VERSION}" "/usr/src/android-ndk-${NDK_VERSION}" ENV ANDROID_NDK "/usr/src/android-ndk-${NDK_VERSION}"
diff --git a/build_tools/docker/cmake-bazel-frontends-nvidia/Dockerfile b/build_tools/docker/cmake-bazel-frontends-nvidia/Dockerfile index eb7de03..5c0f78f 100644 --- a/build_tools/docker/cmake-bazel-frontends-nvidia/Dockerfile +++ b/build_tools/docker/cmake-bazel-frontends-nvidia/Dockerfile
@@ -33,7 +33,7 @@ # does not support Ubuntu 18.04. # This allows to share configuration with base CMake, but it also means we need # to MATCH the driver version between the host machine and the docker image. -FROM gcr.io/iree-oss/cmake-bazel-frontends-vulkan@sha256:e99fd07a48e2b1a00200b3b600ff00878d413045cb7809fe73dac4c36fa4825a AS final +FROM gcr.io/iree-oss/cmake-bazel-frontends-vulkan@sha256:cdf41d7ee7707eb3e79d56f2f1f8bd7e9a0ac3a1122dc4f89f8190154796a6bc AS final ARG NVIDIA_COMMON_DEB ARG NVIDIA_GL_DEB ARG NVIDIA_COMPUTE_DEB
diff --git a/build_tools/docker/cmake-bazel-frontends-swiftshader/Dockerfile b/build_tools/docker/cmake-bazel-frontends-swiftshader/Dockerfile index 44f8c08..8ddc0c4 100644 --- a/build_tools/docker/cmake-bazel-frontends-swiftshader/Dockerfile +++ b/build_tools/docker/cmake-bazel-frontends-swiftshader/Dockerfile
@@ -4,7 +4,7 @@ # See https://llvm.org/LICENSE.txt for license information. # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -FROM gcr.io/iree-oss/cmake-bazel-frontends-vulkan@sha256:e99fd07a48e2b1a00200b3b600ff00878d413045cb7809fe73dac4c36fa4825a AS final +FROM gcr.io/iree-oss/cmake-bazel-frontends-vulkan@sha256:cdf41d7ee7707eb3e79d56f2f1f8bd7e9a0ac3a1122dc4f89f8190154796a6bc AS final COPY --from=gcr.io/iree-oss/swiftshader@sha256:ccae32c83c89a31e8fc5542e480c29f28bbf4a3b3b80198c06b687a92c6813f3 \ /swiftshader /swiftshader
diff --git a/build_tools/docker/cmake-bazel-frontends-vulkan/Dockerfile b/build_tools/docker/cmake-bazel-frontends-vulkan/Dockerfile index 03ce51d..7552acf 100644 --- a/build_tools/docker/cmake-bazel-frontends-vulkan/Dockerfile +++ b/build_tools/docker/cmake-bazel-frontends-vulkan/Dockerfile
@@ -7,7 +7,7 @@ # This image provides the Vulkan SDK. Requires a child image to provide # a Vulkan ICD. -FROM gcr.io/iree-oss/cmake-bazel-frontends@sha256:8974ee20d855ecfc8b9e511f4fb4a25d67888952738d2c457d9fd91116b6b67f AS final +FROM gcr.io/iree-oss/cmake-bazel-frontends@sha256:7633ff2b483a07b6c786ffab40cca7cae64d6a211ad0e95ff55d3f1cd2dd1ea9 AS final ARG VULKAN_SDK_VERSION=1.2.154.0
diff --git a/build_tools/docker/cmake-bazel-frontends/Dockerfile b/build_tools/docker/cmake-bazel-frontends/Dockerfile index 411ed20..e02cbc1 100644 --- a/build_tools/docker/cmake-bazel-frontends/Dockerfile +++ b/build_tools/docker/cmake-bazel-frontends/Dockerfile
@@ -6,13 +6,13 @@ FROM gcr.io/iree-oss/util@sha256:40846b4aea5886af3250399d6adfdb3e1195a8b0177706bb0375e812d62dc49c AS fetch-kws WORKDIR /fetch-kws -ARG KWS_COMMIT=4bfbfe4ec19fe39cf9d1803e894d8bfb5fd56a45 +ARG KWS_COMMIT=168f27a070dcd4b0ce39a70f9a702608ff10eb44 RUN wget "https://github.com/google-research/google-research/tarball/${KWS_COMMIT?}" \ -O google-research.tar.gz \ && tar --extract --gzip --file=google-research.tar.gz \ --wildcards */kws_streaming --strip-components=1 -FROM gcr.io/iree-oss/cmake-python@sha256:24892ba1d23b8aaf6e4b9c700f524e7de1f19cff903fa6a5dc87579d97077453 AS final +FROM gcr.io/iree-oss/cmake-python@sha256:51817f1a98f9ed9237577133b4c674b163280fd747c1745d6d0d93f0f2b01fb3 AS final # Install bazel. COPY --from=gcr.io/iree-oss/bazel@sha256:5e52c7d43b6fdff35d884b8b8b92b1b6e2151d675019edc92f09018e558e0f94 \ @@ -20,7 +20,7 @@ /usr/bin/ # Install tensorflow. -RUN python3 -m pip install tf-nightly==2.6.0.dev20210527 +RUN python3 -m pip install tf-nightly==2.7.0.dev20210806 # Install JAX. RUN python3 -m pip install --upgrade jax jaxlib flax
diff --git a/build_tools/docker/cmake-python-nvidia/Dockerfile b/build_tools/docker/cmake-python-nvidia/Dockerfile index 8716d17..5fe7bb5 100644 --- a/build_tools/docker/cmake-python-nvidia/Dockerfile +++ b/build_tools/docker/cmake-python-nvidia/Dockerfile
@@ -36,7 +36,7 @@ # does not support Ubuntu 18.04. # This allows to share configuration with base CMake, but it also means we need # to MATCH the driver version between the host machine and the docker image. -FROM gcr.io/iree-oss/cmake-python-vulkan@sha256:899262e823b2d73a3ca9709d2ba3be7b21a9ffe41203e72a621a61844763f6b9 AS final +FROM gcr.io/iree-oss/cmake-python-vulkan@sha256:894d91b6ddd3435f0e5cb4424a81296438252dae4d8934e84aa6be4d02b81343 AS final ARG NVIDIA_COMMON_DEB ARG NVIDIA_GL_DEB ARG NVIDIA_COMPUTE_DEB
diff --git a/build_tools/docker/cmake-python-swiftshader/Dockerfile b/build_tools/docker/cmake-python-swiftshader/Dockerfile index c4d444f..5fc85ed 100644 --- a/build_tools/docker/cmake-python-swiftshader/Dockerfile +++ b/build_tools/docker/cmake-python-swiftshader/Dockerfile
@@ -7,7 +7,7 @@ # An image for building IREE using CMake and testing IREE with SwiftShader # Vulkan implementation. -FROM gcr.io/iree-oss/cmake-python-vulkan@sha256:899262e823b2d73a3ca9709d2ba3be7b21a9ffe41203e72a621a61844763f6b9 AS final +FROM gcr.io/iree-oss/cmake-python-vulkan@sha256:894d91b6ddd3435f0e5cb4424a81296438252dae4d8934e84aa6be4d02b81343 AS final COPY --from=gcr.io/iree-oss/swiftshader@sha256:ccae32c83c89a31e8fc5542e480c29f28bbf4a3b3b80198c06b687a92c6813f3 \ /swiftshader /swiftshader
diff --git a/build_tools/docker/cmake-python-vulkan/Dockerfile b/build_tools/docker/cmake-python-vulkan/Dockerfile index a7ff93d..0a5826c 100644 --- a/build_tools/docker/cmake-python-vulkan/Dockerfile +++ b/build_tools/docker/cmake-python-vulkan/Dockerfile
@@ -8,7 +8,7 @@ # This image provides the Vulkan SDK. Requires a child image to provide # a Vulkan ICD. -FROM gcr.io/iree-oss/cmake-python@sha256:24892ba1d23b8aaf6e4b9c700f524e7de1f19cff903fa6a5dc87579d97077453 AS final +FROM gcr.io/iree-oss/cmake-python@sha256:51817f1a98f9ed9237577133b4c674b163280fd747c1745d6d0d93f0f2b01fb3 AS final ARG VULKAN_SDK_VERSION=1.2.154.0
diff --git a/build_tools/docker/cmake-python/Dockerfile b/build_tools/docker/cmake-python/Dockerfile index a0a1a60..ce84ea6 100644 --- a/build_tools/docker/cmake-python/Dockerfile +++ b/build_tools/docker/cmake-python/Dockerfile
@@ -10,9 +10,13 @@ # Dependencies for the python bindings tests. RUN apt-get update \ && apt-get install -y \ - python3 \ + python3.7 \ + python3.7-dev \ + && update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.7 1 \ + && apt-get install -y \ python3-pip \ python3-setuptools \ + python3-distutils \ && python3 -m pip install --upgrade pip \ && python3 -m pip install \ numpy==1.19.4 absl-py==0.12.0 requests PyYAML==5.4.1 wheel==0.36.2 \
diff --git a/build_tools/docker/prod_digests.txt b/build_tools/docker/prod_digests.txt index dd0a5b7..a5ea3fe 100644 --- a/build_tools/docker/prod_digests.txt +++ b/build_tools/docker/prod_digests.txt
@@ -2,19 +2,19 @@ gcr.io/iree-oss/util@sha256:40846b4aea5886af3250399d6adfdb3e1195a8b0177706bb0375e812d62dc49c gcr.io/iree-oss/cmake@sha256:9d9953acf5ca0cf1ff3e8de32f10f24dfab1c4e8ec5d1fc047f556024ee4bed6 gcr.io/iree-oss/swiftshader@sha256:ccae32c83c89a31e8fc5542e480c29f28bbf4a3b3b80198c06b687a92c6813f3 -gcr.io/iree-oss/cmake-python@sha256:24892ba1d23b8aaf6e4b9c700f524e7de1f19cff903fa6a5dc87579d97077453 -gcr.io/iree-oss/cmake-android@sha256:a152f0d006e237105f8ed9a7e041a6a235c1a69dc1e209383c81087c574b39bf +gcr.io/iree-oss/cmake-python@sha256:51817f1a98f9ed9237577133b4c674b163280fd747c1745d6d0d93f0f2b01fb3 +gcr.io/iree-oss/cmake-android@sha256:7d780787608474301e74e1b5cc2a1bfd1304a79ed9e0774c7ed422c0e4a38625 gcr.io/iree-oss/bazel@sha256:5e52c7d43b6fdff35d884b8b8b92b1b6e2151d675019edc92f09018e558e0f94 gcr.io/iree-oss/vulkan@sha256:5812ee64806a7f3df0739ccf0930c27cabce346901488eceb1ee66c9c0a5ae96 -gcr.io/iree-oss/rbe-toolchain@sha256:d69c260b98a97ad430d34c4591fb2399e00888750f5d47ede00c1e6f3e774e5a -gcr.io/iree-oss/cmake-python-vulkan@sha256:899262e823b2d73a3ca9709d2ba3be7b21a9ffe41203e72a621a61844763f6b9 -gcr.io/iree-oss/cmake-python-swiftshader@sha256:75ad1156a486aa4bf3f93d45f9a7e0f052345fa103171dfc742f8c92d4a7ea0f -gcr.io/iree-oss/cmake-python-nvidia@sha256:f05191ba4fa3ccc392a4a1be0b0a49ea2a9c61b397f429430b6f1a2c6d7c8f59 -gcr.io/iree-oss/cmake-bazel-frontends@sha256:8974ee20d855ecfc8b9e511f4fb4a25d67888952738d2c457d9fd91116b6b67f -gcr.io/iree-oss/cmake-bazel-frontends-vulkan@sha256:e99fd07a48e2b1a00200b3b600ff00878d413045cb7809fe73dac4c36fa4825a -gcr.io/iree-oss/cmake-bazel-frontends-nvidia@sha256:71eeb44ba014ee043ae2adeeb6458bc281444ee6f295b5ba7e4337a69a95f7df -gcr.io/iree-oss/cmake-bazel-frontends-swiftshader@sha256:4e018bd74c630f89f86b700a47b6a6792c8f97e337870af69a000e578a3ca688 +gcr.io/iree-oss/rbe-toolchain@sha256:62b161e79413f0f59ae3845c377b10e60a4a639f3d32569a82b620f017837a68 
+gcr.io/iree-oss/cmake-python-vulkan@sha256:894d91b6ddd3435f0e5cb4424a81296438252dae4d8934e84aa6be4d02b81343 +gcr.io/iree-oss/cmake-python-swiftshader@sha256:9c3e5b8ba0c8ab0a9ef55c7141c71365cb3fab6f63c197fd4c7195d054ca6906 +gcr.io/iree-oss/cmake-python-nvidia@sha256:d96ffdc44026bf112efca82a5150d783e8eba8976c7bc150863ec5868de40778 +gcr.io/iree-oss/cmake-bazel-frontends@sha256:7633ff2b483a07b6c786ffab40cca7cae64d6a211ad0e95ff55d3f1cd2dd1ea9 +gcr.io/iree-oss/cmake-bazel-frontends-vulkan@sha256:cdf41d7ee7707eb3e79d56f2f1f8bd7e9a0ac3a1122dc4f89f8190154796a6bc +gcr.io/iree-oss/cmake-bazel-frontends-nvidia@sha256:7a2189f9c2c5491878fdf6d38ddab18832020a06285eeff31b8376b9415fb7e9 +gcr.io/iree-oss/cmake-bazel-frontends-swiftshader@sha256:103676490242311b9fad841294689a7ce1c755b935a21d8d898c25cfe3ec15e8 gcr.io/iree-oss/cmake-riscv@sha256:95489593bc9b0cd325ce9c1a32b47389c01b174a5b8190a16d937d2e8828d384 -gcr.io/iree-oss/cmake-bazel-frontends-android@sha256:1392e3a27cddbdc597817168fb61e125bbdcbfd9076eff9d70bd8012b0a0c5ba -gcr.io/iree-oss/samples@sha256:be5465585706b620d6c722caa6237eafdfaa8dd11ce20db0981b979f2d3387b3 +gcr.io/iree-oss/cmake-bazel-frontends-android@sha256:cdb1b38772643f7acbc296b558ccc868900a47a1378cf63da3fbe469dcf42428 +gcr.io/iree-oss/samples@sha256:e4099b5ed2a2b9292402efb4c8537e5fb465099c5fc329df67d8dbe54761471e gcr.io/iree-oss/cmake-emscripten@sha256:8acad361d23cb586187c2ea29df3a1ab301b5283c3648beb328681d69ecd0ab0
diff --git a/build_tools/docker/rbe-toolchain/Dockerfile b/build_tools/docker/rbe-toolchain/Dockerfile index ef4d419..38188fe 100644 --- a/build_tools/docker/rbe-toolchain/Dockerfile +++ b/build_tools/docker/rbe-toolchain/Dockerfile
@@ -58,19 +58,9 @@ # Keep track of the commit we are using. RUN echo "${SWIFTSHADER_COMMIT?}" > /swiftshader/git-commit - - ######################## Final Image ########################################### FROM gcr.io/cloud-marketplace/google/rbe-ubuntu16-04@sha256:1a8ed713f40267bb51fe17de012fa631a20c52df818ccb317aaed2ee068dfc61 AS final -######################## Python 3 ############################################## -RUN apt-get update \ - && apt-get install -y \ - python3 \ - python3-pip \ - && python3 -m pip install --upgrade pip \ - && python3 -m pip install numpy - ######################## Vulkan SDK ############################################ ARG VULKAN_SDK_VERSION=1.2.154.0 @@ -92,20 +82,6 @@ # Set VK_ICD_FILENAMES so Vulkan loader can find the SwiftShader ICD. ENV VK_ICD_FILENAMES /swiftshader/vk_swiftshader_icd.json -RUN apt-get update && apt-get install -y software-properties-common -# apt-add-repository requires a version of python with the softwareproperties -# module. To use this command, we: -# 1. remove the symlink to python3 from python3.6 and symlink it to python3.5 -# 2. run apt-add-repository with python3 = python3.5 -# 3. resymlink python3 to /opt/python3.6/bin/python3.6 -# See https://github.com/google/iree/issues/1966 for more information. -RUN rm /usr/bin/python3 \ - && ln -s /usr/bin/python3.5 /usr/bin/python3 \ - && add-apt-repository ppa:deadsnakes/ppa \ - && rm /usr/bin/python3 \ - && ln -s /opt/python3.6/bin/python3.6 /usr/bin/python3 - -# Install python3.6-dev -RUN apt-get update \ - && apt-get install -y python3.6 python3.6-dev +######################## LibXML2 for LLD ####################################### +RUN apt-get update && apt-get install -y libxml2-dev
diff --git a/build_tools/docker/samples/Dockerfile b/build_tools/docker/samples/Dockerfile index b6b6d28..5b32cdb 100644 --- a/build_tools/docker/samples/Dockerfile +++ b/build_tools/docker/samples/Dockerfile
@@ -9,10 +9,10 @@ # * Vulkan (using SwiftShader) # * Python (including `venv` and common pip packages needed for Colab) -FROM gcr.io/iree-oss/cmake-python-swiftshader@sha256:75ad1156a486aa4bf3f93d45f9a7e0f052345fa103171dfc742f8c92d4a7ea0f AS final +FROM gcr.io/iree-oss/cmake-python-swiftshader@sha256:9c3e5b8ba0c8ab0a9ef55c7141c71365cb3fab6f63c197fd4c7195d054ca6906 AS final # Update setuptools per https://github.com/pypa/setuptools/issues/1694#issuecomment-466010982 -RUN apt-get update && apt-get install -y python3-venv python-setuptools && \ +RUN apt-get update && apt-get install -y python3-venv python3.7-venv python-setuptools && \ python3 -m pip install --upgrade setuptools # Install additional packages often used in notebooks.
diff --git a/build_tools/kokoro/gcp_ubuntu/cmake-bazel/linux/x86-swiftshader/build.sh b/build_tools/kokoro/gcp_ubuntu/cmake-bazel/linux/x86-swiftshader/build.sh index f74194c..5966d91 100755 --- a/build_tools/kokoro/gcp_ubuntu/cmake-bazel/linux/x86-swiftshader/build.sh +++ b/build_tools/kokoro/gcp_ubuntu/cmake-bazel/linux/x86-swiftshader/build.sh
@@ -64,6 +64,6 @@ export CTEST_PARALLEL_LEVEL=${CTEST_PARALLEL_LEVEL:-$(nproc)} echo "Testing with CTest" -ctest --output-on-failure \ +ctest --timeout 900 --output-on-failure \ --tests-regex "^integrations/tensorflow/|^bindings/python/" \ --label-exclude "^nokokoro$|^vulkan_uses_vk_khr_shader_float16_int8$"
diff --git a/build_tools/kokoro/gcp_ubuntu/cmake-bazel/linux/x86-swiftshader/build_kokoro.sh b/build_tools/kokoro/gcp_ubuntu/cmake-bazel/linux/x86-swiftshader/build_kokoro.sh index 37554c3..18ee482 100755 --- a/build_tools/kokoro/gcp_ubuntu/cmake-bazel/linux/x86-swiftshader/build_kokoro.sh +++ b/build_tools/kokoro/gcp_ubuntu/cmake-bazel/linux/x86-swiftshader/build_kokoro.sh
@@ -24,7 +24,7 @@ docker_setup docker run "${DOCKER_RUN_ARGS[@]?}" \ - gcr.io/iree-oss/cmake-bazel-frontends-swiftshader@sha256:4e018bd74c630f89f86b700a47b6a6792c8f97e337870af69a000e578a3ca688 \ + gcr.io/iree-oss/cmake-bazel-frontends-swiftshader@sha256:103676490242311b9fad841294689a7ce1c755b935a21d8d898c25cfe3ec15e8 \ build_tools/kokoro/gcp_ubuntu/cmake-bazel/linux/x86-swiftshader/build.sh # Kokoro will rsync this entire directory back to the executor orchestrating the
diff --git a/build_tools/kokoro/gcp_ubuntu/cmake-bazel/linux/x86-turing/build.sh b/build_tools/kokoro/gcp_ubuntu/cmake-bazel/linux/x86-turing/build.sh index a5d1d99..eb92860 100755 --- a/build_tools/kokoro/gcp_ubuntu/cmake-bazel/linux/x86-turing/build.sh +++ b/build_tools/kokoro/gcp_ubuntu/cmake-bazel/linux/x86-turing/build.sh
@@ -68,7 +68,7 @@ # Only test drivers that use the GPU, since we run all tests on non-GPU machines # as well. echo "Testing with CTest" -ctest --output-on-failure \ +ctest --timeout 900 --output-on-failure \ --tests-regex "^integrations/tensorflow/|^bindings/python/" \ --label-regex "^driver=vulkan$|^driver=cuda$" \ --label-exclude "^nokokoro$"
diff --git a/build_tools/kokoro/gcp_ubuntu/cmake-bazel/linux/x86-turing/build_kokoro.sh b/build_tools/kokoro/gcp_ubuntu/cmake-bazel/linux/x86-turing/build_kokoro.sh index 513ac72..1c27734 100755 --- a/build_tools/kokoro/gcp_ubuntu/cmake-bazel/linux/x86-turing/build_kokoro.sh +++ b/build_tools/kokoro/gcp_ubuntu/cmake-bazel/linux/x86-turing/build_kokoro.sh
@@ -30,7 +30,7 @@ docker run "${DOCKER_RUN_ARGS[@]?}" \ --gpus all \ - gcr.io/iree-oss/cmake-bazel-frontends-nvidia@sha256:71eeb44ba014ee043ae2adeeb6458bc281444ee6f295b5ba7e4337a69a95f7df \ + gcr.io/iree-oss/cmake-bazel-frontends-nvidia@sha256:7a2189f9c2c5491878fdf6d38ddab18832020a06285eeff31b8376b9415fb7e9 \ build_tools/kokoro/gcp_ubuntu/cmake-bazel/linux/x86-turing/build.sh # Kokoro will rsync this entire directory back to the executor orchestrating the
diff --git a/build_tools/kokoro/gcp_ubuntu/cmake/android/arm64-v8a/build_kokoro.sh b/build_tools/kokoro/gcp_ubuntu/cmake/android/arm64-v8a/build_kokoro.sh index dba5305..a24b2165 100755 --- a/build_tools/kokoro/gcp_ubuntu/cmake/android/arm64-v8a/build_kokoro.sh +++ b/build_tools/kokoro/gcp_ubuntu/cmake/android/arm64-v8a/build_kokoro.sh
@@ -24,7 +24,7 @@ docker_setup docker run "${DOCKER_RUN_ARGS[@]?}" \ - gcr.io/iree-oss/cmake-android@sha256:a152f0d006e237105f8ed9a7e041a6a235c1a69dc1e209383c81087c574b39bf \ + gcr.io/iree-oss/cmake-android@sha256:7d780787608474301e74e1b5cc2a1bfd1304a79ed9e0774c7ed422c0e4a38625 \ build_tools/kokoro/gcp_ubuntu/cmake/android/build.sh arm64-v8a # Kokoro will rsync this entire directory back to the executor orchestrating the
diff --git a/build_tools/kokoro/gcp_ubuntu/cmake/linux/x86-swiftshader-asan/build.sh b/build_tools/kokoro/gcp_ubuntu/cmake/linux/x86-swiftshader-asan/build.sh index 1f5b793..3c6a615 100755 --- a/build_tools/kokoro/gcp_ubuntu/cmake/linux/x86-swiftshader-asan/build.sh +++ b/build_tools/kokoro/gcp_ubuntu/cmake/linux/x86-swiftshader-asan/build.sh
@@ -125,6 +125,6 @@ cd ${CMAKE_BUILD_DIR?} echo "Testing with ctest" -ctest --output-on-failure \ +ctest --timeout 900 --output-on-failure \ --label-exclude "^driver=cuda$|^driver=vulkan$" \ --exclude-regex "${excluded_tests_regex?}"
diff --git a/build_tools/kokoro/gcp_ubuntu/cmake/linux/x86-swiftshader-asan/build_kokoro.sh b/build_tools/kokoro/gcp_ubuntu/cmake/linux/x86-swiftshader-asan/build_kokoro.sh index 9228bdb..d23027b 100755 --- a/build_tools/kokoro/gcp_ubuntu/cmake/linux/x86-swiftshader-asan/build_kokoro.sh +++ b/build_tools/kokoro/gcp_ubuntu/cmake/linux/x86-swiftshader-asan/build_kokoro.sh
@@ -24,7 +24,7 @@ docker_setup docker run "${DOCKER_RUN_ARGS[@]?}" \ - gcr.io/iree-oss/cmake-python-swiftshader@sha256:75ad1156a486aa4bf3f93d45f9a7e0f052345fa103171dfc742f8c92d4a7ea0f \ + gcr.io/iree-oss/cmake-python-swiftshader@sha256:9c3e5b8ba0c8ab0a9ef55c7141c71365cb3fab6f63c197fd4c7195d054ca6906 \ build_tools/kokoro/gcp_ubuntu/cmake/linux/x86-swiftshader/build.sh # Kokoro will rsync this entire directory back to the executor orchestrating the
diff --git a/build_tools/kokoro/gcp_ubuntu/cmake/linux/x86-swiftshader/build_kokoro.sh b/build_tools/kokoro/gcp_ubuntu/cmake/linux/x86-swiftshader/build_kokoro.sh index 9228bdb..d23027b 100755 --- a/build_tools/kokoro/gcp_ubuntu/cmake/linux/x86-swiftshader/build_kokoro.sh +++ b/build_tools/kokoro/gcp_ubuntu/cmake/linux/x86-swiftshader/build_kokoro.sh
@@ -24,7 +24,7 @@ docker_setup docker run "${DOCKER_RUN_ARGS[@]?}" \ - gcr.io/iree-oss/cmake-python-swiftshader@sha256:75ad1156a486aa4bf3f93d45f9a7e0f052345fa103171dfc742f8c92d4a7ea0f \ + gcr.io/iree-oss/cmake-python-swiftshader@sha256:9c3e5b8ba0c8ab0a9ef55c7141c71365cb3fab6f63c197fd4c7195d054ca6906 \ build_tools/kokoro/gcp_ubuntu/cmake/linux/x86-swiftshader/build.sh # Kokoro will rsync this entire directory back to the executor orchestrating the
diff --git a/build_tools/kokoro/gcp_ubuntu/cmake/linux/x86-turing/build_kokoro.sh b/build_tools/kokoro/gcp_ubuntu/cmake/linux/x86-turing/build_kokoro.sh index ee3ffde..c3f6acf 100755 --- a/build_tools/kokoro/gcp_ubuntu/cmake/linux/x86-turing/build_kokoro.sh +++ b/build_tools/kokoro/gcp_ubuntu/cmake/linux/x86-turing/build_kokoro.sh
@@ -30,7 +30,7 @@ docker run "${DOCKER_RUN_ARGS[@]?}" \ --gpus all \ - gcr.io/iree-oss/cmake-python-nvidia@sha256:f05191ba4fa3ccc392a4a1be0b0a49ea2a9c61b397f429430b6f1a2c6d7c8f59 \ + gcr.io/iree-oss/cmake-python-nvidia@sha256:d96ffdc44026bf112efca82a5150d783e8eba8976c7bc150863ec5868de40778 \ build_tools/kokoro/gcp_ubuntu/cmake/linux/x86-turing/build.sh # Kokoro will rsync this entire directory back to the executor orchestrating the
diff --git a/build_tools/mako/configuration.py b/build_tools/mako/configuration.py index 6c89d18..4816ac1 100644 --- a/build_tools/mako/configuration.py +++ b/build_tools/mako/configuration.py
@@ -269,18 +269,18 @@ ] })), ]), - ModelBenchmarkInfo( - name="mobilebert-f16", - model_artifacts_name="mobilebert-f16.tar.gz", - model_path="mobilebert-f16/mobilebert-f16.mlir", - flagfile_path="mobilebert-f16/flagfile", - phones=[ - PhoneBenchmarkInfo( - name="S20", - benchmark_key="4636549841944576", - targets=get_s20_default_target_list( - skipped_target=['cpu', 'vmvx3t', 'cpu2', 'vlk2'])), - ]) + #ModelBenchmarkInfo( + # name="mobilebert-f16", + # model_artifacts_name="mobilebert-f16.tar.gz", + # model_path="mobilebert-f16/mobilebert-f16.mlir", + # flagfile_path="mobilebert-f16/flagfile", + # phones=[ + # PhoneBenchmarkInfo( + # name="S20", + # benchmark_key="4636549841944576", + # targets=get_s20_default_target_list( + # skipped_target=['cpu', 'vmvx3t', 'cpu2', 'vlk2'])), + # ]) ]
diff --git a/build_tools/manylinux_py_setup.py b/build_tools/manylinux_py_setup.py deleted file mode 100755 index 5ed92a1..0000000 --- a/build_tools/manylinux_py_setup.py +++ /dev/null
@@ -1,78 +0,0 @@ -#!/opt/python/cp38-cp38/bin/python3 -# Copyright 2020 The IREE Authors -# -# Licensed under the Apache License v2.0 with LLVM Exceptions. -# See https://llvm.org/LICENSE.txt for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -"""Generates CMake arguments to build all manylinux python versions. - -manylinux containers have all python version linked under /opt/python. -This script scrapes them to get configuration, install deps, etc. - -Usage: - Install dependencies: - manylinux_py_setup.py deps - Get CMake arguments to build (typically via $() expansion): - manylinux_py_setup.py args -""" - -import os -from pathlib import Path -import subprocess -import sys -import sysconfig - - -def _get_python_exes(): - PYTHON_PARENT_PATH = Path("/opt/python") - return PYTHON_PARENT_PATH.glob("*/bin/python") - - -def install_deps(): - for python_exe in _get_python_exes(): - args = [ - str(python_exe), - "-m", - "pip", - "install", - "absl-py", - "numpy", - ] - print("EXEC:", " ".join(args)) - subprocess.run(args, check=True) - - -def dump_current(identifier): - print("-DIREE_MULTIPY_{}_EXECUTABLE='{}'".format(identifier, sys.executable)) - print("-DIREE_MULTIPY_{}_INCLUDE_DIRS='{}'".format( - identifier, sysconfig.get_config_var("INCLUDEPY"))) - # TODO: Print LIBRARIES for Windows and OSX - print("-DIREE_MULTIPY_{}_EXTENSION='{}'".format( - identifier, sysconfig.get_config_var("EXT_SUFFIX"))) - - -def dump_all(): - versions_ids = [] - for python_exe in _get_python_exes(): - identifier = python_exe.parent.parent.name - versions_ids.append(identifier) - # Invoke ourselves with a different interpreter/args to dump config. 
- subprocess.run([str(python_exe), __file__, "_current_args", identifier], - check=True) - print("-DIREE_MULTIPY_VERSIONS='{}'".format(";".join(versions_ids))) - - -if __name__ == "__main__": - if len(sys.argv) < 2: - print("SYNTAX: mainlinux_py_setup.py {deps|args}") - sys.exit(1) - command = sys.argv[1] - if command == "args": - dump_all() - elif command == "_current_args": - dump_current(sys.argv[2]) - elif command == "deps": - install_deps() - else: - print("Unexpected command") - sys.exit(1)
diff --git a/build_tools/python_deploy/.gitignore b/build_tools/python_deploy/.gitignore new file mode 100644 index 0000000..450df16 --- /dev/null +++ b/build_tools/python_deploy/.gitignore
@@ -0,0 +1 @@ +manylinux2014-x64
diff --git a/build_tools/python_deploy/audit_wheels.sh b/build_tools/python_deploy/audit_wheels.sh new file mode 100755 index 0000000..709a644 --- /dev/null +++ b/build_tools/python_deploy/audit_wheels.sh
@@ -0,0 +1,47 @@ +#!/bin/bash +# Copyright 2021 The IREE Authors +# +# Licensed under the Apache License v2.0 with LLVM Exceptions. +# See https://llvm.org/LICENSE.txt for license information. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + +# Given a list of wheel files, launch a suitable docker container +# and audit/repair them. +# Usage: ./audit_wheels.sh <files> +# Typically: +# ./build_tools/python_deploy/audit_wheels.sh \ +# ./llvm-projects/iree-compiler-api/wheels/iree_compiler_api-*.whl +# +# Note that this will mount the current directory under docker, so all +# files must be relative. + +set -eu -o errtrace + +this_dir="$(cd $(dirname $0) && pwd)" +script_name="$(basename $0)" +dockcross_image="manylinux2014-x64" + +function run_on_host() { + echo "Running on host" + "$this_dir/setup_dockcross.sh" $dockcross_image + + echo "Running in docker..." + "$this_dir/$dockcross_image" \ + --args "-v $this_dir:/python_deploy -e __MANYLINUX_BUILD_WHEELS_IN_DOCKER=1" \ + -- bash /python_deploy/$script_name "$@" +} + +function run_in_docker() { + for relative_path in "$@"; do + abs_path="/work/$relative_path" + echo "Repairing $relative_path ($abs_path)" + auditwheel repair -w "$(dirname $abs_path)"/audited "$abs_path" + done +} + +# Trampoline to the docker container if running on the host. +if [ -z "${__MANYLINUX_BUILD_WHEELS_IN_DOCKER-}" ]; then + run_on_host "$@" +else + run_in_docker "$@" +fi
diff --git a/build_tools/python_deploy/manylinux_foreach_py.sh b/build_tools/python_deploy/manylinux_foreach_py.sh new file mode 100755 index 0000000..11ce388 --- /dev/null +++ b/build_tools/python_deploy/manylinux_foreach_py.sh
@@ -0,0 +1,59 @@ +#!/bin/bash +# Copyright 2020 The IREE Authors +# +# Licensed under the Apache License v2.0 with LLVM Exceptions. +# See https://llvm.org/LICENSE.txt for license information. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + +# Script to do ad-hoc builds for all python versions in a dockcross manylinux +# container. Mostly, there are CI scripts and actions that do this, but they +# generally are not friendly for running directly, and sometimes you just +# want to be able to build a binary without balancing the world. This script +# is for such times. +# +# Typical usage: +# ./build_tools/python_deploy/manylinux_foreach_py.sh \ +# ./llvm-projects/iree-compiler-api/build_tools/build_python_wheels.sh +set -eu -o errtrace + +this_dir="$(cd $(dirname $0) && pwd)" +script_name="$(basename $0)" +dockcross_image="manylinux2014-x64" +python_versions="cp37-cp37m cp38-cp38 cp39-cp39" + +function run_on_host() { + echo "Running on host" + "$this_dir/setup_dockcross.sh" $dockcross_image + + echo "Running in docker..." + "$this_dir/$dockcross_image" \ + --args "-v $this_dir:/python_deploy -e __MANYLINUX_BUILD_WHEELS_IN_DOCKER=1 -e MANYLINUX_AUDITWHEEL_REPAIR=1" \ + -- bash /python_deploy/$script_name "$@" +} + +function run_in_docker() { + local script="$1" + shift + echo "Running in docker" + local orig_path="$PATH" + local script_path="/work/$script" + for python_version in $python_versions; do + python_dir="/opt/python/$python_version" + if ! [ -x "$python_dir/bin/python" ]; then + echo "ERROR: Could not find python: $python_dir (skipping)" + continue + fi + export PATH=$python_dir/bin:$orig_path + # CMake has trouble with manylinux versions of python, so set some env + # vars. + echo "Running $script_path $@" + "$script_path" "$@" + done +} + +# Trampoline to the docker container if running on the host. +if [ -z "${__MANYLINUX_BUILD_WHEELS_IN_DOCKER-}" ]; then + run_on_host "$@" +else + run_in_docker "$@" +fi
diff --git a/build_tools/python_deploy/setup_dockcross.sh b/build_tools/python_deploy/setup_dockcross.sh new file mode 100644 index 0000000..db584c7 --- /dev/null +++ b/build_tools/python_deploy/setup_dockcross.sh
@@ -0,0 +1,22 @@ +#!/bin/bash +# Copyright 2021 The IREE Authors +# +# Licensed under the Apache License v2.0 with LLVM Exceptions. +# See https://llvm.org/LICENSE.txt for license information. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + +# Sets up launch scripts for containers. +# Typical usage: ./setup_dockcross.sh manylinux2014-x64 + +set -eu -o errtrace + +this_dir="$(cd $(dirname $0) && pwd)" + +for container in "$@"; do + launch_script="$this_dir/$container" + if ! [ -x "$launch_script" ]; then + echo "Launch script not found. Generating..." + docker run --rm dockcross/$container > $launch_script + chmod u+x $launch_script + fi +done
diff --git a/build_tools/pytype/check_diff.sh b/build_tools/pytype/check_diff.sh index 4159b03..02114a7 100755 --- a/build_tools/pytype/check_diff.sh +++ b/build_tools/pytype/check_diff.sh
@@ -20,13 +20,13 @@ if [[ "${DIFF_TARGET?}" = "all" ]]; then FILES=$(find -name "*\.py" -not -path "./third_party/*") else - FILES=$(git diff --diff-filter=d --name-only "${DIFF_TARGET?}" | grep '.*\.py') + FILES=$(git diff --diff-filter=d --name-only "${DIFF_TARGET?}" | grep '.*\.py$' | grep -vP 'lit.cfg.py') fi # We seperate the python files into multiple pytype calls because otherwise # Ninja gets confused. See https://github.com/google/pytype/issues/198 -BASE=$(echo "${FILES?}" | grep -vP '^(\./)?integrations/*') +BASE=$(echo "${FILES?}" | grep -vP '^(\./)?integrations/*$') IREE_TF=$(echo "${FILES?}" | \ grep -P '^(\./)?integrations/tensorflow/bindings/python/iree/tf/.*') IREE_XLA=$(echo "${FILES?}" | \
diff --git a/docs/developers/design_docs/function_abi.md b/docs/developers/design_docs/function_abi.md index e005918..49cdc54 100644 --- a/docs/developers/design_docs/function_abi.md +++ b/docs/developers/design_docs/function_abi.md
@@ -64,14 +64,14 @@ - Runtime: [`iree_vm_list`](https://github.com/google/iree/blob/main/iree/vm/list.h) containing `i8` - - Compile Time: `!iree.list<i8>` + - Compile Time: `!util.list<i8>` - Tuple: - Runtime: [`iree_vm_list`](https://github.com/google/iree/blob/main/iree/vm/list.h) of variant - - Compile Time: `!iree.list<?>` + - Compile Time: `!util.list<?>` - Note that these are statically type erased at the boundary. - TypedList (homogenous): @@ -79,7 +79,7 @@ - Runtime: [`iree_vm_list`](https://github.com/google/iree/blob/main/iree/vm/list.h) of `T` - - Compile Time: `!iree.list<T>` + - Compile Time: `!util.list<T>` ### Extended Type Calling Conventions @@ -105,7 +105,7 @@ slots. In both, its slots are of fixed arity. In this convention, such a structure is represented as a `Tuple` in the native -calling convention (i.e. `!iree.list` of variant type). The order of the +calling convention (i.e. `!util.list` of variant type). The order of the elements of the tuple are the natural order of the structure, where that is either: @@ -118,7 +118,7 @@ Most languages interop between byte arrays (i.e. the native ABI `String` type) by way of applying an encoding. Such strings are just a sequence of bytes (i.e. -`!iree.list<i8>`). +`!util.list<i8>`). #### Typed List @@ -134,8 +134,8 @@ performance: by nature they are already indirected and have overheads. In the native ABI, these are represented as a composite tuple type (i.e. today a -list since sugar for tuple is not yet defined): `!iree.tuple<!iree.list<T>, -!iree.list<index>>`. The first element of the tuple is the list of values, +list since sugar for tuple is not yet defined): `!iree.tuple<!util.list<T>, +!util.list<index>>`. The first element of the tuple is the list of values, packed with a C-Layout and the second element is the list of dimension sizes. 
#### Reflection @@ -168,6 +168,9 @@ A compound type tuple has a type identifier as its first element, followed with type specific fields: +- `["named", "key", {slot_type}]`: Associates a name with a slot. This is + used with the root argument list to denote named arguments that can be + passed positionally or by keyword. - `["ndarray", {element_type}, {rank}, {dim...}]`: For unknown rank, the `rank` will be `null` and there will be no dims. Any unknown dim will be `null`. @@ -180,6 +183,3 @@ - `["sdict", ["key", {slot_type}]...]`: An anonymous structure with named slots. Note that when passing these types, the keys are not passed to the function (only the slot values). -- `["sdict_kwargs", ...]`: Same as `sdict` but signifies to languages that - allow keyword-argument passing that this is the keyword-argument dictionary. - It can only ever be present as the last entry of the root arguments `slist`.
diff --git a/docs/developers/design_roadmap.md b/docs/developers/design_roadmap.md index 7a60a1f..39312d1 100644 --- a/docs/developers/design_roadmap.md +++ b/docs/developers/design_roadmap.md
@@ -608,7 +608,7 @@ 'instructions' are entire command buffer dispatches/transfers). Two degenerate cases of this approach are that of resource indirection -(`iree.ptr<tensor<T>>`) and dynamic resource shapes. In these two cases it may +(`util.ptr<tensor<T>>`) and dynamic resource shapes. In these two cases it may not be possible to continue recording commands even if we are able to ensure execution is appropriately synchronized. This is where indirect dispatch, [predication](#predication-of-flowdispatch),
diff --git a/docs/developers/developing_iree/developer_overview.md b/docs/developers/developing_iree/developer_overview.md index 64aad5d..b038be4 100644 --- a/docs/developers/developing_iree/developer_overview.md +++ b/docs/developers/developing_iree/developer_overview.md
@@ -77,14 +77,14 @@ `FileCheck` should be used to test the generated output. Here's an example of a small compiler pass running on a -[test file](https://github.com/google/iree/blob/main/iree/compiler/Dialect/IREE/Transforms/test/drop_compiler_hints.mlir): +[test file](https://github.com/google/iree/blob/main/iree/compiler/Dialect/Util/Transforms/test/drop_compiler_hints.mlir): ```shell $ ../iree-build/iree/tools/iree-opt \ -split-input-file \ -print-ir-before-all \ -iree-drop-compiler-hints \ - $PWD/iree/compiler/Dialect/IREE/Transforms/test/drop_compiler_hints.mlir + $PWD/iree/compiler/Dialect/Util/Transforms/test/drop_compiler_hints.mlir ``` For a more complex example, here's how to run IREE's complete transformation
diff --git a/docs/developers/developing_iree/testing_guide.md b/docs/developers/developing_iree/testing_guide.md index 6b4512c..926630c 100644 --- a/docs/developers/developing_iree/testing_guide.md +++ b/docs/developers/developing_iree/testing_guide.md
@@ -223,28 +223,28 @@ ```mlir func @tensor() { - %input = iree.unfoldable_constant dense<[0.0, 1.1, 2.5, 4.9]> : tensor<4xf32> + %input = util.unfoldable_constant dense<[0.0, 1.1, 2.5, 4.9]> : tensor<4xf32> %result = "mhlo.floor"(%input) : (tensor<4xf32>) -> tensor<4xf32> check.expect_almost_eq_const(%result, dense<[0.0, 1.0, 2.0, 4.0]> : tensor<4xf32>): tensor<4xf32> return } func @scalar() { - %input = iree.unfoldable_constant dense<101.3> : tensor<f32> + %input = util.unfoldable_constant dense<101.3> : tensor<f32> %result = "mhlo.floor"(%input) : (tensor<f32>) -> tensor<f32> check.expect_almost_eq_const(%result, dense<101.0> : tensor<f32>): tensor<f32> return } func @double() { - %input = iree.unfoldable_constant dense<11.2> : tensor<f64> + %input = util.unfoldable_constant dense<11.2> : tensor<f64> %result = "mhlo.floor"(%input) : (tensor<f64>) -> tensor<f64> check.expect_almost_eq_const(%result, dense<11.0> : tensor<f64>): tensor<f64> return } func @negative() { - %input = iree.unfoldable_constant dense<-1.1> : tensor<f32> + %input = util.unfoldable_constant dense<-1.1> : tensor<f32> %result = "mhlo.floor"(%input) : (tensor<f32>) -> tensor<f32> check.expect_almost_eq_const(%result, dense<-2.0> : tensor<f32>): tensor<f32> return @@ -253,12 +253,12 @@ Test cases are created in gtest for each public function exported by the module. -Note the use of `iree.unfoldable_constant` to specify test constants. If we were +Note the use of `util.unfoldable_constant` to specify test constants. If we were to use a regular constant, the compiler would "helpfully" fold away everything at compile time and our test would not actually test the runtime. `unfoldable_constant` hides the value of the constant from the compiler so it cannot use it at compile time. To hide an arbitrary SSA-value, you can use -`iree.do_not_optimize`. This wraps any value in an unoptimizable identity +`util.do_not_optimize`. This wraps any value in an unoptimizable identity function. 
Next we use this input constant to exercise the runtime feature under test (in
diff --git a/docs/developers/get_started/getting_started_python.md b/docs/developers/get_started/getting_started_python.md index ef8ccc1..a10eac8 100644 --- a/docs/developers/get_started/getting_started_python.md +++ b/docs/developers/get_started/getting_started_python.md
@@ -25,7 +25,7 @@ ## Python Setup -Install [Python 3](https://www.python.org/downloads/) `>= 3.6` and +Install [Python 3](https://www.python.org/downloads/) `>= 3.7` and [pip](https://pip.pypa.io/en/stable/installing/), if needed. > Note:<br>
diff --git a/docs/website/docs/building-from-source/optional-features.md b/docs/website/docs/building-from-source/optional-features.md index 5a7d2ac..95aac4a 100644 --- a/docs/website/docs/building-from-source/optional-features.md +++ b/docs/website/docs/building-from-source/optional-features.md
@@ -19,7 +19,7 @@ **Pre-requisites:** -* A relatively recent Python3 installation (we aim to support +* A relatively recent Python3 installation >=3.7 (we aim to support [non-eol Python versions](https://endoflife.date/python)). * Installation of python dependencies as specified in [`bindings/python/build_requirements.txt`](https://github.com/google/iree/blob/main/bindings/python/build_requirements.txt).
diff --git a/integrations/tensorflow/bindings/python/build_requirements.txt b/integrations/tensorflow/bindings/python/build_requirements.txt index 7177e34..38547ff 100644 --- a/integrations/tensorflow/bindings/python/build_requirements.txt +++ b/integrations/tensorflow/bindings/python/build_requirements.txt
@@ -1,3 +1,3 @@ -tf-nightly>=2.6.0.dev0 +tf-nightly>=2.7.0.dev20210802 tensorflow-model-optimization==0.5.1.dev0 PyYAML
diff --git a/integrations/tensorflow/bindings/python/iree/tools/tf/setup.py.in b/integrations/tensorflow/bindings/python/iree/tools/tf/setup.py.in index 6e4e63d..0e7e59e 100644 --- a/integrations/tensorflow/bindings/python/iree/tools/tf/setup.py.in +++ b/integrations/tensorflow/bindings/python/iree/tools/tf/setup.py.in
@@ -69,9 +69,10 @@ "Operating System :: OS Independent", "Development Status :: 3 - Alpha", ], - python_requires=">=3.6", + python_requires=">=3.7", packages=find_namespace_packages(include=[ "iree.tools.tf", + "iree.tools.tf.*", "iree.tf.support", ]), package_data={
diff --git a/integrations/tensorflow/bindings/python/iree/tools/tflite/setup.py.in b/integrations/tensorflow/bindings/python/iree/tools/tflite/setup.py.in index ff5f6f2..7952329 100644 --- a/integrations/tensorflow/bindings/python/iree/tools/tflite/setup.py.in +++ b/integrations/tensorflow/bindings/python/iree/tools/tflite/setup.py.in
@@ -69,8 +69,11 @@ "Operating System :: OS Independent", "Development Status :: 3 - Alpha", ], - python_requires=">=3.6", - packages=find_namespace_packages(include=["iree.tools.tflite"]), + python_requires=">=3.7", + packages=find_namespace_packages(include=[ + "iree.tools.tflite", + "iree.tools.tflite.*", + ]), package_data={ "iree.tools.tflite": [f"iree-import-tflite{exe_suffix}",], },
diff --git a/integrations/tensorflow/bindings/python/iree/tools/xla/setup.py.in b/integrations/tensorflow/bindings/python/iree/tools/xla/setup.py.in index a623f50..a2d6a92 100644 --- a/integrations/tensorflow/bindings/python/iree/tools/xla/setup.py.in +++ b/integrations/tensorflow/bindings/python/iree/tools/xla/setup.py.in
@@ -69,8 +69,11 @@ "Operating System :: OS Independent", "Development Status :: 3 - Alpha", ], - python_requires=">=3.6", - packages=find_namespace_packages(include=["iree.tools.xla"]), + python_requires=">=3.7", + packages=find_namespace_packages(include=[ + "iree.tools.xla", + "iree.tools.xla.*", + ]), package_data={ "iree.tools.xla": [f"iree-import-xla{exe_suffix}",], },
diff --git a/integrations/tensorflow/build_tools/overlay/mlir-hlo/BUILD.bazel b/integrations/tensorflow/build_tools/overlay/mlir-hlo/BUILD.bazel index afeede3..687d9fc 100644 --- a/integrations/tensorflow/build_tools/overlay/mlir-hlo/BUILD.bazel +++ b/integrations/tensorflow/build_tools/overlay/mlir-hlo/BUILD.bazel
@@ -24,6 +24,7 @@ for name in [ "chlo_legalize_to_hlo", "legalize_control_flow", + "legalize_einsum_to_dot_general", "legalize_gather_to_torch_index_select", "legalize_to_linalg", "lhlo",
diff --git a/integrations/tensorflow/iree_tf_compiler/BUILD b/integrations/tensorflow/iree_tf_compiler/BUILD index 29bbcb5..d46ff64 100644 --- a/integrations/tensorflow/iree_tf_compiler/BUILD +++ b/integrations/tensorflow/iree_tf_compiler/BUILD
@@ -35,7 +35,7 @@ "//iree_tf_compiler/TF", "@iree//iree/compiler/Dialect/Flow/IR", "@iree//iree/compiler/Dialect/HAL/IR", - "@iree//iree/compiler/Dialect/IREE/IR", + "@iree//iree/compiler/Dialect/Util/IR", "@iree//iree/compiler/InputConversion/Common", "@iree//iree/compiler/InputConversion/MHLO", "@iree//iree/compiler/InputConversion/TOSA",
diff --git a/integrations/tensorflow/iree_tf_compiler/MHLO/BUILD b/integrations/tensorflow/iree_tf_compiler/MHLO/BUILD index 1b07b9c..e5143ef 100644 --- a/integrations/tensorflow/iree_tf_compiler/MHLO/BUILD +++ b/integrations/tensorflow/iree_tf_compiler/MHLO/BUILD
@@ -27,8 +27,8 @@ "@iree//iree/compiler/Codegen:PassHeaders", "@iree//iree/compiler/Dialect/Flow/IR", "@iree//iree/compiler/Dialect/Flow/Transforms", - "@iree//iree/compiler/Dialect/IREE/IR", "@iree//iree/compiler/Dialect/Shape/Transforms", + "@iree//iree/compiler/Dialect/Util/IR", "@iree//iree/compiler/InputConversion/MHLO", "@llvm-project//llvm:Support", "@llvm-project//mlir:IR",
diff --git a/integrations/tensorflow/iree_tf_compiler/TF/BUILD b/integrations/tensorflow/iree_tf_compiler/TF/BUILD index 3a47abf..13b7aac 100644 --- a/integrations/tensorflow/iree_tf_compiler/TF/BUILD +++ b/integrations/tensorflow/iree_tf_compiler/TF/BUILD
@@ -36,8 +36,8 @@ "@iree//iree/compiler/Dialect/Flow/Transforms", "@iree//iree/compiler/Dialect/HAL/IR", "@iree//iree/compiler/Dialect/HAL/IR:HALDialect", - "@iree//iree/compiler/Dialect/IREE/IR", "@iree//iree/compiler/Dialect/Shape/Transforms", + "@iree//iree/compiler/Dialect/Util/IR", "@llvm-project//llvm:Support", "@llvm-project//mlir:IR", "@llvm-project//mlir:LinalgOps", @@ -58,6 +58,7 @@ "@org_tensorflow//tensorflow/compiler/mlir/tensorflow:lower_tf_lib", "@org_tensorflow//tensorflow/compiler/mlir/tensorflow:tensorflow_types", "@org_tensorflow//tensorflow/compiler/mlir/tensorflow:tf_saved_model_passes", + "@org_tensorflow//tensorflow/compiler/mlir/tosa:tf_passes", "@org_tensorflow//tensorflow/compiler/mlir/xla:xla_legalize_tf", ], )
diff --git a/integrations/tensorflow/iree_tf_compiler/TF/LowerGlobalTensors.cpp b/integrations/tensorflow/iree_tf_compiler/TF/LowerGlobalTensors.cpp index 0616e07..7445215 100644 --- a/integrations/tensorflow/iree_tf_compiler/TF/LowerGlobalTensors.cpp +++ b/integrations/tensorflow/iree_tf_compiler/TF/LowerGlobalTensors.cpp
@@ -6,8 +6,8 @@ #include "iree/compiler/Dialect/Flow/IR/FlowDialect.h" #include "iree/compiler/Dialect/Flow/IR/FlowOps.h" -#include "iree/compiler/Dialect/IREE/IR/IREEDialect.h" -#include "iree/compiler/Dialect/IREE/IR/IREETypes.h" +#include "iree/compiler/Dialect/Util/IR/UtilDialect.h" +#include "iree/compiler/Dialect/Util/IR/UtilTypes.h" #include "iree_tf_compiler/TF/Passes.h" #include "llvm/ADT/PostOrderIterator.h" #include "llvm/ADT/STLExtras.h" @@ -96,7 +96,7 @@ auto variableAddressOp = builder.create<iree_compiler::IREE::Flow::VariableAddressOp>( globalTensor.getLoc(), - iree_compiler::IREE::PtrType::get(globalTensor.type()), + iree_compiler::IREE::Util::PtrType::get(globalTensor.type()), builder.getSymbolRefAttr( symNameToFlowSymName[globalTensor.sym_name()])); typeConversionWorklist.push_back(variableAddressOp.getResult()); @@ -166,7 +166,7 @@ void getDependentDialects(DialectRegistry ®istry) const override { registry.insert<mlir::tf_saved_model::TensorFlowSavedModelDialect, iree_compiler::IREE::Flow::FlowDialect, - iree_compiler::IREEDialect>(); + iree_compiler::IREE::Util::UtilDialect>(); } StringRef getArgument() const override {
diff --git a/integrations/tensorflow/iree_tf_compiler/TF/Passes.cpp b/integrations/tensorflow/iree_tf_compiler/TF/Passes.cpp index b2f9a00..e5d0f42 100644 --- a/integrations/tensorflow/iree_tf_compiler/TF/Passes.cpp +++ b/integrations/tensorflow/iree_tf_compiler/TF/Passes.cpp
@@ -16,6 +16,7 @@ #include "mlir/Transforms/Passes.h" #include "tensorflow/compiler/mlir/tensorflow/transforms/passes.h" #include "tensorflow/compiler/mlir/tensorflow/transforms/tf_saved_model_passes.h" +#include "tensorflow/compiler/mlir/tosa/tf_passes.h" namespace mlir { namespace iree_integrations { @@ -23,7 +24,7 @@ // All IREE-specific passes that lower TF representations before reaching the // IREE core should go here. -void buildTFImportPassPipeline(OpPassManager &pm) { +void buildTFImportPassPipeline(OpPassManager &pm, bool useTosa) { //---------------------------------------------------------------------------- // Clean up tf_executor and extraneous unused functions. //---------------------------------------------------------------------------- @@ -60,10 +61,15 @@ pm.addPass(createCanonicalizerPass()); //---------------------------------------------------------------------------- - // Legalize to XLA + // Legalize to TOSA/XLA //---------------------------------------------------------------------------- - pm.addPass(createConvertToMHLOPass()); - pm.addPass(createCanonicalizerPass()); + if (useTosa) { + tosa::TOSATFLegalizationPipelineOptions tosaOptions; + tosa::createTFtoTOSALegalizationPipeline(pm, tosaOptions); + } else { + pm.addPass(createConvertToMHLOPass()); + pm.addPass(createCanonicalizerPass()); + } //---------------------------------------------------------------------------- // Now that the IR is starting to look nice, optimize global tensors. 
@@ -110,7 +116,16 @@ "iree-import-tf-pipeline", "Run IREE-specific passes for importing TF code into IREE", [](OpPassManager &passManager) { - buildTFImportPassPipeline(passManager); + buildTFImportPassPipeline(passManager, false); + }); +} + +void registerTFTosaImportPassPipeline() { + mlir::PassPipelineRegistration<> pipeline( + "iree-import-tf-tosa-pipeline", + "Run IREE-specific passes for importing TF code into IREE", + [](OpPassManager &passManager) { + buildTFImportPassPipeline(passManager, true); }); }
diff --git a/integrations/tensorflow/iree_tf_compiler/TF/Passes.h b/integrations/tensorflow/iree_tf_compiler/TF/Passes.h index 5fd35ca..5faec10 100644 --- a/integrations/tensorflow/iree_tf_compiler/TF/Passes.h +++ b/integrations/tensorflow/iree_tf_compiler/TF/Passes.h
@@ -19,7 +19,7 @@ // Create a single pipeline that will run all the needed IREE-specific TF import // passes in the right order. -void buildTFImportPassPipeline(OpPassManager &pm); +void buildTFImportPassPipeline(OpPassManager &pm, bool useTosa); void registerTFImportPassPipeline(); //===----------------------------------------------------------------------===//
diff --git a/integrations/tensorflow/iree_tf_compiler/TF/SavedModelToIreeABI.cpp b/integrations/tensorflow/iree_tf_compiler/TF/SavedModelToIreeABI.cpp index 18ac931..0c41825 100644 --- a/integrations/tensorflow/iree_tf_compiler/TF/SavedModelToIreeABI.cpp +++ b/integrations/tensorflow/iree_tf_compiler/TF/SavedModelToIreeABI.cpp
@@ -20,9 +20,9 @@ #include "iree/compiler/Dialect/Flow/IR/FlowOps.h" #include "iree/compiler/Dialect/HAL/IR/HALDialect.h" #include "iree/compiler/Dialect/HAL/IR/HALOps.h" -#include "iree/compiler/Dialect/IREE/IR/IREEDialect.h" -#include "iree/compiler/Dialect/IREE/IR/IREEOps.h" -#include "iree/compiler/Dialect/IREE/IR/IREETypes.h" +#include "iree/compiler/Dialect/Util/IR/UtilDialect.h" +#include "iree/compiler/Dialect/Util/IR/UtilOps.h" +#include "iree/compiler/Dialect/Util/IR/UtilTypes.h" #include "iree_tf_compiler/TF/Passes.h" #include "llvm/ADT/STLExtras.h" #include "llvm/Support/JSON.h" @@ -101,6 +101,7 @@ // index for arguments, return index for returns). int valueIndex = 0; Type valueType; + StringRef valueName; // For child levels, the key in the parent container, either a string or int // value. @@ -109,15 +110,7 @@ // Children (must be heap allocated due to recursion). std::vector<StructureLevel> children; - - // The root argument level is still just a list but has a special dict - // child in which to shove kwargs. Once everything is constructed, if the - // kwargs dict is not empty, it is added as the last child. bool isRootArgs = false; - std::unique_ptr<StructureLevel> kwargs; - - // If this is the kwargs dict level, it will have this bit set. 
- bool isKwargs = false; static StructureLevel leafValue(int valueIndex) { return StructureLevel{LevelType::Value, valueIndex}; @@ -126,23 +119,20 @@ static StructureLevel createRootArgsList() { StructureLevel ret = StructureLevel{LevelType::List}; ret.isRootArgs = true; - ret.kwargs = - std::unique_ptr<StructureLevel>(new StructureLevel{LevelType::Dict}); - ret.kwargs->isKwargs = true; return ret; } Type getIrType(Builder builder) { - auto variantType = IREE::VariantType::get(builder.getContext()); + auto variantType = IREE::Util::VariantType::get(builder.getContext()); if (type == LevelType::Value) { if (valueType.isa<TensorType>()) { return IREE::HAL::BufferViewType::get(builder.getContext()); } return valueType; } else if (type == LevelType::List || type == LevelType::Tuple) { - return IREE::ListType::get(variantType); + return IREE::Util::ListType::get(variantType); } else if (type == LevelType::Dict) { - return IREE::ListType::get(variantType); + return IREE::Util::ListType::get(variantType); } llvm_unreachable("Unknown LevelType"); @@ -168,7 +158,17 @@ json::Value createReflectionType() { switch (type) { case LevelType::Value: - return mapTypeToJsonTypeRecord(valueType); + if (valueName.empty()) { + // Unnamed. + return mapTypeToJsonTypeRecord(valueType); + } else { + // Named. + json::Array namedRecord; + namedRecord.push_back(json::Value("named")); + namedRecord.push_back(json::Value(valueName)); + namedRecord.push_back(mapTypeToJsonTypeRecord(valueType)); + return json::Value(std::move(namedRecord)); + } case LevelType::List: case LevelType::Tuple: { json::Array typeRecord; @@ -186,8 +186,7 @@ } case LevelType::Dict: { json::Array typeRecord; - typeRecord.push_back(isKwargs ? 
json::Value("sdict_kwargs") - : json::Value("sdict")); + typeRecord.push_back(json::Value("sdict")); for (auto &child : children) { json::Array nvRecord; nvRecord.push_back(child.skey); @@ -263,14 +262,15 @@ Value listSizeValue = builder.create<ConstantOp>(loc, builder.getIndexType(), builder.getIndexAttr(getNeededListSize())); - Value listValue = builder.create<IREE::ListCreateOp>( + Value listValue = builder.create<IREE::Util::ListCreateOp>( loc, getIrType(builder), listSizeValue); - builder.create<IREE::ListResizeOp>(loc, listValue, listSizeValue); + builder.create<IREE::Util::ListResizeOp>(loc, listValue, listSizeValue); for (StructureLevel &child : children) { Value childValue = child.emitCreateReturns(loc, builder, callReturns); Value indexValue = builder.create<ConstantOp>( loc, builder.getIndexType(), builder.getIndexAttr(child.ikey)); - builder.create<IREE::ListSetOp>(loc, listValue, indexValue, childValue); + builder.create<IREE::Util::ListSetOp>(loc, listValue, indexValue, + childValue); } return listValue; } @@ -280,15 +280,16 @@ Value listSizeValue = builder.create<ConstantOp>(loc, builder.getIndexType(), builder.getIndexAttr(getNeededListSize())); - Value listValue = builder.create<IREE::ListCreateOp>( + Value listValue = builder.create<IREE::Util::ListCreateOp>( loc, getIrType(builder), listSizeValue); - builder.create<IREE::ListResizeOp>(loc, listValue, listSizeValue); + builder.create<IREE::Util::ListResizeOp>(loc, listValue, listSizeValue); for (auto it : llvm::enumerate(children)) { StructureLevel &child = it.value(); Value childValue = child.emitCreateReturns(loc, builder, callReturns); Value indexValue = builder.create<ConstantOp>( loc, builder.getIndexType(), builder.getIndexAttr(it.index())); - builder.create<IREE::ListSetOp>(loc, listValue, indexValue, childValue); + builder.create<IREE::Util::ListSetOp>(loc, listValue, indexValue, + childValue); } return listValue; } @@ -301,8 +302,8 @@ int index) { Value indexValue = 
builder.create<ConstantOp>(loc, builder.getIndexType(), builder.getIndexAttr(index)); - Value itemValue = builder.create<IREE::ListGetOp>(loc, getIrType(builder), - parentList, indexValue); + Value itemValue = builder.create<IREE::Util::ListGetOp>( + loc, getIrType(builder), parentList, indexValue); // TODO: Null check, etc. How does that work if returning a tensor? Need // to box somehow? if (itemValue.getType().isa<IREE::HAL::BufferViewType>()) { @@ -313,12 +314,6 @@ } void normalize() { - // Handle root kwargs. - if (isRootArgs && !kwargs->children.empty()) { - kwargs->ikey = children.size(); - children.push_back(std::move(*kwargs)); - } - // Sort by key. if (type == LevelType::List || type == LevelType::Tuple) { std::sort( @@ -369,9 +364,17 @@ StructureLevel *allocateChild(Location loc, StringRef childKey) { if (type == LevelType::None) type = LevelType::Dict; if (type != LevelType::Dict) { - // Special case for root-args: shove it into the kwargs. + // Special case for root-args: create a named bindings. if (isRootArgs) { - return kwargs->allocateChild(loc, childKey); + int maxIKey = 0; + for (auto &child : children) { + if (child.ikey > maxIKey) maxIKey = child.ikey; + } + + children.push_back({}); + children.back().ikey = maxIKey + 1; + children.back().valueName = childKey; + return &children.back(); } else { emitError(loc) << "structure path mismatch: dereference a non-dict " << "with a dict key '" << childKey << "'"; @@ -463,7 +466,11 @@ // towards multi-return safe by converting to tuple. // TODO: Investigate upstream whether there are additional signals to be // plumbed. - bool isMultiResult = resultsRoot.type == LevelType::Tuple; + // Tuples, lists and dicts are just inlined as multi results instead of + // introducing a root nesting. + bool isMultiResult = resultsRoot.type == LevelType::Tuple || + resultsRoot.type == LevelType::List || + resultsRoot.type == LevelType::Dict; // Build the wrapper function type. 
SmallVector<Type> wrapperArgTypes; @@ -525,8 +532,9 @@ for (int i = 0, e = resultsRoot.children.size(); i < e; ++i) { wrapperReturns[i] = resultsRoot.children[i].emitCreateReturns( loc, builder, internalResults); - refReturns.push_back(resultsRoot.children[i].createReflectionType()); } + // Multi-result roots are implicitly inlined. + refReturns.push_back(resultsRoot.createReflectionType()); } else { // Single return. assert(wrapperReturns.size() == 1 && @@ -563,9 +571,10 @@ : public PassWrapper<SavedModelToIREEABIPass, OperationPass<ModuleOp>> { public: void getDependentDialects(DialectRegistry ®istry) const override { - registry.insert<IREE::Flow::FlowDialect, iree_compiler::IREEDialect, - IREE::HAL::HALDialect, - mlir::tf_saved_model::TensorFlowSavedModelDialect>(); + registry + .insert<IREE::Flow::FlowDialect, iree_compiler::IREE::Util::UtilDialect, + IREE::HAL::HALDialect, + mlir::tf_saved_model::TensorFlowSavedModelDialect>(); } StringRef getArgument() const override {
diff --git a/integrations/tensorflow/iree_tf_compiler/TF/test/lower_global_tensors.mlir b/integrations/tensorflow/iree_tf_compiler/TF/test/lower_global_tensors.mlir index 4199872..b52ea64 100644 --- a/integrations/tensorflow/iree_tf_compiler/TF/test/lower_global_tensors.mlir +++ b/integrations/tensorflow/iree_tf_compiler/TF/test/lower_global_tensors.mlir
@@ -8,8 +8,8 @@ // CHECK: flow.variable [[V:@[a-zA-Z0-9$._-]+]] mutable dense<1.000000e+00> : tensor<1xf32> // CHECK: func @f() -> (tensor<?xf32> {tf_saved_model.index_path = []}) -// CHECK-NEXT: [[PTR:%.+]] = flow.variable.address [[V]] : !iree.ptr<tensor<?xf32>> -// CHECK-NEXT: [[T:%.+]] = flow.variable.load.indirect [[PTR]] : !iree.ptr<tensor<?xf32>> -> tensor<?xf32> +// CHECK-NEXT: [[PTR:%.+]] = flow.variable.address [[V]] : !util.ptr<tensor<?xf32>> +// CHECK-NEXT: [[T:%.+]] = flow.variable.load.indirect [[PTR]] : !util.ptr<tensor<?xf32>> -> tensor<?xf32> // CHECK-NEXT: return [[T]] : tensor<?xf32> "tf_saved_model.global_tensor"() { is_mutable, sym_name = "v", type = tensor<?xf32>, value = dense<1.> : tensor<1xf32> } : () -> () @@ -28,8 +28,8 @@ // CHECK: flow.variable [[V:@[a-zA-Z0-9$._-]+]] mutable dense<1.000000e+00> : tensor<1xf32> // CHECK: func @f(%arg0: tensor<?xf32> {tf_saved_model.index_path = [0]}) -// CHECK-NEXT: [[PTR:%.+]] = flow.variable.address @__iree_flow_v : !iree.ptr<tensor<?xf32>> -// CHECK-NEXT: flow.variable.store.indirect %arg0, [[PTR]] : tensor<?xf32> -> !iree.ptr<tensor<?xf32>> +// CHECK-NEXT: [[PTR:%.+]] = flow.variable.address @__iree_flow_v : !util.ptr<tensor<?xf32>> +// CHECK-NEXT: flow.variable.store.indirect %arg0, [[PTR]] : tensor<?xf32> -> !util.ptr<tensor<?xf32>> // CHECK-NEXT: return "tf_saved_model.global_tensor"() { is_mutable, sym_name = "v", type = tensor<?xf32>, value = dense<1.> : tensor<1xf32> } : () -> ()
diff --git a/integrations/tensorflow/iree_tf_compiler/TF/test/lower_global_tensors_complex.mlir b/integrations/tensorflow/iree_tf_compiler/TF/test/lower_global_tensors_complex.mlir index aabefef..360bb56 100644 --- a/integrations/tensorflow/iree_tf_compiler/TF/test/lower_global_tensors_complex.mlir +++ b/integrations/tensorflow/iree_tf_compiler/TF/test/lower_global_tensors_complex.mlir
@@ -7,10 +7,10 @@ // CHECK: flow.variable [[V:@.+]] mutable dense<1.000000e+00> : tensor<1xf32> // CHECK: func @f(%arg0: tensor<?xf32> {tf_saved_model.index_path = [0]}) attributes {tf_saved_model.exported_names = ["f"]} { -// CHECK-NEXT: [[PTR:%.+]] = flow.variable.address [[V]] : !iree.ptr<tensor<?xf32>> -// CHECK-NEXT: br ^bb1([[PTR]] : !iree.ptr<tensor<?xf32>>) -// CHECK-NEXT: ^bb1([[PTR1:%.+]]: !iree.ptr<tensor<?xf32>>): // pred: ^bb0 -// CHECK-NEXT: flow.variable.store.indirect %arg0, [[PTR1]] : tensor<?xf32> -> !iree.ptr<tensor<?xf32>> +// CHECK-NEXT: [[PTR:%.+]] = flow.variable.address [[V]] : !util.ptr<tensor<?xf32>> +// CHECK-NEXT: br ^bb1([[PTR]] : !util.ptr<tensor<?xf32>>) +// CHECK-NEXT: ^bb1([[PTR1:%.+]]: !util.ptr<tensor<?xf32>>): // pred: ^bb0 +// CHECK-NEXT: flow.variable.store.indirect %arg0, [[PTR1]] : tensor<?xf32> -> !util.ptr<tensor<?xf32>> // CHECK-NEXT: return @@ -31,12 +31,12 @@ // CHECK: flow.variable [[V:@.+]] mutable dense<1.000000e+00> : tensor<1xf32> // CHECK: flow.variable [[V1:@.+]] mutable dense<1.000000e+00> : tensor<1xf32> // CHECK: func @f(%arg0: tensor<?xf32> {tf_saved_model.index_path = [0]}) -> (tensor<?xf32> {tf_saved_model.index_path = [0]}) attributes {tf_saved_model.exported_names = ["f"]} { -// CHECK-NEXT: [[PTR0:%.+]] = flow.variable.address [[V]] : !iree.ptr<tensor<?xf32>> -// CHECK-NEXT: [[PTR1:%.+]] = flow.variable.address [[V1]] : !iree.ptr<tensor<?xf32>> +// CHECK-NEXT: [[PTR0:%.+]] = flow.variable.address [[V]] : !util.ptr<tensor<?xf32>> +// CHECK-NEXT: [[PTR1:%.+]] = flow.variable.address [[V1]] : !util.ptr<tensor<?xf32>> // CHECK-NEXT: %[[FALSE:.+]] = constant false -// CHECK-NEXT: cond_br %[[FALSE]], ^bb1([[PTR0]] : !iree.ptr<tensor<?xf32>>), ^bb1([[PTR1]] : !iree.ptr<tensor<?xf32>>) -// CHECK-NEXT: ^bb1([[PTR:%.+]]: !iree.ptr<tensor<?xf32>>): // 2 preds: ^bb0, ^bb0 -// CHECK-NEXT: [[T:%.+]] = flow.variable.load.indirect [[PTR]] : !iree.ptr<tensor<?xf32>> -> tensor<?xf32> +// CHECK-NEXT: cond_br %[[FALSE]], 
^bb1([[PTR0]] : !util.ptr<tensor<?xf32>>), ^bb1([[PTR1]] : !util.ptr<tensor<?xf32>>) +// CHECK-NEXT: ^bb1([[PTR:%.+]]: !util.ptr<tensor<?xf32>>): // 2 preds: ^bb0, ^bb0 +// CHECK-NEXT: [[T:%.+]] = flow.variable.load.indirect [[PTR]] : !util.ptr<tensor<?xf32>> -> tensor<?xf32> // CHECK-NEXT: return [[T]] : tensor<?xf32> "tf_saved_model.global_tensor"() { is_mutable, sym_name = "v", type = tensor<?xf32>, value = dense<1.> : tensor<1xf32> } : () -> () @@ -57,11 +57,11 @@ // CHECK: flow.variable [[V:@.+]] mutable dense<1.000000e+00> : tensor<1xf32> // CHECK: func @f(%arg0: tensor<?xf32> {tf_saved_model.index_path = [0]}) attributes {tf_saved_model.exported_names = ["f"]} { -// CHECK-NEXT: [[PTR:%.+]] = flow.variable.address [[V]] : !iree.ptr<tensor<?xf32>> -// CHECK-NEXT: br ^bb1([[PTR]], [[PTR]], [[PTR]] : !iree.ptr<tensor<?xf32>>, !iree.ptr<tensor<?xf32>>, !iree.ptr<tensor<?xf32>>) -// CHECK-NEXT: ^bb1([[PTR0:%.+]]: !iree.ptr<tensor<?xf32>>, [[PTR1:%.+]]: !iree.ptr<tensor<?xf32>>, [[PTR2:%.+]]: !iree.ptr<tensor<?xf32>>): // 2 preds: ^bb0, ^bb1 -// CHECK-NEXT: flow.variable.store.indirect %arg0, [[PTR0]] : tensor<?xf32> -> !iree.ptr<tensor<?xf32>> -// CHECK-NEXT: br ^bb1([[PTR1]], [[PTR2]], [[PTR0]] : !iree.ptr<tensor<?xf32>>, !iree.ptr<tensor<?xf32>>, !iree.ptr<tensor<?xf32>>) +// CHECK-NEXT: [[PTR:%.+]] = flow.variable.address [[V]] : !util.ptr<tensor<?xf32>> +// CHECK-NEXT: br ^bb1([[PTR]], [[PTR]], [[PTR]] : !util.ptr<tensor<?xf32>>, !util.ptr<tensor<?xf32>>, !util.ptr<tensor<?xf32>>) +// CHECK-NEXT: ^bb1([[PTR0:%.+]]: !util.ptr<tensor<?xf32>>, [[PTR1:%.+]]: !util.ptr<tensor<?xf32>>, [[PTR2:%.+]]: !util.ptr<tensor<?xf32>>): // 2 preds: ^bb0, ^bb1 +// CHECK-NEXT: flow.variable.store.indirect %arg0, [[PTR0]] : tensor<?xf32> -> !util.ptr<tensor<?xf32>> +// CHECK-NEXT: br ^bb1([[PTR1]], [[PTR2]], [[PTR0]] : !util.ptr<tensor<?xf32>>, !util.ptr<tensor<?xf32>>, !util.ptr<tensor<?xf32>>) "tf_saved_model.global_tensor"() { is_mutable, sym_name = "v", type = tensor<?xf32>, 
value = dense<1.> : tensor<1xf32> } : () -> () func @f(%arg0: tensor<?xf32> {tf_saved_model.index_path = [0]}, %arg1: tensor<!tf_type.resource<tensor<?xf32>>> {tf_saved_model.bound_input = @v}) attributes {tf_saved_model.exported_names = ["f"]} {
diff --git a/integrations/tensorflow/iree_tf_compiler/TF/test/saved_model_to_iree_abi.mlir b/integrations/tensorflow/iree_tf_compiler/TF/test/saved_model_to_iree_abi.mlir index a80792c..8f909c0 100644 --- a/integrations/tensorflow/iree_tf_compiler/TF/test/saved_model_to_iree_abi.mlir +++ b/integrations/tensorflow/iree_tf_compiler/TF/test/saved_model_to_iree_abi.mlir
@@ -3,7 +3,7 @@ // CHECK-LABEL: module @binary_func // Should just be a pass through. // CHECK: func @binary_func -// CHECK-SAME{LITERAL}: iree.abi = "{\22a\22:[[\22ndarray\22,\22f32\22,1,16],[\22ndarray\22,\22f32\22,1,16]],\22r\22:[[\22ndarray\22,\22f32\22,1,16],[\22ndarray\22,\22f32\22,1,16]],\22v\22:1}" +// CHECK-SAME{LITERAL}: iree.abi = "{\22a\22:[[\22ndarray\22,\22f32\22,1,16],[\22ndarray\22,\22f32\22,1,16]],\22r\22:[[\22stuple\22,[\22ndarray\22,\22f32\22,1,16],[\22ndarray\22,\22f32\22,1,16]]],\22v\22:1}" // CHECK: %[[ARG0_TENSOR:.*]] = hal.tensor.cast %arg0 : !hal.buffer_view -> tensor<16xf32> // CHECK: %[[ARG1_TENSOR:.*]] = hal.tensor.cast %arg1 : !hal.buffer_view -> tensor<16xf32> // CHECK: %[[R:.*]]:2 = call @__inference_binary_func_70(%[[ARG0_TENSOR]], %[[ARG1_TENSOR]]) @@ -12,8 +12,8 @@ // CHECK: return %[[R0_BV]], %[[R1_BV]] : !hal.buffer_view, !hal.buffer_view // CHECK: func private @__inference_binary_func_70 // CHECK-NOT: tf_saved_model -module @binary_func attributes {tf.versions = {bad_consumers = [], min_consumer = 12 : i32, producer = 729 : i32}, tf_saved_model.semantics} { - func @__inference_binary_func_70(%arg0: tensor<16xf32> {tf._user_specified_name = "a", tf_saved_model.index_path = [0]}, %arg1: tensor<16xf32> {tf._user_specified_name = "b", tf_saved_model.index_path = [1]}) -> (tensor<16xf32> {tf_saved_model.index_path = [0]}, tensor<16xf32> {tf_saved_model.index_path = [1]}) attributes {tf._construction_context = "kEagerRuntime", tf._input_shapes = [#tf_type.shape<16>, #tf_type.shape<16>], tf_saved_model.exported_names = ["binary_func"]} { +builtin.module @binary_func attributes {tf.versions = {bad_consumers = [], min_consumer = 12 : i32, producer = 729 : i32}, tf_saved_model.semantics} { + builtin.func @__inference_binary_func_70(%arg0: tensor<16xf32> {tf._user_specified_name = "a", tf_saved_model.index_path = [0]}, %arg1: tensor<16xf32> {tf._user_specified_name = "b", tf_saved_model.index_path = [1]}) -> (tensor<16xf32> 
{tf_saved_model.index_path = [0]}, tensor<16xf32> {tf_saved_model.index_path = [1]}) attributes {tf._construction_context = "kEagerRuntime", tf._input_shapes = [#tf_type.shape<16>, #tf_type.shape<16>], tf_saved_model.exported_names = ["binary_func"]} { %0 = "tf.Identity"(%arg0) {device = ""} : (tensor<16xf32>) -> tensor<16xf32> %1 = "tf.Identity"(%arg1) {device = ""} : (tensor<16xf32>) -> tensor<16xf32> return %0, %1 : tensor<16xf32>, tensor<16xf32> @@ -30,8 +30,8 @@ // CHECK: return %[[R0_BV]] : !hal.buffer_view // CHECK: func private @__inference_unary_func_240 // CHECK-NOT: tf_saved_model -module @unary_func attributes {tf.versions = {bad_consumers = [], min_consumer = 12 : i32, producer = 729 : i32}, tf_saved_model.semantics} { - func @__inference_unary_func_240(%arg0: tensor<16xf32> {tf._user_specified_name = "a", tf_saved_model.index_path = [0]}) -> (tensor<16xf32> {tf_saved_model.index_path = []}) attributes {tf._construction_context = "kEagerRuntime", tf._input_shapes = [#tf_type.shape<16>], tf_saved_model.exported_names = ["unary_func"]} { +builtin.module @unary_func attributes {tf.versions = {bad_consumers = [], min_consumer = 12 : i32, producer = 729 : i32}, tf_saved_model.semantics} { + builtin.func @__inference_unary_func_240(%arg0: tensor<16xf32> {tf._user_specified_name = "a", tf_saved_model.index_path = [0]}) -> (tensor<16xf32> {tf_saved_model.index_path = []}) attributes {tf._construction_context = "kEagerRuntime", tf._input_shapes = [#tf_type.shape<16>], tf_saved_model.exported_names = ["unary_func"]} { %0 = "tf.Identity"(%arg0) {device = ""} : (tensor<16xf32>) -> tensor<16xf32> return %0 : tensor<16xf32> } @@ -40,7 +40,7 @@ // ----- // CHECK-LABEL: module @return_list // CHECK: func @return_list -// CHECK-SAME{LITERAL}: iree.abi = "{\22a\22:[[\22ndarray\22,\22f32\22,1,16],[\22ndarray\22,\22f32\22,1,16]],\22r\22:[[\22ndarray\22,\22f32\22,1,16],[\22ndarray\22,\22f32\22,1,16]],\22v\22:1}" +// CHECK-SAME{LITERAL}: iree.abi = 
"{\22a\22:[[\22ndarray\22,\22f32\22,1,16],[\22ndarray\22,\22f32\22,1,16]],\22r\22:[[\22stuple\22,[\22ndarray\22,\22f32\22,1,16],[\22ndarray\22,\22f32\22,1,16]]],\22v\22:1}" // CHECK: %[[ARG0_TENSOR:.*]] = hal.tensor.cast %arg0 : !hal.buffer_view -> tensor<16xf32> // CHECK: %[[ARG1_TENSOR:.*]] = hal.tensor.cast %arg1 : !hal.buffer_view -> tensor<16xf32> // CHECK: %[[R:.+]]:2 = call @__inference_return_list_260(%[[ARG0_TENSOR]], %[[ARG1_TENSOR]]) @@ -49,8 +49,8 @@ // CHECK: return %[[R0_BV]], %[[R1_BV]] : !hal.buffer_view, !hal.buffer_view // CHECK: func private @__inference_return_list_260 // CHECK-NOT: tf_saved_model -module @return_list attributes {tf.versions = {bad_consumers = [], min_consumer = 12 : i32, producer = 729 : i32}, tf_saved_model.semantics} { - func @__inference_return_list_260(%arg0: tensor<16xf32> {tf._user_specified_name = "a", tf_saved_model.index_path = [0]}, %arg1: tensor<16xf32> {tf._user_specified_name = "b", tf_saved_model.index_path = [1]}) -> (tensor<16xf32> {tf_saved_model.index_path = [0]}, tensor<16xf32> {tf_saved_model.index_path = [1]}) attributes {tf._construction_context = "kEagerRuntime", tf._input_shapes = [#tf_type.shape<16>, #tf_type.shape<16>], tf_saved_model.exported_names = ["return_list"]} { +builtin.module @return_list attributes {tf.versions = {bad_consumers = [], min_consumer = 12 : i32, producer = 729 : i32}, tf_saved_model.semantics} { + builtin.func @__inference_return_list_260(%arg0: tensor<16xf32> {tf._user_specified_name = "a", tf_saved_model.index_path = [0]}, %arg1: tensor<16xf32> {tf._user_specified_name = "b", tf_saved_model.index_path = [1]}) -> (tensor<16xf32> {tf_saved_model.index_path = [0]}, tensor<16xf32> {tf_saved_model.index_path = [1]}) attributes {tf._construction_context = "kEagerRuntime", tf._input_shapes = [#tf_type.shape<16>, #tf_type.shape<16>], tf_saved_model.exported_names = ["return_list"]} { %0 = "tf.Identity"(%arg0) {device = ""} : (tensor<16xf32>) -> tensor<16xf32> %1 = "tf.Identity"(%arg1) 
{device = ""} : (tensor<16xf32>) -> tensor<16xf32> return %0, %1 : tensor<16xf32>, tensor<16xf32> @@ -59,53 +59,54 @@ // ----- // CHECK-LABEL: module @dict_nest -// CHECK: func @dict_nest +// CHECK: func @dict_nest(%arg0: !util.list<?>, %arg1: !hal.buffer_view) -> (!util.list<?>, !util.list<?>) // CHECK-SAME{LITERAL}: iree.abi = "{\22a\22:[[\22sdict\22,[\22dict\22,[\22sdict\22,[\22a\22,[\22ndarray\22,\22f32\22,1,16]],[\22b\22,[\22ndarray\22,\22f32\22,1,16]]]],[\22list\22,[\22slist\22,[\22ndarray\22,\22f32\22,1,16],[\22ndarray\22,\22f32\22,1,16]]]],[\22ndarray\22,\22f32\22,0]],\22r\22:[[\22sdict\22,[\22dict\22,[\22sdict\22,[\22a\22,[\22ndarray\22,\22f32\22,1,16]],[\22b\22,[\22ndarray\22,\22f32\22,1,16]]]],[\22list\22,[\22stuple\22,[\22ndarray\22,\22f32\22,1,16],[\22ndarray\22,\22f32\22,1,16]]]]],\22v\22:1}" // CHECK: %[[c0:.+]] = constant 0 : index -// CHECK: %[[L0:.+]] = iree.list.get %arg0[%[[c0]]] : !iree.list<?> -> !iree.list<?> +// CHECK: %[[L0:.+]] = util.list.get %arg0[%[[c0]]] : !util.list<?> -> !util.list<?> // CHECK: %[[c0_0:.+]] = constant 0 : index -// CHECK: %[[L1:.+]] = iree.list.get %[[L0]][%[[c0_0]]] : !iree.list<?> -> !hal.buffer_view +// CHECK: %[[L1:.+]] = util.list.get %[[L0]][%[[c0_0]]] : !util.list<?> -> !hal.buffer_view // CHECK: %[[L1_TENSOR:.+]] = hal.tensor.cast %[[L1]] : !hal.buffer_view -> tensor<16xf32> // CHECK: %[[c1:.+]] = constant 1 : index -// CHECK: %[[L2:.+]] = iree.list.get %[[L0]][%[[c1]]] : !iree.list<?> -> !hal.buffer_view +// CHECK: %[[L2:.+]] = util.list.get %[[L0]][%[[c1]]] : !util.list<?> -> !hal.buffer_view // CHECK: %[[L2_TENSOR:.+]] = hal.tensor.cast %[[L2]] : !hal.buffer_view -> tensor<16xf32> // CHECK: %[[c1_1:.+]] = constant 1 : index -// CHECK: %[[L3:.+]] = iree.list.get %arg0[%[[c1_1]]] : !iree.list<?> -> !iree.list<?> +// CHECK: %[[L3:.+]] = util.list.get %arg0[%[[c1_1]]] : !util.list<?> -> !util.list<?> // CHECK: %[[c0_2:.+]] = constant 0 : index -// CHECK: %[[L4:.+]] = iree.list.get %[[L3]][%[[c0_2]]] : 
!iree.list<?> -> !hal.buffer_view +// CHECK: %[[L4:.+]] = util.list.get %[[L3]][%[[c0_2]]] : !util.list<?> -> !hal.buffer_view // CHECK: %[[L4_TENSOR:.+]] = hal.tensor.cast %[[L4]] : !hal.buffer_view -> tensor<16xf32> // CHECK: %[[c1_3:.+]] = constant 1 : index -// CHECK: %[[L5:.+]] = iree.list.get %[[L3]][%[[c1_3]]] : !iree.list<?> -> !hal.buffer_view +// CHECK: %[[L5:.+]] = util.list.get %[[L3]][%[[c1_3]]] : !util.list<?> -> !hal.buffer_view // CHECK: %[[L5_TENSOR:.+]] = hal.tensor.cast %[[L5]] : !hal.buffer_view -> tensor<16xf32> // CHECK: %[[ARG1_TENSOR:.+]] = hal.tensor.cast %arg1 : !hal.buffer_view -> tensor<f32> // CHECK: %[[RESULT:.+]]:4 = call @__inference_dict_nest_190(%[[L1_TENSOR]], %[[L2_TENSOR]], %[[L4_TENSOR]], %[[L5_TENSOR]], %[[ARG1_TENSOR]]) : (tensor<16xf32>, tensor<16xf32>, tensor<16xf32>, tensor<16xf32>, tensor<f32>) -> (tensor<16xf32>, tensor<16xf32>, tensor<16xf32>, tensor<16xf32>) // CHECK: %[[c2:.+]] = constant 2 : index -// CHECK: %[[R7:.+]] = iree.list.create %[[c2]] : !iree.list<?> -// CHECK: %[[c2_4:.+]] = constant 2 : index -// CHECK: %[[R8:.+]] = iree.list.create %[[c2_4]] : !iree.list<?> +// CHECK: %[[R7:.+]] = util.list.create %[[c2]] : !util.list<?> +// CHECK: util.list.resize %[[R7]], %[[c2]] // CHECK: %[[R0_BV:.+]] = hal.tensor.cast %[[RESULT]]#0 : tensor<16xf32> -> !hal.buffer_view -// CHECK: %[[c0_5:.+]] = constant 0 : index -// CHECK: iree.list.set %[[R8]][%[[c0_5]]], %[[R0_BV]] : !hal.buffer_view -> !iree.list<?> +// CHECK: %[[c0_4:.+]] = constant 0 : index +// CHECK: util.list.set %[[R7]][%[[c0_4]]], %[[R0_BV]] : !hal.buffer_view -> !util.list<?> // CHECK: %[[R1_BV:.+]] = hal.tensor.cast %[[RESULT]]#1 : tensor<16xf32> -> !hal.buffer_view -// CHECK: %[[c1_6:.+]] = constant 1 : index -// CHECK: iree.list.set %[[R8]][%[[c1_6]]], %[[R1_BV]] : !hal.buffer_view -> !iree.list<?> -// CHECK: %[[c0_7:.+]] = constant 0 : index -// CHECK: iree.list.set %[[R7]][%[[c0_7]]], %[[R8]] : !iree.list<?> -> !iree.list<?> +// CHECK: %[[c1_5:.+]] 
= constant 1 : index +// CHECK: util.list.set %[[R7]][%[[c1_5]]], %[[R1_BV]] : !hal.buffer_view -> !util.list<?> // CHECK: %[[c2_8:.+]] = constant 2 : index -// CHECK: %[[R9:.+]] = iree.list.create %[[c2_8]] : !iree.list<?> +// CHECK: %[[R9:.+]] = util.list.create %[[c2_8]] : !util.list<?> +// CHECK: util.list.resize %[[R9]], %[[c2_8]] // CHECK: %[[R2_BV:.+]] = hal.tensor.cast %[[RESULT]]#2 : tensor<16xf32> -> !hal.buffer_view // CHECK: %[[c0_9:.+]] = constant 0 : index -// CHECK: iree.list.set %[[R9]][%[[c0_9]]], %[[R2_BV]] : !hal.buffer_view -> !iree.list<?> +// CHECK: util.list.set %[[R9]][%[[c0_9]]], %[[R2_BV]] : !hal.buffer_view -> !util.list<?> // CHECK: %[[R3_BV:.+]] = hal.tensor.cast %[[RESULT]]#3 : tensor<16xf32> -> !hal.buffer_view // CHECK: %[[c1_10:.+]] = constant 1 : index -// CHECK: iree.list.set %[[R9]][%[[c1_10]]], %[[R3_BV]] : !hal.buffer_view -> !iree.list<?> -// CHECK: %[[c1_11:.+]] = constant 1 : index -// CHECK: iree.list.set %[[R7]][%[[c1_11]]], %[[R9]] : !iree.list<?> -> !iree.list<?> -// return %[[R7]] : !iree.list<?> +// CHECK: util.list.set %[[R9]][%[[c1_10]]], %[[R3_BV]] : !hal.buffer_view -> !util.list<?> +// return %[[R7]], %[[R8]] : !util.list<?>, !util.list<?> // CHECK: func private @__inference_dict_nest_190 // CHECK-NOT: tf_saved_model -module @dict_nest attributes {tf.versions = {bad_consumers = [], min_consumer = 12 : i32, producer = 729 : i32}, tf_saved_model.semantics} { - func @__inference_dict_nest_190(%arg0: tensor<16xf32> {tf._user_specified_name = "mapping", tf_saved_model.index_path = [0, "dict", "a"]}, %arg1: tensor<16xf32> {tf._user_specified_name = "mapping", tf_saved_model.index_path = [0, "dict", "b"]}, %arg2: tensor<16xf32> {tf._user_specified_name = "mapping", tf_saved_model.index_path = [0, "list", 0]}, %arg3: tensor<16xf32> {tf._user_specified_name = "mapping", tf_saved_model.index_path = [0, "list", 1]}, %arg4: tensor<f32> {tf._user_specified_name = "scalar", tf_saved_model.index_path = [1]}) -> (tensor<16xf32> 
{tf_saved_model.index_path = ["dict", "a"]}, tensor<16xf32> {tf_saved_model.index_path = ["dict", "b"]}, tensor<16xf32> {tf_saved_model.index_path = ["list", 0]}, tensor<16xf32> {tf_saved_model.index_path = ["list", 1]}) attributes {tf._construction_context = "kEagerRuntime", tf._input_shapes = [#tf_type.shape<16>, #tf_type.shape<16>, #tf_type.shape<16>, #tf_type.shape<16>, #tf_type.shape<>], tf_saved_model.exported_names = ["dict_nest"]} { +builtin.module @dict_nest attributes {tf.versions = {bad_consumers = [], min_consumer = 12 : i32, producer = 729 : i32}, tf_saved_model.semantics} { + builtin.func @__inference_dict_nest_190( + %arg0: tensor<16xf32> {tf._user_specified_name = "mapping", tf_saved_model.index_path = [0, "dict", "a"]}, + %arg1: tensor<16xf32> {tf._user_specified_name = "mapping", tf_saved_model.index_path = [0, "dict", "b"]}, + %arg2: tensor<16xf32> {tf._user_specified_name = "mapping", tf_saved_model.index_path = [0, "list", 0]}, + %arg3: tensor<16xf32> {tf._user_specified_name = "mapping", tf_saved_model.index_path = [0, "list", 1]}, + %arg4: tensor<f32> {tf._user_specified_name = "scalar", tf_saved_model.index_path = [1]}) -> (tensor<16xf32> {tf_saved_model.index_path = ["dict", "a"]}, tensor<16xf32> {tf_saved_model.index_path = ["dict", "b"]}, tensor<16xf32> {tf_saved_model.index_path = ["list", 0]}, tensor<16xf32> {tf_saved_model.index_path = ["list", 1]}) attributes {tf._construction_context = "kEagerRuntime", tf._input_shapes = [#tf_type.shape<16>, #tf_type.shape<16>, #tf_type.shape<16>, #tf_type.shape<16>, #tf_type.shape<>], tf_saved_model.exported_names = ["dict_nest"]} { %0 = "tf.Identity"(%arg0) {device = ""} : (tensor<16xf32>) -> tensor<16xf32> %1 = "tf.Identity"(%arg1) {device = ""} : (tensor<16xf32>) -> tensor<16xf32> %2 = "tf.Identity"(%arg2) {device = ""} : (tensor<16xf32>) -> tensor<16xf32> @@ -116,14 +117,20 @@ // ----- // CHECK-LABEL: module @kwargs -// CHECK: func @dict_nest -// CHECK-SAME{LITERAL}: iree.abi = 
"{\22a\22:[[\22sdict\22,[\22list\22,[\22slist\22,[\22ndarray\22,\22f32\22,1,16],[\22ndarray\22,\22f32\22,1,16]]]],[\22ndarray\22,\22f32\22,0],[\22sdict_kwargs\22,[\22a\22,[\22ndarray\22,\22f32\22,1,16]],[\22b\22,[\22ndarray\22,\22f32\22,1,16]]]],\22r\22:[[\22sdict\22,[\22dict\22,[\22sdict\22,[\22a\22,[\22ndarray\22,\22f32\22,1,16]],[\22b\22,[\22ndarray\22,\22f32\22,1,16]]]],[\22list\22,[\22stuple\22,[\22ndarray\22,\22f32\22,1,16],[\22ndarray\22,\22f32\22,1,16]]]]],\22v\22:1}" -module @kwargs attributes {tf.versions = {bad_consumers = [], min_consumer = 12 : i32, producer = 729 : i32}, tf_saved_model.semantics} { - func @__inference_dict_nest_190(%arg0: tensor<16xf32> {tf_saved_model.index_path = ["a"]}, %arg1: tensor<16xf32> {tf_saved_model.index_path = ["b"]}, %arg2: tensor<16xf32> {tf._user_specified_name = "mapping", tf_saved_model.index_path = [0, "list", 0]}, %arg3: tensor<16xf32> {tf._user_specified_name = "mapping", tf_saved_model.index_path = [0, "list", 1]}, %arg4: tensor<f32> {tf._user_specified_name = "scalar", tf_saved_model.index_path = [1]}) -> (tensor<16xf32> {tf_saved_model.index_path = ["dict", "a"]}, tensor<16xf32> {tf_saved_model.index_path = ["dict", "b"]}, tensor<16xf32> {tf_saved_model.index_path = ["list", 0]}, tensor<16xf32> {tf_saved_model.index_path = ["list", 1]}) attributes {tf._construction_context = "kEagerRuntime", tf._input_shapes = [#tf_type.shape<16>, #tf_type.shape<16>, #tf_type.shape<16>, #tf_type.shape<16>, #tf_type.shape<>], tf_saved_model.exported_names = ["dict_nest"]} { +// CHECK: func @dict_nest(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !util.list<?> +// CHECK-SAME{LITERAL}: iree.abi = 
"{\22a\22:[[\22named\22,\22a\22,[\22ndarray\22,\22f32\22,1,16]],[\22named\22,\22b\22,[\22ndarray\22,\22f32\22,1,16]],[\22named\22,\22scalar\22,[\22ndarray\22,\22f32\22,0]]],\22r\22:[[\22sdict\22,[\22dict\22,[\22sdict\22,[\22a\22,[\22ndarray\22,\22f32\22,1,16]],[\22b\22,[\22ndarray\22,\22f32\22,1,16]],[\22scalar\22,[\22ndarray\22,\22f32\22,0]]]]]],\22v\22:1}" +builtin.module @kwargs attributes {tf.versions = {bad_consumers = [], min_consumer = 12 : i32, producer = 729 : i32}, tf_saved_model.semantics} { + builtin.func @__inference_dict_nest_190( + %arg0: tensor<16xf32> {tf_saved_model.index_path = ["a"]}, + %arg1: tensor<16xf32> {tf_saved_model.index_path = ["b"]}, + %arg2: tensor<f32> {tf._user_specified_name = "scalar", tf_saved_model.index_path = ["scalar"]}) -> + (tensor<16xf32> {tf_saved_model.index_path = ["dict", "a"]}, + tensor<16xf32> {tf_saved_model.index_path = ["dict", "b"]}, + tensor<f32> {tf_saved_model.index_path = ["dict", "scalar"]}) + attributes {tf._construction_context = "kEagerRuntime", tf._input_shapes = [#tf_type.shape<16>, #tf_type.shape<16>, #tf_type.shape<>], tf_saved_model.exported_names = ["dict_nest"]} { %0 = "tf.Identity"(%arg0) {device = ""} : (tensor<16xf32>) -> tensor<16xf32> %1 = "tf.Identity"(%arg1) {device = ""} : (tensor<16xf32>) -> tensor<16xf32> - %2 = "tf.Identity"(%arg2) {device = ""} : (tensor<16xf32>) -> tensor<16xf32> - %3 = "tf.Identity"(%arg3) {device = ""} : (tensor<16xf32>) -> tensor<16xf32> - return %0, %1, %2, %3 : tensor<16xf32>, tensor<16xf32>, tensor<16xf32>, tensor<16xf32> + %2 = "tf.Identity"(%arg2) {device = ""} : (tensor<f32>) -> tensor<f32> + return %0, %1, %2 : tensor<16xf32>, tensor<16xf32>, tensor<f32> } }
diff --git a/integrations/tensorflow/iree_tf_compiler/TFL/BUILD b/integrations/tensorflow/iree_tf_compiler/TFL/BUILD index 1395442..0abd9f7 100644 --- a/integrations/tensorflow/iree_tf_compiler/TFL/BUILD +++ b/integrations/tensorflow/iree_tf_compiler/TFL/BUILD
@@ -26,9 +26,9 @@ ], deps = [ "@iree//iree/compiler/Dialect/Flow/IR", - "@iree//iree/compiler/Dialect/IREE/IR", "@iree//iree/compiler/Dialect/Shape/IR", "@iree//iree/compiler/Dialect/Shape/Transforms", + "@iree//iree/compiler/Dialect/Util/IR", "@llvm-project//llvm:Support", "@llvm-project//mlir:IR", "@llvm-project//mlir:Pass",
diff --git a/integrations/tensorflow/iree_tf_compiler/iree-import-tf-main.cpp b/integrations/tensorflow/iree_tf_compiler/iree-import-tf-main.cpp index ee95f1e..8edb63d 100644 --- a/integrations/tensorflow/iree_tf_compiler/iree-import-tf-main.cpp +++ b/integrations/tensorflow/iree_tf_compiler/iree-import-tf-main.cpp
@@ -163,6 +163,9 @@ llvm::cl::desc("Prettifies TF debug information to make it easier " "to look at"), llvm::cl::init(true)); + static llvm::cl::opt<bool> useTosa( + "use-tosa", llvm::cl::desc("Use tosa as the intermediate IR"), + llvm::cl::init(false)); // Register any command line options. registerAsmPrinterCLOptions(); @@ -222,7 +225,7 @@ pm.addPass(iree_integrations::TF::createPrettifyDebugInfoPass()); } - iree_integrations::TF::buildTFImportPassPipeline(pm); + iree_integrations::TF::buildTFImportPassPipeline(pm, useTosa); if (failed(pm.run(*module))) { llvm::errs() << "Running iree-import-tf TF import pass pipeline failed " "(see diagnostics)\n";
diff --git a/integrations/tensorflow/iree_tf_compiler/iree-tf-opt-main.cpp b/integrations/tensorflow/iree_tf_compiler/iree-tf-opt-main.cpp index 3c79799..c7d8a74 100644 --- a/integrations/tensorflow/iree_tf_compiler/iree-tf-opt-main.cpp +++ b/integrations/tensorflow/iree_tf_compiler/iree-tf-opt-main.cpp
@@ -12,7 +12,7 @@ #include "iree/compiler/Dialect/Flow/IR/FlowDialect.h" #include "iree/compiler/Dialect/HAL/IR/HALDialect.h" -#include "iree/compiler/Dialect/IREE/IR/IREEDialect.h" +#include "iree/compiler/Dialect/Util/IR/UtilDialect.h" #include "iree/compiler/InputConversion/Common/Passes.h" #include "iree/compiler/InputConversion/MHLO/Passes.h" #include "iree/compiler/InputConversion/TOSA/Passes.h" @@ -34,7 +34,7 @@ mlir::registerXLADialects(registry); registry.insert<mlir::iree_compiler::IREE::Flow::FlowDialect, mlir::iree_compiler::IREE::HAL::HALDialect, - mlir::iree_compiler::IREEDialect>(); + mlir::iree_compiler::IREE::Util::UtilDialect>(); // Select IREE input passes. mlir::iree_compiler::registerCommonInputConversionPasses();
diff --git a/iree/base/BUILD b/iree/base/BUILD index 95bf270..437febd 100644 --- a/iree/base/BUILD +++ b/iree/base/BUILD
@@ -25,6 +25,8 @@ "assert.h", "status.c", "status.h", + "string_builder.c", + "string_builder.h", "string_view.c", "string_view.h", "time.c", @@ -66,6 +68,16 @@ ) cc_test( + name = "string_builder_test", + srcs = ["string_builder_test.cc"], + deps = [ + ":base", + "//iree/testing:gtest", + "//iree/testing:gtest_main", + ], +) + +cc_test( name = "string_view_test", srcs = ["string_view_test.cc"], deps = [
diff --git a/iree/base/CMakeLists.txt b/iree/base/CMakeLists.txt index 492825f..d8ffd2e 100644 --- a/iree/base/CMakeLists.txt +++ b/iree/base/CMakeLists.txt
@@ -18,6 +18,8 @@ "assert.h" "status.c" "status.h" + "string_builder.c" + "string_builder.h" "string_view.c" "string_view.h" "time.c" @@ -56,6 +58,17 @@ iree_cc_test( NAME + string_builder_test + SRCS + "string_builder_test.cc" + DEPS + ::base + iree::testing::gtest + iree::testing::gtest_main +) + +iree_cc_test( + NAME string_view_test SRCS "string_view_test.cc"
diff --git a/iree/base/allocator.h b/iree/base/allocator.h index 084fe2d..86ba6db 100644 --- a/iree/base/allocator.h +++ b/iree/base/allocator.h
@@ -11,6 +11,7 @@ #include <stdint.h> #include <string.h> +#include "iree/base/alignment.h" #include "iree/base/attributes.h" #include "iree/base/config.h" #include "iree/base/status.h" @@ -246,6 +247,11 @@ return v; } +// Returns true if the allocator is `iree_allocator_null()`. +static inline bool iree_allocator_is_null(iree_allocator_t allocator) { + return allocator.ctl == NULL; +} + #ifdef __cplusplus } // extern "C" #endif // __cplusplus
diff --git a/iree/base/api.h b/iree/base/api.h index 0f0cbfb..52390de 100644 --- a/iree/base/api.h +++ b/iree/base/api.h
@@ -91,14 +91,15 @@ #ifndef IREE_BASE_API_H_ #define IREE_BASE_API_H_ -#include "iree/base/alignment.h" // IWYU pragma: export -#include "iree/base/allocator.h" // IWYU pragma: export -#include "iree/base/assert.h" // IWYU pragma: export -#include "iree/base/attributes.h" // IWYU pragma: export -#include "iree/base/config.h" // IWYU pragma: export -#include "iree/base/status.h" // IWYU pragma: export -#include "iree/base/string_view.h" // IWYU pragma: export -#include "iree/base/time.h" // IWYU pragma: export +#include "iree/base/alignment.h" // IWYU pragma: export +#include "iree/base/allocator.h" // IWYU pragma: export +#include "iree/base/assert.h" // IWYU pragma: export +#include "iree/base/attributes.h" // IWYU pragma: export +#include "iree/base/config.h" // IWYU pragma: export +#include "iree/base/status.h" // IWYU pragma: export +#include "iree/base/string_builder.h" // IWYU pragma: export +#include "iree/base/string_view.h" // IWYU pragma: export +#include "iree/base/time.h" // IWYU pragma: export #ifdef __cplusplus extern "C" {
diff --git a/iree/base/string_builder.c b/iree/base/string_builder.c new file mode 100644 index 0000000..39b92f1 --- /dev/null +++ b/iree/base/string_builder.c
@@ -0,0 +1,143 @@ +// Copyright 2021 The IREE Authors +// +// Licensed under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + +#include "iree/base/string_builder.h" + +#include "iree/base/alignment.h" + +// Minimum alignment for storage buffer allocations. +#define IREE_STRING_BUILDER_ALIGNMENT 128 + +IREE_API_EXPORT void iree_string_builder_initialize( + iree_allocator_t allocator, iree_string_builder_t* out_builder) { + memset(out_builder, 0, sizeof(*out_builder)); + out_builder->allocator = allocator; +} + +IREE_API_EXPORT void iree_string_builder_deinitialize( + iree_string_builder_t* builder) { + if (builder->buffer != NULL) { + iree_allocator_free(builder->allocator, builder->buffer); + } + memset(builder, 0, sizeof(*builder)); +} + +IREE_API_EXPORT const char* iree_string_builder_buffer( + const iree_string_builder_t* builder) { + return builder->buffer; +} + +IREE_API_EXPORT iree_host_size_t +iree_string_builder_size(const iree_string_builder_t* builder) { + return builder->size; +} + +IREE_API_EXPORT iree_host_size_t +iree_string_builder_capacity(const iree_string_builder_t* builder) { + return builder->capacity; +} + +IREE_API_EXPORT iree_string_view_t +iree_string_builder_view(const iree_string_builder_t* builder) { + return iree_make_string_view(iree_string_builder_buffer(builder), + iree_string_builder_size(builder)); +} + +IREE_API_EXPORT char* iree_string_builder_take_storage( + iree_string_builder_t* builder) { + char* buffer = builder->buffer; + if (builder->size == 0) { + // In empty cases we return NULL and need to clean up inline as the user is + // expecting to be able to discard the builder after this returns. 
+ if (builder->buffer != NULL) { + iree_allocator_free(builder->allocator, builder->buffer); + builder->buffer = NULL; + } + buffer = NULL; + } + builder->size = 0; + builder->capacity = 0; + builder->buffer = NULL; + return buffer; +} + +IREE_API_EXPORT iree_status_t iree_string_builder_reserve( + iree_string_builder_t* builder, iree_host_size_t minimum_capacity) { + if (iree_allocator_is_null(builder->allocator)) return iree_ok_status(); + iree_host_size_t new_capacity = builder->capacity; + if (builder->capacity < minimum_capacity) { + new_capacity = + iree_host_align(minimum_capacity, IREE_STRING_BUILDER_ALIGNMENT); + } + if (builder->capacity >= new_capacity) return iree_ok_status(); + IREE_RETURN_IF_ERROR(iree_allocator_realloc(builder->allocator, new_capacity, + (void**)&builder->buffer)); + builder->buffer[builder->size] = 0; + builder->capacity = new_capacity; + return iree_ok_status(); +} + +IREE_API_EXPORT iree_status_t iree_string_builder_append_string( + iree_string_builder_t* builder, iree_string_view_t value) { + // Ensure capacity for the value + NUL terminator. + IREE_RETURN_IF_ERROR( + iree_string_builder_reserve(builder, builder->size + value.size + 1)); + if (builder->buffer != NULL) { + // Only copy the bytes if we are not doing a size calculation. + memcpy(builder->buffer + builder->size, value.data, value.size); + builder->buffer[builder->size + value.size] = 0; // NUL + } + builder->size += value.size; + return iree_ok_status(); +} + +IREE_API_EXPORT iree_status_t iree_string_builder_append_cstring( + iree_string_builder_t* builder, const char* value) { + return iree_string_builder_append_string(builder, + iree_make_cstring_view(value)); +} + +static iree_status_t iree_string_builder_append_format_impl( + iree_string_builder_t* builder, const char* format, va_list varargs_0, + va_list varargs_1) { + // Try to directly print into the buffer we have. 
This may work if we have + // capacity but otherwise will yield us the size we need to grow our buffer. + int n = vsnprintf(builder->buffer ? builder->buffer + builder->size : NULL, + builder->buffer ? builder->capacity - builder->size : 0, + format, varargs_0); + if (IREE_UNLIKELY(n < 0)) { + return iree_make_status(IREE_STATUS_INTERNAL, "printf try failed"); + } + if (n < builder->capacity - builder->size) { + // Printed into the buffer. + builder->size += n; + return iree_ok_status(); + } + + // Reserve new minimum capacity. + IREE_RETURN_IF_ERROR(iree_string_builder_reserve( + builder, iree_string_builder_size(builder) + n + /*NUL*/ 1)); + + // Try printing again. + vsnprintf(builder->buffer ? builder->buffer + builder->size : NULL, + builder->buffer ? builder->capacity - builder->size : 0, format, + varargs_1); + builder->size += n; + return iree_ok_status(); +} + +IREE_API_EXPORT iree_status_t IREE_PRINTF_ATTRIBUTE(2, 3) + iree_string_builder_append_format(iree_string_builder_t* builder, + const char* format, ...) { + va_list varargs_0, varargs_1; + va_start(varargs_0, format); + va_start(varargs_1, format); + iree_status_t status = iree_string_builder_append_format_impl( + builder, format, varargs_0, varargs_1); + va_end(varargs_1); + va_end(varargs_0); + return status; +}
diff --git a/iree/base/string_builder.h b/iree/base/string_builder.h new file mode 100644 index 0000000..a6ba16a --- /dev/null +++ b/iree/base/string_builder.h
@@ -0,0 +1,120 @@ +// Copyright 2021 The IREE Authors +// +// Licensed under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + +#ifndef IREE_BASE_STRING_BUILDER_H_ +#define IREE_BASE_STRING_BUILDER_H_ + +#include <stdbool.h> +#include <string.h> + +#include "iree/base/allocator.h" +#include "iree/base/attributes.h" +#include "iree/base/status.h" +#include "iree/base/string_view.h" + +#ifdef __cplusplus +extern "C" { +#endif // __cplusplus + +// Lightweight string builder. +// Used to dynamically produce strings in a growable buffer. +// +// Usage: +// iree_string_builder_t builder; +// iree_string_builder_initialize(iree_allocator_system(), &builder); +// IREE_RETURN_IF_ERROR(iree_string_builder_append_cstring(&builder, "hel")); +// IREE_RETURN_IF_ERROR(iree_string_builder_append_cstring(&builder, "lo")); +// fprintf(stream, "%.*s", (int)iree_string_builder_size(&builder), +// iree_string_builder_buffer(&builder)); +// iree_string_builder_deinitialize(&builder); +// +// Usage for preallocation: +// iree_string_builder_t builder; +// iree_string_builder_initialize(iree_allocator_null(), &builder); +// IREE_RETURN_IF_ERROR(iree_string_builder_append_cstring(&builder, "123")); +// // str_length is total number of characters (excluding NUL). +// iree_host_size_t str_length = iree_string_builder_size(builder); +// iree_string_builder_deinitialize(&builder); +typedef struct iree_string_builder_t { + // Allocator used for buffer storage. + // May be iree_allocator_null() to have the builder total up the required + // size. + iree_allocator_t allocator; + // Allocated storage buffer, if any. + char* buffer; + // Total length of the string in the buffer in characters (excluding NUL). + iree_host_size_t size; + // Total allocated buffer capacity in bytes. 
+ iree_host_size_t capacity; +} iree_string_builder_t; + +// Initializes a string builder in |out_builder| with the given |allocator|. +IREE_API_EXPORT void iree_string_builder_initialize( + iree_allocator_t allocator, iree_string_builder_t* out_builder); + +// Deinitializes |builder| and releases allocated storage. +IREE_API_EXPORT void iree_string_builder_deinitialize( + iree_string_builder_t* builder); + +// Returns a pointer into the builder storage. +// The pointer is only valid so long as the string builder is initialized and +// unmodified. +IREE_API_EXPORT const char* iree_string_builder_buffer( + const iree_string_builder_t* builder); + +// Returns the total length of the string in the buffer in characters (excluding +// NUL). +IREE_API_EXPORT iree_host_size_t +iree_string_builder_size(const iree_string_builder_t* builder); + +// Returns the total allocated buffer capacity in bytes. +IREE_API_EXPORT iree_host_size_t +iree_string_builder_capacity(const iree_string_builder_t* builder); + +// Returns a string view into the builder storage. +// The pointer is only valid so long as the string builder is initialized and +// unmodified. +IREE_API_EXPORT iree_string_view_t +iree_string_builder_view(const iree_string_builder_t* builder); + +// Releases the storage from the builder and returns ownership to the caller. +// The caller must free the string using the same allocator used by the builder. +// Returns NULL if the string builder is empty. +// +// Usage: +// iree_string_builder_t builder; +// iree_string_builder_initialize(iree_allocator_system(), &builder); +// ... +// char* buffer = iree_string_builder_take_storage(&builder); +// iree_host_size_t buffer_size = iree_string_builder_size(&builder); +// iree_string_builder_deinitialize(&builder); +// ... 
+// iree_allocator_free(iree_allocator_system(), buffer); +IREE_API_EXPORT IREE_MUST_USE_RESULT char* iree_string_builder_take_storage( + iree_string_builder_t* builder); + +// Reserves storage for at least |minimum_capacity|. +IREE_API_EXPORT iree_status_t iree_string_builder_reserve( + iree_string_builder_t* builder, iree_host_size_t minimum_capacity); + +// Appends a string to the builder. +IREE_API_EXPORT iree_status_t iree_string_builder_append_string( + iree_string_builder_t* builder, iree_string_view_t value); + +// Appends a NUL-terminated C string to the builder. +IREE_API_EXPORT iree_status_t iree_string_builder_append_cstring( + iree_string_builder_t* builder, const char* value); + +// Appends a printf-style formatted string to the builder. +IREE_API_EXPORT IREE_PRINTF_ATTRIBUTE(2, 3) iree_status_t + iree_string_builder_append_format(iree_string_builder_t* builder, + const char* format, ...); + +#ifdef __cplusplus +} // extern "C" +#endif // __cplusplus + +#endif // IREE_BASE_STRING_BUILDER_H_
diff --git a/iree/base/string_builder_test.cc b/iree/base/string_builder_test.cc new file mode 100644 index 0000000..fad7034 --- /dev/null +++ b/iree/base/string_builder_test.cc
@@ -0,0 +1,164 @@ +// Copyright 2021 The IREE Authors +// +// Licensed under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + +#include <string> + +#include "iree/base/api.h" +#include "iree/testing/gtest.h" +#include "iree/testing/status_matchers.h" + +namespace { + +struct StringBuilder { + static StringBuilder MakeSystem() { + iree_string_builder_t builder; + iree_string_builder_initialize(iree_allocator_system(), &builder); + return StringBuilder(builder); + } + + static StringBuilder MakeEmpty() { + iree_string_builder_t builder; + iree_string_builder_initialize(iree_allocator_null(), &builder); + return StringBuilder(builder); + } + + explicit StringBuilder(iree_string_builder_t builder) + : builder(std::move(builder)) {} + + ~StringBuilder() { iree_string_builder_deinitialize(&builder); } + + operator iree_string_builder_t*() { return &builder; } + + std::string ToString() const { + return std::string(builder.buffer, builder.size); + } + + iree_string_builder_t builder; +}; + +TEST(StringBuilderTest, QueryEmpty) { + auto builder = StringBuilder::MakeEmpty(); + EXPECT_EQ(iree_string_builder_buffer(builder), + static_cast<const char*>(NULL)); + EXPECT_EQ(iree_string_builder_size(builder), 0); + EXPECT_EQ(iree_string_builder_capacity(builder), 0); + EXPECT_TRUE(iree_string_view_is_empty(iree_string_builder_view(builder))); + EXPECT_EQ(iree_string_builder_take_storage(builder), + static_cast<char*>(NULL)); +} + +TEST(StringBuilderTest, QueryAppendString) { + auto builder = StringBuilder::MakeEmpty(); + EXPECT_EQ(iree_string_builder_size(builder), 0); + IREE_EXPECT_OK(iree_string_builder_append_cstring(builder, "")); + EXPECT_EQ(iree_string_builder_size(builder), 0); + IREE_EXPECT_OK(iree_string_builder_append_cstring(builder, "a")); + EXPECT_EQ(iree_string_builder_size(builder), 1); + IREE_EXPECT_OK(iree_string_builder_append_cstring(builder, 
"abc")); + EXPECT_EQ(iree_string_builder_size(builder), 1 + 3); + IREE_EXPECT_OK(iree_string_builder_append_cstring(builder, "")); + EXPECT_EQ(iree_string_builder_size(builder), 1 + 3); + + char kLongString[1024]; + memset(kLongString, 'x', IREE_ARRAYSIZE(kLongString)); + IREE_EXPECT_OK(iree_string_builder_append_string( + builder, + iree_make_string_view(kLongString, IREE_ARRAYSIZE(kLongString)))); + EXPECT_EQ(iree_string_builder_size(builder), + 1 + 3 + IREE_ARRAYSIZE(kLongString)); +} + +TEST(StringBuilderTest, QueryFormat) { + auto builder = StringBuilder::MakeEmpty(); + EXPECT_EQ(iree_string_builder_size(builder), 0); + IREE_EXPECT_OK(iree_string_builder_append_format(builder, "")); + EXPECT_EQ(iree_string_builder_size(builder), 0); + IREE_EXPECT_OK(iree_string_builder_append_format(builder, "abc")); + EXPECT_EQ(iree_string_builder_size(builder), 3); + IREE_EXPECT_OK(iree_string_builder_append_format(builder, "a%cc", 'b')); + EXPECT_EQ(iree_string_builder_size(builder), 6); + IREE_EXPECT_OK(iree_string_builder_append_format(builder, "%*c", 1024, 'x')); + EXPECT_EQ(iree_string_builder_size(builder), 6 + 1024); +} + +TEST(StringBuilderTest, Empty) { + auto builder = StringBuilder::MakeSystem(); + EXPECT_EQ(iree_string_builder_size(builder), 0); + EXPECT_GE(iree_string_builder_capacity(builder), 0); + EXPECT_TRUE(iree_string_view_is_empty(iree_string_builder_view(builder))); + EXPECT_EQ(iree_string_builder_take_storage(builder), + static_cast<char*>(NULL)); +} + +TEST(StringBuilderTest, AppendString) { + auto builder = StringBuilder::MakeSystem(); + EXPECT_EQ(iree_string_builder_size(builder), 0); + IREE_EXPECT_OK(iree_string_builder_append_cstring(builder, "")); + EXPECT_EQ(builder.ToString(), ""); + IREE_EXPECT_OK(iree_string_builder_append_cstring(builder, "a")); + EXPECT_EQ(builder.ToString(), "a"); + EXPECT_EQ(strlen(builder.builder.buffer), 1); // NUL check + IREE_EXPECT_OK(iree_string_builder_append_cstring(builder, "abc")); + EXPECT_EQ(builder.ToString(), 
"aabc"); + EXPECT_EQ(strlen(builder.builder.buffer), 1 + 3); // NUL check + IREE_EXPECT_OK(iree_string_builder_append_cstring(builder, "")); + EXPECT_EQ(builder.ToString(), "aabc"); + EXPECT_EQ(iree_string_builder_size(builder), 1 + 3); + EXPECT_EQ(strlen(builder.builder.buffer), 1 + 3); // NUL check + + char kLongString[1024]; + memset(kLongString, 'x', IREE_ARRAYSIZE(kLongString)); + IREE_EXPECT_OK(iree_string_builder_append_string( + builder, + iree_make_string_view(kLongString, IREE_ARRAYSIZE(kLongString)))); + EXPECT_EQ(iree_string_builder_size(builder), + 1 + 3 + IREE_ARRAYSIZE(kLongString)); + EXPECT_EQ(strlen(builder.builder.buffer), + 1 + 3 + IREE_ARRAYSIZE(kLongString)); // NUL check + EXPECT_EQ(builder.ToString(), + std::string("aabc") + + std::string(kLongString, IREE_ARRAYSIZE(kLongString))); +} + +TEST(StringBuilderTest, TakeStorage) { + auto builder = StringBuilder::MakeSystem(); + EXPECT_EQ(iree_string_builder_size(builder), 0); + IREE_EXPECT_OK(iree_string_builder_append_cstring(builder, "a")); + EXPECT_EQ(builder.ToString(), "a"); + IREE_EXPECT_OK(iree_string_builder_append_cstring(builder, "abc")); + EXPECT_EQ(builder.ToString(), "aabc"); + EXPECT_EQ(iree_string_builder_size(builder), 1 + 3); + EXPECT_EQ(strlen(builder.builder.buffer), + 1 + 3); // NUL check + + char* storage = iree_string_builder_take_storage(builder); + EXPECT_EQ(iree_string_builder_buffer(builder), + static_cast<const char*>(NULL)); + EXPECT_EQ(iree_string_builder_size(builder), 0); + EXPECT_EQ(iree_string_builder_capacity(builder), 0); + EXPECT_NE(storage, static_cast<char*>(NULL)); + EXPECT_STREQ(storage, "aabc"); + EXPECT_EQ(builder.builder.buffer, static_cast<char*>(NULL)); + iree_allocator_free(builder.builder.allocator, storage); +} + +TEST(StringBuilderTest, Format) { + auto builder = StringBuilder::MakeSystem(); + EXPECT_EQ(builder.ToString(), ""); + IREE_EXPECT_OK(iree_string_builder_append_format(builder, "")); + EXPECT_EQ(builder.ToString(), ""); + 
IREE_EXPECT_OK(iree_string_builder_append_format(builder, "abc")); + EXPECT_EQ(builder.ToString(), "abc"); + IREE_EXPECT_OK(iree_string_builder_append_format(builder, "a%cc", 'b')); + EXPECT_EQ(builder.ToString(), "abcabc"); + IREE_EXPECT_OK(iree_string_builder_append_format(builder, "%*c", 1024, 'x')); + EXPECT_EQ(iree_string_builder_size(builder), 6 + 1024); + EXPECT_EQ(strlen(builder.builder.buffer), 6 + 1024); // NUL check + EXPECT_EQ(builder.ToString(), + std::string("abcabc") + std::string(1023, ' ') + std::string("x")); +} + +} // namespace
diff --git a/iree/base/string_view.c b/iree/base/string_view.c index efbae6e..f117939 100644 --- a/iree/base/string_view.c +++ b/iree/base/string_view.c
@@ -79,7 +79,7 @@ for (iree_host_size_t i = 0; i < s.size; ++i) { lookup_table[(uint8_t)s.data[i]] = true; } - pos = iree_min(pos, value.size); + pos = iree_min(pos, value.size) + 1; iree_host_size_t i = pos; while (i != 0) { --i;
diff --git a/iree/base/string_view.h b/iree/base/string_view.h index af5f499..90350ca 100644 --- a/iree/base/string_view.h +++ b/iree/base/string_view.h
@@ -70,12 +70,12 @@ iree_string_view_t value, char c, iree_host_size_t pos); // Returns the index of the first occurrence of one of the characters in |s| or -// -1 if none of the characters were found. +// IREE_STRING_VIEW_NPOS if none of the characters were found. IREE_API_EXPORT iree_host_size_t iree_string_view_find_first_of( iree_string_view_t value, iree_string_view_t s, iree_host_size_t pos); // Returns the index of the last occurrence of one of the characters in |s| or -// -1 if none of the characters were found. +// IREE_STRING_VIEW_NPOS if none of the characters were found. IREE_API_EXPORT iree_host_size_t iree_string_view_find_last_of( iree_string_view_t value, iree_string_view_t s, iree_host_size_t pos);
diff --git a/iree/base/string_view_test.cc b/iree/base/string_view_test.cc index 8bbde1c..ac5a713 100644 --- a/iree/base/string_view_test.cc +++ b/iree/base/string_view_test.cc
@@ -15,23 +15,141 @@ return std::string(value.data, value.size); } +TEST(StringViewTest, Equal) { + auto equal = [](const char* lhs, const char* rhs) -> bool { + return iree_string_view_equal(iree_make_cstring_view(lhs), + iree_make_cstring_view(rhs)); + }; + EXPECT_TRUE(equal("", "")); + EXPECT_FALSE(equal("a", "")); + EXPECT_FALSE(equal("", "a")); + EXPECT_TRUE(equal("a", "a")); + EXPECT_FALSE(equal("a", "ab")); + EXPECT_FALSE(equal("b", "ab")); + EXPECT_TRUE(equal("abc", "abc")); + EXPECT_FALSE(equal("abc", "aBc")); +} + +TEST(StringViewTest, FindChar) { + auto find_char = [](const char* value, char c, iree_host_size_t pos) { + return iree_string_view_find_char(iree_make_cstring_view(value), c, pos); + }; + EXPECT_EQ(find_char("", 'x', 0), IREE_STRING_VIEW_NPOS); + EXPECT_EQ(find_char("", 'x', 1), IREE_STRING_VIEW_NPOS); + EXPECT_EQ(find_char("", 'x', IREE_STRING_VIEW_NPOS), IREE_STRING_VIEW_NPOS); + EXPECT_EQ(find_char("x", 'x', 0), 0); + EXPECT_EQ(find_char("x", 'x', 1), IREE_STRING_VIEW_NPOS); + EXPECT_EQ(find_char("x", 'x', IREE_STRING_VIEW_NPOS), IREE_STRING_VIEW_NPOS); + EXPECT_EQ(find_char("abc", 'x', 0), IREE_STRING_VIEW_NPOS); + EXPECT_EQ(find_char("abc", 'x', 1), IREE_STRING_VIEW_NPOS); + EXPECT_EQ(find_char("abc", 'x', IREE_STRING_VIEW_NPOS), + IREE_STRING_VIEW_NPOS); + EXPECT_EQ(find_char("axbxc", 'x', 0), 1); + EXPECT_EQ(find_char("axbxc", 'x', 1), 1); + EXPECT_EQ(find_char("axbxc", 'x', 2), 3); + EXPECT_EQ(find_char("axbxc", 'x', 3), 3); + EXPECT_EQ(find_char("axbxc", 'x', 4), IREE_STRING_VIEW_NPOS); + EXPECT_EQ(find_char("axbxc", 'x', IREE_STRING_VIEW_NPOS), + IREE_STRING_VIEW_NPOS); +} + +TEST(StringViewTest, FindFirstOf) { + auto find_first_of = [](const char* value, const char* s, + iree_host_size_t pos) { + return iree_string_view_find_first_of(iree_make_cstring_view(value), + iree_make_cstring_view(s), pos); + }; + EXPECT_EQ(find_first_of("", "", 0), IREE_STRING_VIEW_NPOS); + EXPECT_EQ(find_first_of("", "", 1), IREE_STRING_VIEW_NPOS); + 
EXPECT_EQ(find_first_of("", "", IREE_STRING_VIEW_NPOS), + IREE_STRING_VIEW_NPOS); + EXPECT_EQ(find_first_of("", "x", 0), IREE_STRING_VIEW_NPOS); + EXPECT_EQ(find_first_of("", "x", 1), IREE_STRING_VIEW_NPOS); + EXPECT_EQ(find_first_of("", "x", IREE_STRING_VIEW_NPOS), + IREE_STRING_VIEW_NPOS); + EXPECT_EQ(find_first_of("x", "x", 0), 0); + EXPECT_EQ(find_first_of("x", "x", 1), IREE_STRING_VIEW_NPOS); + EXPECT_EQ(find_first_of("x", "x", IREE_STRING_VIEW_NPOS), + IREE_STRING_VIEW_NPOS); + EXPECT_EQ(find_first_of("x", "", 0), IREE_STRING_VIEW_NPOS); + EXPECT_EQ(find_first_of("x", "", 1), IREE_STRING_VIEW_NPOS); + EXPECT_EQ(find_first_of("x", "", IREE_STRING_VIEW_NPOS), + IREE_STRING_VIEW_NPOS); + EXPECT_EQ(find_first_of("abc", "x", 0), IREE_STRING_VIEW_NPOS); + EXPECT_EQ(find_first_of("abc", "x", 1), IREE_STRING_VIEW_NPOS); + EXPECT_EQ(find_first_of("abc", "x", IREE_STRING_VIEW_NPOS), + IREE_STRING_VIEW_NPOS); + EXPECT_EQ(find_first_of("axbxc", "xy", 0), 1); + EXPECT_EQ(find_first_of("axbxc", "xy", 1), 1); + EXPECT_EQ(find_first_of("axbxc", "xy", 2), 3); + EXPECT_EQ(find_first_of("axbxc", "xy", 3), 3); + EXPECT_EQ(find_first_of("axbxc", "xy", 4), IREE_STRING_VIEW_NPOS); + EXPECT_EQ(find_first_of("axbxc", "xy", IREE_STRING_VIEW_NPOS), + IREE_STRING_VIEW_NPOS); + EXPECT_EQ(find_first_of("aybxc", "xy", 0), 1); + EXPECT_EQ(find_first_of("aybxc", "xy", 1), 1); + EXPECT_EQ(find_first_of("aybxc", "xy", 2), 3); + EXPECT_EQ(find_first_of("aybxc", "xy", 3), 3); + EXPECT_EQ(find_first_of("aybxc", "xy", 4), IREE_STRING_VIEW_NPOS); + EXPECT_EQ(find_first_of("aybxc", "xy", IREE_STRING_VIEW_NPOS), + IREE_STRING_VIEW_NPOS); +} + +TEST(StringViewTest, FindLastOf) { + auto find_last_of = [](const char* value, const char* s, + iree_host_size_t pos) { + return iree_string_view_find_last_of(iree_make_cstring_view(value), + iree_make_cstring_view(s), pos); + }; + EXPECT_EQ(find_last_of("", "", 0), IREE_STRING_VIEW_NPOS); + EXPECT_EQ(find_last_of("", "", 1), IREE_STRING_VIEW_NPOS); + 
EXPECT_EQ(find_last_of("", "", IREE_STRING_VIEW_NPOS), IREE_STRING_VIEW_NPOS); + EXPECT_EQ(find_last_of("", "x", 0), IREE_STRING_VIEW_NPOS); + EXPECT_EQ(find_last_of("", "x", 1), IREE_STRING_VIEW_NPOS); + EXPECT_EQ(find_last_of("", "x", IREE_STRING_VIEW_NPOS), + IREE_STRING_VIEW_NPOS); + EXPECT_EQ(find_last_of("x", "x", 0), 0); + EXPECT_EQ(find_last_of("x", "x", 1), 0); + EXPECT_EQ(find_last_of("x", "x", IREE_STRING_VIEW_NPOS), 0); + EXPECT_EQ(find_last_of("x", "", 0), IREE_STRING_VIEW_NPOS); + EXPECT_EQ(find_last_of("x", "", 1), IREE_STRING_VIEW_NPOS); + EXPECT_EQ(find_last_of("x", "", IREE_STRING_VIEW_NPOS), + IREE_STRING_VIEW_NPOS); + EXPECT_EQ(find_last_of("abc", "x", 0), IREE_STRING_VIEW_NPOS); + EXPECT_EQ(find_last_of("abc", "x", 1), IREE_STRING_VIEW_NPOS); + EXPECT_EQ(find_last_of("abc", "x", IREE_STRING_VIEW_NPOS), + IREE_STRING_VIEW_NPOS); + EXPECT_EQ(find_last_of("axbxc", "xy", 0), IREE_STRING_VIEW_NPOS); + EXPECT_EQ(find_last_of("axbxc", "xy", 1), 1); + EXPECT_EQ(find_last_of("axbxc", "xy", 2), 1); + EXPECT_EQ(find_last_of("axbxc", "xy", 3), 3); + EXPECT_EQ(find_last_of("axbxc", "xy", 4), 3); + EXPECT_EQ(find_last_of("axbxc", "xy", IREE_STRING_VIEW_NPOS), 3); + EXPECT_EQ(find_last_of("aybxc", "xy", 0), IREE_STRING_VIEW_NPOS); + EXPECT_EQ(find_last_of("aybxc", "xy", 1), 1); + EXPECT_EQ(find_last_of("aybxc", "xy", 2), 1); + EXPECT_EQ(find_last_of("aybxc", "xy", 3), 3); + EXPECT_EQ(find_last_of("aybxc", "xy", 4), 3); + EXPECT_EQ(find_last_of("aybxc", "xy", IREE_STRING_VIEW_NPOS), 3); +} + TEST(StringViewTest, StartsWith) { auto starts_with = [](const char* value, const char* prefix) -> bool { return iree_string_view_starts_with(iree_make_cstring_view(value), iree_make_cstring_view(prefix)); }; - ASSERT_TRUE(starts_with("a", "a")); - ASSERT_TRUE(starts_with("ab", "a")); - ASSERT_TRUE(starts_with("ab", "ab")); - ASSERT_TRUE(starts_with("abc", "ab")); - ASSERT_TRUE(starts_with("abc", "abc")); - ASSERT_FALSE(starts_with("abc", "")); - 
ASSERT_FALSE(starts_with("", "")); - ASSERT_FALSE(starts_with("", "a")); - ASSERT_FALSE(starts_with("", "abc")); - ASSERT_FALSE(starts_with("abc", "b")); - ASSERT_FALSE(starts_with("abc", "bc")); - ASSERT_FALSE(starts_with("a", "abc")); + EXPECT_TRUE(starts_with("a", "a")); + EXPECT_TRUE(starts_with("ab", "a")); + EXPECT_TRUE(starts_with("ab", "ab")); + EXPECT_TRUE(starts_with("abc", "ab")); + EXPECT_TRUE(starts_with("abc", "abc")); + EXPECT_FALSE(starts_with("abc", "")); + EXPECT_FALSE(starts_with("", "")); + EXPECT_FALSE(starts_with("", "a")); + EXPECT_FALSE(starts_with("", "abc")); + EXPECT_FALSE(starts_with("abc", "b")); + EXPECT_FALSE(starts_with("abc", "bc")); + EXPECT_FALSE(starts_with("a", "abc")); } TEST(StringViewTest, EndsWith) { @@ -39,18 +157,18 @@ return iree_string_view_ends_with(iree_make_cstring_view(value), iree_make_cstring_view(suffix)); }; - ASSERT_TRUE(ends_with("a", "a")); - ASSERT_TRUE(ends_with("ab", "b")); - ASSERT_TRUE(ends_with("ab", "ab")); - ASSERT_TRUE(ends_with("abc", "bc")); - ASSERT_TRUE(ends_with("abc", "c")); - ASSERT_FALSE(ends_with("abc", "")); - ASSERT_FALSE(ends_with("", "")); - ASSERT_FALSE(ends_with("", "a")); - ASSERT_FALSE(ends_with("", "abc")); - ASSERT_FALSE(ends_with("abc", "b")); - ASSERT_FALSE(ends_with("abc", "ab")); - ASSERT_FALSE(ends_with("a", "abc")); + EXPECT_TRUE(ends_with("a", "a")); + EXPECT_TRUE(ends_with("ab", "b")); + EXPECT_TRUE(ends_with("ab", "ab")); + EXPECT_TRUE(ends_with("abc", "bc")); + EXPECT_TRUE(ends_with("abc", "c")); + EXPECT_FALSE(ends_with("abc", "")); + EXPECT_FALSE(ends_with("", "")); + EXPECT_FALSE(ends_with("", "a")); + EXPECT_FALSE(ends_with("", "abc")); + EXPECT_FALSE(ends_with("abc", "b")); + EXPECT_FALSE(ends_with("abc", "ab")); + EXPECT_FALSE(ends_with("a", "abc")); } TEST(StringViewTest, RemovePrefix) { @@ -59,12 +177,12 @@ return ToString( iree_string_view_remove_prefix(iree_make_cstring_view(value), n)); }; - ASSERT_EQ(remove_prefix("", 0), ""); - ASSERT_EQ(remove_prefix("", 1), 
""); - ASSERT_EQ(remove_prefix("a", 10), ""); - ASSERT_EQ(remove_prefix("ab", 1), "b"); - ASSERT_EQ(remove_prefix("ab", 2), ""); - ASSERT_EQ(remove_prefix("abcdef", 2), "cdef"); + EXPECT_EQ(remove_prefix("", 0), ""); + EXPECT_EQ(remove_prefix("", 1), ""); + EXPECT_EQ(remove_prefix("a", 10), ""); + EXPECT_EQ(remove_prefix("ab", 1), "b"); + EXPECT_EQ(remove_prefix("ab", 2), ""); + EXPECT_EQ(remove_prefix("abcdef", 2), "cdef"); } TEST(StringViewTest, RemoveSuffix) { @@ -73,27 +191,175 @@ return ToString( iree_string_view_remove_suffix(iree_make_cstring_view(value), n)); }; - ASSERT_EQ(remove_suffix("", 0), ""); - ASSERT_EQ(remove_suffix("", 1), ""); - ASSERT_EQ(remove_suffix("a", 10), ""); - ASSERT_EQ(remove_suffix("ab", 1), "a"); - ASSERT_EQ(remove_suffix("ab", 2), ""); - ASSERT_EQ(remove_suffix("abcdef", 2), "abcd"); + EXPECT_EQ(remove_suffix("", 0), ""); + EXPECT_EQ(remove_suffix("", 1), ""); + EXPECT_EQ(remove_suffix("a", 10), ""); + EXPECT_EQ(remove_suffix("ab", 1), "a"); + EXPECT_EQ(remove_suffix("ab", 2), ""); + EXPECT_EQ(remove_suffix("abcdef", 2), "abcd"); +} + +TEST(StringViewTest, StripPrefix) { + auto strip_prefix = [](const char* value, const char* prefix) -> std::string { + return ToString(iree_string_view_strip_prefix( + iree_make_cstring_view(value), iree_make_cstring_view(prefix))); + }; + EXPECT_EQ(strip_prefix("", ""), ""); + EXPECT_EQ(strip_prefix("", "a"), ""); + EXPECT_EQ(strip_prefix("a", ""), "a"); + EXPECT_EQ(strip_prefix("a", "a"), ""); + EXPECT_EQ(strip_prefix("ab", "a"), "b"); + EXPECT_EQ(strip_prefix("ab", "b"), "ab"); + EXPECT_EQ(strip_prefix("ab", "ab"), ""); + EXPECT_EQ(strip_prefix("ab", "abc"), "ab"); + EXPECT_EQ(strip_prefix("abcdef", "ab"), "cdef"); + EXPECT_EQ(strip_prefix("abcdef", "bc"), "abcdef"); +} + +TEST(StringViewTest, StripSuffix) { + auto strip_suffix = [](const char* value, const char* suffix) -> std::string { + return ToString(iree_string_view_strip_suffix( + iree_make_cstring_view(value), 
iree_make_cstring_view(suffix))); + }; + EXPECT_EQ(strip_suffix("", ""), ""); + EXPECT_EQ(strip_suffix("", "a"), ""); + EXPECT_EQ(strip_suffix("a", ""), "a"); + EXPECT_EQ(strip_suffix("a", "a"), ""); + EXPECT_EQ(strip_suffix("ab", "a"), "ab"); + EXPECT_EQ(strip_suffix("ab", "b"), "a"); + EXPECT_EQ(strip_suffix("ab", "ab"), ""); + EXPECT_EQ(strip_suffix("ab", "abc"), "ab"); + EXPECT_EQ(strip_suffix("abcdef", "ef"), "abcd"); + EXPECT_EQ(strip_suffix("abcdef", "de"), "abcdef"); +} + +TEST(StringViewTest, ConsumePrefix) { + auto consume_prefix = [](const char* value, + const char* prefix) -> std::string { + iree_string_view_t value_sv = iree_make_cstring_view(value); + if (iree_string_view_consume_prefix(&value_sv, + iree_make_cstring_view(prefix))) { + return ToString(value_sv); + } else { + return "FAILED"; + } + }; + EXPECT_EQ(consume_prefix("", ""), "FAILED"); + EXPECT_EQ(consume_prefix("", "a"), "FAILED"); + EXPECT_EQ(consume_prefix("a", ""), "FAILED"); + EXPECT_EQ(consume_prefix("a", "a"), ""); + EXPECT_EQ(consume_prefix("ab", "a"), "b"); + EXPECT_EQ(consume_prefix("ab", "b"), "FAILED"); + EXPECT_EQ(consume_prefix("ab", "ab"), ""); + EXPECT_EQ(consume_prefix("ab", "abc"), "FAILED"); + EXPECT_EQ(consume_prefix("abcdef", "ab"), "cdef"); + EXPECT_EQ(consume_prefix("abcdef", "bc"), "FAILED"); +} + +TEST(StringViewTest, ConsumeSuffix) { + auto consume_suffix = [](const char* value, + const char* suffix) -> std::string { + iree_string_view_t value_sv = iree_make_cstring_view(value); + if (iree_string_view_consume_suffix(&value_sv, + iree_make_cstring_view(suffix))) { + return ToString(value_sv); + } else { + return "FAILED"; + } + }; + EXPECT_EQ(consume_suffix("", ""), "FAILED"); + EXPECT_EQ(consume_suffix("", "a"), "FAILED"); + EXPECT_EQ(consume_suffix("a", ""), "FAILED"); + EXPECT_EQ(consume_suffix("a", "a"), ""); + EXPECT_EQ(consume_suffix("ab", "a"), "FAILED"); + EXPECT_EQ(consume_suffix("ab", "b"), "a"); + EXPECT_EQ(consume_suffix("ab", "ab"), ""); + 
EXPECT_EQ(consume_suffix("ab", "abc"), "FAILED"); + EXPECT_EQ(consume_suffix("abcdef", "ef"), "abcd"); + EXPECT_EQ(consume_suffix("abcdef", "de"), "FAILED"); } TEST(StringViewTest, Trim) { auto trim = [](const char* value) -> std::string { return ToString(iree_string_view_trim(iree_make_cstring_view(value))); }; - ASSERT_EQ(trim(""), ""); - ASSERT_EQ(trim("a"), "a"); - ASSERT_EQ(trim(" a"), "a"); - ASSERT_EQ(trim("a "), "a"); - ASSERT_EQ(trim("a b"), "a b"); - ASSERT_EQ(trim(" a b "), "a b"); - ASSERT_EQ(trim("\t\t\na b\n \t "), "a b"); - ASSERT_EQ(trim("\n"), ""); - ASSERT_EQ(trim("\r\n"), ""); + EXPECT_EQ(trim(""), ""); + EXPECT_EQ(trim("a"), "a"); + EXPECT_EQ(trim(" a"), "a"); + EXPECT_EQ(trim("a "), "a"); + EXPECT_EQ(trim("a b"), "a b"); + EXPECT_EQ(trim(" a b "), "a b"); + EXPECT_EQ(trim("\t\t\na b\n \t "), "a b"); + EXPECT_EQ(trim("\n"), ""); + EXPECT_EQ(trim("\r\n"), ""); +} + +TEST(StringViewTest, Substr) { + auto substr = [](const char* value, iree_host_size_t pos, + iree_host_size_t n) { + return ToString( + iree_string_view_substr(iree_make_cstring_view(value), pos, n)); + }; + EXPECT_EQ(substr("", 0, 0), ""); + EXPECT_EQ(substr("", 0, 1), ""); + EXPECT_EQ(substr("", 0, INTPTR_MAX), ""); + EXPECT_EQ(substr("", 1, 0), ""); + EXPECT_EQ(substr("", 1, 1), ""); + EXPECT_EQ(substr("", 1, INTPTR_MAX), ""); + + EXPECT_EQ(substr("a", 0, 0), ""); + EXPECT_EQ(substr("a", 0, 1), "a"); + EXPECT_EQ(substr("a", 0, 2), "a"); + EXPECT_EQ(substr("a", 0, INTPTR_MAX), "a"); + EXPECT_EQ(substr("a", 1, 0), ""); + EXPECT_EQ(substr("a", 1, 1), ""); + EXPECT_EQ(substr("a", 1, INTPTR_MAX), ""); + + EXPECT_EQ(substr("abc", 0, 1), "a"); + EXPECT_EQ(substr("abc", 1, 1), "b"); + EXPECT_EQ(substr("abc", 2, 1), "c"); + EXPECT_EQ(substr("abc", 0, 2), "ab"); + EXPECT_EQ(substr("abc", 1, 2), "bc"); + EXPECT_EQ(substr("abc", 1, INTPTR_MAX), "bc"); + EXPECT_EQ(substr("abc", 0, 3), "abc"); + EXPECT_EQ(substr("abc", 0, INTPTR_MAX), "abc"); +} + +TEST(StringViewTest, Split) { + auto split = + 
[](const char* value, + char split_char) -> std::tuple<intptr_t, std::string, std::string> { + iree_string_view_t lhs; + iree_string_view_t rhs; + intptr_t index = iree_string_view_split(iree_make_cstring_view(value), + split_char, &lhs, &rhs); + return std::make_tuple(index, ToString(lhs), ToString(rhs)); + }; + EXPECT_EQ(split("", 'x'), std::make_tuple(-1, "", "")); + EXPECT_EQ(split(" ", 'x'), std::make_tuple(-1, " ", "")); + EXPECT_EQ(split("x", 'x'), std::make_tuple(0, "", "")); + EXPECT_EQ(split(" x ", 'x'), std::make_tuple(1, " ", " ")); + EXPECT_EQ(split("axb", 'x'), std::make_tuple(1, "a", "b")); + EXPECT_EQ(split("axxxb", 'x'), std::make_tuple(1, "a", "xxb")); + EXPECT_EQ(split("ax", 'x'), std::make_tuple(1, "a", "")); + EXPECT_EQ(split("xb", 'x'), std::make_tuple(0, "", "b")); + EXPECT_EQ(split("axbxc", 'x'), std::make_tuple(1, "a", "bxc")); +} + +TEST(StringViewTest, ReplaceChar) { + auto replace_char = [](const char* value, char old_char, char new_char) { + std::string value_clone(value); + iree_string_view_replace_char( + iree_make_string_view(value_clone.data(), value_clone.size()), old_char, + new_char); + return value_clone; + }; + EXPECT_EQ(replace_char("", 'x', 'y'), ""); + EXPECT_EQ(replace_char(" ", 'x', 'y'), " "); + EXPECT_EQ(replace_char("a", 'x', 'y'), "a"); + EXPECT_EQ(replace_char("x", 'x', 'y'), "y"); + EXPECT_EQ(replace_char("xx", 'x', 'y'), "yy"); + EXPECT_EQ(replace_char("axbxc", 'x', 'y'), "aybyc"); } } // namespace
diff --git a/iree/compiler/Bindings/Native/Transforms/BUILD b/iree/compiler/Bindings/Native/Transforms/BUILD index d2f45e4..f5d3bb1 100644 --- a/iree/compiler/Bindings/Native/Transforms/BUILD +++ b/iree/compiler/Bindings/Native/Transforms/BUILD
@@ -22,10 +22,10 @@ deps = [ "//iree/compiler/Dialect/Flow/IR", "//iree/compiler/Dialect/HAL/IR", - "//iree/compiler/Dialect/IREE/IR", "//iree/compiler/Dialect/Shape/IR", "//iree/compiler/Dialect/Shape/Transforms", "//iree/compiler/Dialect/Shape/Utils:TypeConversion", + "//iree/compiler/Dialect/Util/IR", "//iree/compiler/Utils", "@llvm-project//llvm:Support", "@llvm-project//mlir:IR",
diff --git a/iree/compiler/Bindings/Native/Transforms/CMakeLists.txt b/iree/compiler/Bindings/Native/Transforms/CMakeLists.txt index f1086e1..d94dfaa 100644 --- a/iree/compiler/Bindings/Native/Transforms/CMakeLists.txt +++ b/iree/compiler/Bindings/Native/Transforms/CMakeLists.txt
@@ -32,10 +32,10 @@ MLIRTransforms iree::compiler::Dialect::Flow::IR iree::compiler::Dialect::HAL::IR - iree::compiler::Dialect::IREE::IR iree::compiler::Dialect::Shape::IR iree::compiler::Dialect::Shape::Transforms iree::compiler::Dialect::Shape::Utils::TypeConversion + iree::compiler::Dialect::Util::IR iree::compiler::Utils PUBLIC )
diff --git a/iree/compiler/Bindings/TFLite/Transforms/BUILD b/iree/compiler/Bindings/TFLite/Transforms/BUILD index 952494d..f123eb3 100644 --- a/iree/compiler/Bindings/TFLite/Transforms/BUILD +++ b/iree/compiler/Bindings/TFLite/Transforms/BUILD
@@ -22,10 +22,10 @@ ], deps = [ "//iree/compiler/Dialect/Flow/IR", - "//iree/compiler/Dialect/IREE/IR", "//iree/compiler/Dialect/Shape/IR", "//iree/compiler/Dialect/Shape/Transforms", "//iree/compiler/Dialect/Shape/Utils:TypeConversion", + "//iree/compiler/Dialect/Util/IR", "//iree/compiler/Utils", "@llvm-project//llvm:Support", "@llvm-project//mlir:IR",
diff --git a/iree/compiler/Bindings/TFLite/Transforms/CMakeLists.txt b/iree/compiler/Bindings/TFLite/Transforms/CMakeLists.txt index 7e070a2..7fc31bf 100644 --- a/iree/compiler/Bindings/TFLite/Transforms/CMakeLists.txt +++ b/iree/compiler/Bindings/TFLite/Transforms/CMakeLists.txt
@@ -31,10 +31,10 @@ MLIRTransformUtils MLIRTransforms iree::compiler::Dialect::Flow::IR - iree::compiler::Dialect::IREE::IR iree::compiler::Dialect::Shape::IR iree::compiler::Dialect::Shape::Transforms iree::compiler::Dialect::Shape::Utils::TypeConversion + iree::compiler::Dialect::Util::IR iree::compiler::Utils PUBLIC )
diff --git a/iree/compiler/Bindings/TFLite/Transforms/MaterializeShapeSupport.cpp b/iree/compiler/Bindings/TFLite/Transforms/MaterializeShapeSupport.cpp index f47125a..3acb9a2 100644 --- a/iree/compiler/Bindings/TFLite/Transforms/MaterializeShapeSupport.cpp +++ b/iree/compiler/Bindings/TFLite/Transforms/MaterializeShapeSupport.cpp
@@ -6,10 +6,10 @@ #include "iree/compiler/Dialect/Flow/IR/FlowDialect.h" #include "iree/compiler/Dialect/Flow/IR/FlowOps.h" -#include "iree/compiler/Dialect/IREE/IR/IREEDialect.h" -#include "iree/compiler/Dialect/IREE/IR/IREEOps.h" #include "iree/compiler/Dialect/Shape/IR/ShapeDialect.h" #include "iree/compiler/Dialect/Shape/IR/ShapeOps.h" +#include "iree/compiler/Dialect/Util/IR/UtilDialect.h" +#include "iree/compiler/Dialect/Util/IR/UtilOps.h" #include "llvm/ADT/STLExtras.h" #include "mlir/Dialect/StandardOps/IR/Ops.h" #include "mlir/IR/Attributes.h" @@ -40,7 +40,7 @@ public: void getDependentDialects(DialectRegistry ®istry) const override { registry.insert<iree_compiler::IREE::Flow::FlowDialect>(); - registry.insert<iree_compiler::IREEDialect>(); + registry.insert<iree_compiler::IREE::Util::UtilDialect>(); registry.insert<iree_compiler::ShapeDialect>(); registry.insert<StandardOpsDialect>(); } @@ -210,8 +210,9 @@ llvm::zip(entryBlock.getArguments(), inputVarOps)) { auto inputValue = std::get<0>(inputValueVar); auto inputVarOp = std::get<1>(inputValueVar); - auto inputPlaceholder = recalculateBuilder.createOrFold<IREE::NullOp>( - loc, inputValue.getType()); + auto inputPlaceholder = + recalculateBuilder.createOrFold<IREE::Util::NullOp>( + loc, inputValue.getType()); auto inputShapeValue = recalculateBuilder.createOrFold<IREE::Flow::VariableLoadOp>( loc, inputVarOp.type(), inputVarOp.getName()); @@ -300,13 +301,13 @@ // Packs a shape into a list. 
void packShape(Location loc, Shape::RankedShapeType shapeType, Value shapeValue, Value listValue, OpBuilder &builder) { - builder.create<IREE::ListResizeOp>( + builder.create<IREE::Util::ListResizeOp>( loc, listValue, builder.createOrFold<ConstantIndexOp>(loc, shapeType.getRank())); for (int i = 0; i < shapeType.getRank(); ++i) { auto dimValue = builder.createOrFold<Shape::RankedDimOp>(loc, shapeValue, i); - builder.create<IREE::ListSetOp>( + builder.create<IREE::Util::ListSetOp>( loc, listValue, builder.createOrFold<ConstantIndexOp>(loc, i), dimValue); } @@ -318,7 +319,7 @@ SmallVector<Value, 4> dynamicDims; for (int i = 0; i < shapeType.getRank(); ++i) { if (!shapeType.isDimDynamic(i)) continue; - dynamicDims.push_back(builder.createOrFold<IREE::ListGetOp>( + dynamicDims.push_back(builder.createOrFold<IREE::Util::ListGetOp>( loc, builder.getIndexType(), listValue, builder.createOrFold<ConstantIndexOp>(loc, i))); } @@ -328,7 +329,7 @@ // Creates a function to query the |inputVarOps| at runtime by the bindings. // - // func @_query_input_shape(%index : index, %shape : !iree.list<index>) + // func @_query_input_shape(%index : index, %shape : !util.list<index>) void createQueryInputShapeFunc(Location loc, StringRef namePrefix, ArrayRef<IREE::Flow::VariableOp> inputVarOps, OpBuilder &moduleBuilder) { @@ -337,7 +338,7 @@ moduleBuilder.getFunctionType(/*inputs=*/ TypeRange{ moduleBuilder.getIndexType(), - IREE::ListType::get( + IREE::Util::ListType::get( moduleBuilder.getIndexType()), }, /*outputs=*/TypeRange{})); @@ -364,7 +365,7 @@ // Creates a function to resize |inputVarOps| and sets the |dirtyVarOp| flag. 
// - // func @_resize_input_shape(%index : index, %shape : !iree.list<index>) + // func @_resize_input_shape(%index : index, %shape : !util.list<index>) void createResizeInputShapeFunc(Location loc, StringRef namePrefix, ArrayRef<IREE::Flow::VariableOp> inputVarOps, IREE::Flow::VariableOp dirtyVarOp, @@ -374,7 +375,7 @@ moduleBuilder.getFunctionType(/*inputs=*/ TypeRange{ moduleBuilder.getIndexType(), - IREE::ListType::get( + IREE::Util::ListType::get( moduleBuilder.getIndexType()), }, /*outputs=*/TypeRange{})); @@ -404,7 +405,7 @@ // Creates a function to query the |outputVarOps| at runtime by the bindings. // - // func @_query_output_shape(%index : index, %shape : !iree.list<index>) + // func @_query_output_shape(%index : index, %shape : !util.list<index>) void createQueryOutputShapeFunc(Location loc, StringRef namePrefix, ArrayRef<IREE::Flow::VariableOp> outputVarOps, FuncOp calculateShapeFuncOp, @@ -414,7 +415,7 @@ moduleBuilder.getFunctionType(/*inputs=*/ TypeRange{ moduleBuilder.getIndexType(), - IREE::ListType::get( + IREE::Util::ListType::get( moduleBuilder.getIndexType()), }, /*outputs=*/TypeRange{}));
diff --git a/iree/compiler/Bindings/TFLite/Transforms/test/materialize_shape_support.mlir b/iree/compiler/Bindings/TFLite/Transforms/test/materialize_shape_support.mlir index 05fd29e..94ab7f5 100644 --- a/iree/compiler/Bindings/TFLite/Transforms/test/materialize_shape_support.mlir +++ b/iree/compiler/Bindings/TFLite/Transforms/test/materialize_shape_support.mlir
@@ -13,10 +13,10 @@ // CHECK-NEXT: %[[IS_DIRTY:.+]] = flow.variable.load @_tflite_dynamicEntry_shapes_dirty : i1 // CHECK-NEXT: cond_br %[[IS_DIRTY]], ^bb1, ^bb2 // CHECK-NEXT: ^bb1: -// CHECK-NEXT: %[[IN0_NULL:.+]] = iree.null : tensor<?x8x8x3xf32> +// CHECK-NEXT: %[[IN0_NULL:.+]] = util.null : tensor<?x8x8x3xf32> // CHECK-NEXT: %[[IN0_SHAPE:.+]] = flow.variable.load @_tflite_dynamicEntry_input0_shape : !shapex.ranked_shape<[?,8,8,3]> // CHECK-NEXT: %[[IN0:.+]] = shapex.tie_shape %[[IN0_NULL]], %[[IN0_SHAPE]] : tensor<?x8x8x3xf32>, !shapex.ranked_shape<[?,8,8,3]> -// CHECK-NEXT: %[[IN1_NULL:.+]] = iree.null : tensor<?x8x8x3xf32> +// CHECK-NEXT: %[[IN1_NULL:.+]] = util.null : tensor<?x8x8x3xf32> // CHECK-NEXT: %[[IN1_SHAPE:.+]] = flow.variable.load @_tflite_dynamicEntry_input1_shape : !shapex.ranked_shape<[?,8,8,3]> // CHECK-NEXT: %[[IN1:.+]] = shapex.tie_shape %[[IN1_NULL]], %[[IN1_SHAPE]] : tensor<?x8x8x3xf32>, !shapex.ranked_shape<[?,8,8,3]> // CHECK-NEXT: %[[TMP:.+]]:2 = call @dynamicEntry(%[[IN0]], %[[IN1]]) @@ -31,40 +31,40 @@ // CHECK-NEXT: } // CHECK-LABEL: func @_tflite_dynamicEntry_query_input_shape -// CHECK-SAME: (%[[INDEX:.+]]: index, %[[LIST:.+]]: !iree.list<index>) +// CHECK-SAME: (%[[INDEX:.+]]: index, %[[LIST:.+]]: !util.list<index>) // CHECK: %[[IS_0:.+]] = cmpi eq, %[[INDEX]], %c0 : index // CHECK-NEXT: cond_br %[[IS_0]], ^bb1, ^bb2 // CHECK-NEXT: ^bb1: // CHECK-NEXT: %[[IN0_SHAPE:.+]] = flow.variable.load @_tflite_dynamicEntry_input0_shape : !shapex.ranked_shape<[?,8,8,3]> -// CHECK-NEXT: iree.list.resize %[[LIST]], %c4 : !iree.list<index> +// CHECK-NEXT: util.list.resize %[[LIST]], %c4 : !util.list<index> // CHECK-NEXT: %[[IN0_D0:.+]] = shapex.ranked_dim %[[IN0_SHAPE]][0] : !shapex.ranked_shape<[?,8,8,3]> -> index -// CHECK-NEXT: iree.list.set %[[LIST]][%c0], %[[IN0_D0]] : !iree.list<index> -// CHECK-NEXT: iree.list.set %[[LIST]][%c1], %c8 : !iree.list<index> -// CHECK-NEXT: iree.list.set %[[LIST]][%c2], %c8 : !iree.list<index> -// CHECK-NEXT: 
iree.list.set %[[LIST]][%c3], %c3 : !iree.list<index> +// CHECK-NEXT: util.list.set %[[LIST]][%c0], %[[IN0_D0]] : !util.list<index> +// CHECK-NEXT: util.list.set %[[LIST]][%c1], %c8 : !util.list<index> +// CHECK-NEXT: util.list.set %[[LIST]][%c2], %c8 : !util.list<index> +// CHECK-NEXT: util.list.set %[[LIST]][%c3], %c3 : !util.list<index> // CHECK-NEXT: br ^bb4 // CHECK-NEXT: ^bb2: // CHECK-NEXT: %[[IS_1:.+]] = cmpi eq, %[[INDEX]], %c1 : index // CHECK-NEXT: cond_br %[[IS_1]], ^bb3, ^bb4 // CHECK-NEXT: ^bb3: // CHECK-NEXT: %[[IN1_SHAPE:.+]] = flow.variable.load @_tflite_dynamicEntry_input1_shape : !shapex.ranked_shape<[?,8,8,3]> -// CHECK-NEXT: iree.list.resize %[[LIST]], %c4 : !iree.list<index> +// CHECK-NEXT: util.list.resize %[[LIST]], %c4 : !util.list<index> // CHECK-NEXT: %[[IN1_D0:.+]] = shapex.ranked_dim %[[IN1_SHAPE]][0] : !shapex.ranked_shape<[?,8,8,3]> -> index -// CHECK-NEXT: iree.list.set %[[LIST]][%c0], %[[IN1_D0]] : !iree.list<index> -// CHECK-NEXT: iree.list.set %[[LIST]][%c1], %c8 : !iree.list<index> -// CHECK-NEXT: iree.list.set %[[LIST]][%c2], %c8 : !iree.list<index> -// CHECK-NEXT: iree.list.set %[[LIST]][%c3], %c3 : !iree.list<index> +// CHECK-NEXT: util.list.set %[[LIST]][%c0], %[[IN1_D0]] : !util.list<index> +// CHECK-NEXT: util.list.set %[[LIST]][%c1], %c8 : !util.list<index> +// CHECK-NEXT: util.list.set %[[LIST]][%c2], %c8 : !util.list<index> +// CHECK-NEXT: util.list.set %[[LIST]][%c3], %c3 : !util.list<index> // CHECK-NEXT: br ^bb4 // CHECK-NEXT: ^bb4: // CHECK-NEXT: return // CHECK-NEXT: } // CHECK-LABEL: func @_tflite_dynamicEntry_resize_input_shape -// CHECK-SAME: (%[[INDEX:.+]]: index, %[[LIST:.+]]: !iree.list<index>) +// CHECK-SAME: (%[[INDEX:.+]]: index, %[[LIST:.+]]: !util.list<index>) // CHECK: %[[IS_0:.+]] = cmpi eq, %[[INDEX]], %c0 : index // CHECK-NEXT: cond_br %[[IS_0]], ^bb1, ^bb2 // CHECK-NEXT: ^bb1: -// CHECK-NEXT: %[[IN0_D0:.+]] = iree.list.get %[[LIST]][%c0] : !iree.list<index> +// CHECK-NEXT: %[[IN0_D0:.+]] = 
util.list.get %[[LIST]][%c0] : !util.list<index> // CHECK-NEXT: %[[IN0_SHAPE:.+]] = shapex.make_ranked_shape %[[IN0_D0]] : (index) -> !shapex.ranked_shape<[?,8,8,3]> // CHECK-NEXT: flow.variable.store %[[IN0_SHAPE]], @_tflite_dynamicEntry_input0_shape : !shapex.ranked_shape<[?,8,8,3]> // CHECK-NEXT: br ^bb4 @@ -72,7 +72,7 @@ // CHECK-NEXT: %[[IS_1:.+]] = cmpi eq, %[[INDEX]], %c1 : index // CHECK-NEXT: cond_br %[[IS_1]], ^bb3, ^bb4 // CHECK-NEXT: ^bb3: -// CHECK-NEXT: %[[IN1_D0:.+]] = iree.list.get %[[LIST]][%c0] : !iree.list<index> +// CHECK-NEXT: %[[IN1_D0:.+]] = util.list.get %[[LIST]][%c0] : !util.list<index> // CHECK-NEXT: %[[IN1_SHAPE:.+]] = shapex.make_ranked_shape %[[IN1_D0]] : (index) -> !shapex.ranked_shape<[?,8,8,3]> // CHECK-NEXT: flow.variable.store %[[IN1_SHAPE]], @_tflite_dynamicEntry_input1_shape : !shapex.ranked_shape<[?,8,8,3]> // CHECK-NEXT: br ^bb4 @@ -82,30 +82,30 @@ // CHECK-NEXT: } // CHECK-LABEL: func @_tflite_dynamicEntry_query_output_shape -// CHECK-SAME: (%[[INDEX:.+]]: index, %[[LIST:.+]]: !iree.list<index>) +// CHECK-SAME: (%[[INDEX:.+]]: index, %[[LIST:.+]]: !util.list<index>) // CHECK: call @_tflite_dynamicEntry_calculate_shapes() : () -> () // CHECK-NEXT: %[[IS_0:.+]] = cmpi eq, %[[INDEX]], %c0 : index // CHECK-NEXT: cond_br %[[IS_0]], ^bb1, ^bb2 // CHECK-NEXT: ^bb1: // CHECK-NEXT: %[[OUT0_SHAPE:.+]] = flow.variable.load @_tflite_dynamicEntry_output0_shape : !shapex.ranked_shape<[?,8,8,3]> -// CHECK-NEXT: iree.list.resize %[[LIST]], %c4 : !iree.list<index> +// CHECK-NEXT: util.list.resize %[[LIST]], %c4 : !util.list<index> // CHECK-NEXT: %[[OUT0_D0:.+]] = shapex.ranked_dim %[[OUT0_SHAPE]][0] : !shapex.ranked_shape<[?,8,8,3]> -> index -// CHECK-NEXT: iree.list.set %[[LIST]][%c0], %[[OUT0_D0]] : !iree.list<index> -// CHECK-NEXT: iree.list.set %[[LIST]][%c1], %c8 : !iree.list<index> -// CHECK-NEXT: iree.list.set %[[LIST]][%c2], %c8 : !iree.list<index> -// CHECK-NEXT: iree.list.set %[[LIST]][%c3], %c3 : !iree.list<index> +// CHECK-NEXT: 
util.list.set %[[LIST]][%c0], %[[OUT0_D0]] : !util.list<index> +// CHECK-NEXT: util.list.set %[[LIST]][%c1], %c8 : !util.list<index> +// CHECK-NEXT: util.list.set %[[LIST]][%c2], %c8 : !util.list<index> +// CHECK-NEXT: util.list.set %[[LIST]][%c3], %c3 : !util.list<index> // CHECK-NEXT: br ^bb4 // CHECK-NEXT: ^bb2: // CHECK-NEXT: %[[IS_1:.+]] = cmpi eq, %[[INDEX]], %c1 : index // CHECK-NEXT: cond_br %[[IS_1]], ^bb3, ^bb4 // CHECK-NEXT: ^bb3: // CHECK-NEXT: %[[OUT1_SHAPE:.+]] = flow.variable.load @_tflite_dynamicEntry_output1_shape : !shapex.ranked_shape<[?,8,8,3]> -// CHECK-NEXT: iree.list.resize %[[LIST]], %c4 : !iree.list<index> +// CHECK-NEXT: util.list.resize %[[LIST]], %c4 : !util.list<index> // CHECK-NEXT: %[[OUT1_D0:.+]] = shapex.ranked_dim %[[OUT1_SHAPE]][0] : !shapex.ranked_shape<[?,8,8,3]> -> index -// CHECK-NEXT: iree.list.set %[[LIST]][%c0], %[[OUT1_D0]] : !iree.list<index> -// CHECK-NEXT: iree.list.set %[[LIST]][%c1], %c8 : !iree.list<index> -// CHECK-NEXT: iree.list.set %[[LIST]][%c2], %c8 : !iree.list<index> -// CHECK-NEXT: iree.list.set %[[LIST]][%c3], %c3 : !iree.list<index> +// CHECK-NEXT: util.list.set %[[LIST]][%c0], %[[OUT1_D0]] : !util.list<index> +// CHECK-NEXT: util.list.set %[[LIST]][%c1], %c8 : !util.list<index> +// CHECK-NEXT: util.list.set %[[LIST]][%c2], %c8 : !util.list<index> +// CHECK-NEXT: util.list.set %[[LIST]][%c3], %c3 : !util.list<index> // CHECK-NEXT: br ^bb4 // CHECK-NEXT: ^bb4: // CHECK-NEXT: return
diff --git a/iree/compiler/Codegen/Common/BUILD b/iree/compiler/Codegen/Common/BUILD index 26cd75c..996e84c 100644 --- a/iree/compiler/Codegen/Common/BUILD +++ b/iree/compiler/Codegen/Common/BUILD
@@ -50,9 +50,9 @@ "//iree/compiler/Codegen/Utils", "//iree/compiler/Dialect/Flow/IR", "//iree/compiler/Dialect/HAL/IR", - "//iree/compiler/Dialect/IREE/IR", "//iree/compiler/Dialect/LinalgExt/IR", "//iree/compiler/Dialect/Shape/IR", + "//iree/compiler/Dialect/Util/IR", "@llvm-project//llvm:Support", "@llvm-project//mlir:Affine", "@llvm-project//mlir:AffineUtils",
diff --git a/iree/compiler/Codegen/Common/CMakeLists.txt b/iree/compiler/Codegen/Common/CMakeLists.txt index d3c8172..9f15aff 100644 --- a/iree/compiler/Codegen/Common/CMakeLists.txt +++ b/iree/compiler/Codegen/Common/CMakeLists.txt
@@ -60,9 +60,9 @@ iree::compiler::Codegen::Utils iree::compiler::Dialect::Flow::IR iree::compiler::Dialect::HAL::IR - iree::compiler::Dialect::IREE::IR iree::compiler::Dialect::LinalgExt::IR iree::compiler::Dialect::Shape::IR + iree::compiler::Dialect::Util::IR PUBLIC )
diff --git a/iree/compiler/Codegen/Common/DemoteF32ToF16.cpp b/iree/compiler/Codegen/Common/DemoteF32ToF16.cpp index 2350626..55bebcb 100644 --- a/iree/compiler/Codegen/Common/DemoteF32ToF16.cpp +++ b/iree/compiler/Codegen/Common/DemoteF32ToF16.cpp
@@ -10,7 +10,7 @@ #include "iree/compiler/Codegen/PassDetail.h" #include "iree/compiler/Codegen/Passes.h" #include "iree/compiler/Dialect/Flow/IR/FlowOps.h" -#include "iree/compiler/Dialect/IREE/IR/IREETypes.h" +#include "iree/compiler/Dialect/Util/IR/UtilTypes.h" #include "llvm/ADT/APFloat.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallVector.h" @@ -31,7 +31,7 @@ /// Any fp32 derived type is illegal. static bool isIllegalType(Type type) { if (type.isF32()) return true; - if (auto ptrType = type.dyn_cast<IREE::PtrType>()) { + if (auto ptrType = type.dyn_cast<IREE::Util::PtrType>()) { return isIllegalType(ptrType.getTargetType()); } if (auto shapedType = type.dyn_cast<ShapedType>()) { @@ -55,10 +55,10 @@ return type; }); addConversion(convertTensor); - addConversion([&](IREE::PtrType ptrType) { + addConversion([&](IREE::Util::PtrType ptrType) { if (auto tensorType = ptrType.getTargetType().dyn_cast<RankedTensorType>()) { - return IREE::PtrType::get(convertTensor(tensorType)); + return IREE::Util::PtrType::get(convertTensor(tensorType)); } return ptrType; });
diff --git a/iree/compiler/Codegen/Common/LinalgBufferizePass.cpp b/iree/compiler/Codegen/Common/LinalgBufferizePass.cpp index f8742b7..9740afb 100644 --- a/iree/compiler/Codegen/Common/LinalgBufferizePass.cpp +++ b/iree/compiler/Codegen/Common/LinalgBufferizePass.cpp
@@ -44,10 +44,10 @@ #include "iree/compiler/Dialect/Flow/IR/FlowOps.h" #include "iree/compiler/Dialect/Flow/IR/FlowTypes.h" #include "iree/compiler/Dialect/HAL/IR/HALOps.h" -#include "iree/compiler/Dialect/IREE/IR/IREEDialect.h" -#include "iree/compiler/Dialect/IREE/IR/IREEOps.h" #include "iree/compiler/Dialect/LinalgExt/IR/LinalgExtOps.h" #include "iree/compiler/Dialect/Shape/IR/ShapeOps.h" +#include "iree/compiler/Dialect/Util/IR/UtilDialect.h" +#include "iree/compiler/Dialect/Util/IR/UtilOps.h" #include "llvm/ADT/EquivalenceClasses.h" #include "llvm/ADT/TypeSwitch.h" #include "mlir/Dialect/Linalg/IR/LinalgOps.h" @@ -1469,8 +1469,9 @@ public: LinalgBufferizePass(WorkgroupMemoryAllocationFn fn) : allocationFn(fn) {} void getDependentDialects(DialectRegistry ®istry) const override { - registry.insert<IREEDialect, linalg::LinalgDialect, memref::MemRefDialect, - scf::SCFDialect, StandardOpsDialect>(); + registry + .insert<IREE::Util::UtilDialect, linalg::LinalgDialect, + memref::MemRefDialect, scf::SCFDialect, StandardOpsDialect>(); } void runOnOperation() override;
diff --git a/iree/compiler/Codegen/Common/OptimizeVectorTransferPass.cpp b/iree/compiler/Codegen/Common/OptimizeVectorTransferPass.cpp index a392321..78e006e 100644 --- a/iree/compiler/Codegen/Common/OptimizeVectorTransferPass.cpp +++ b/iree/compiler/Codegen/Common/OptimizeVectorTransferPass.cpp
@@ -6,12 +6,14 @@ #include "iree/compiler/Codegen/PassDetail.h" #include "iree/compiler/Codegen/Passes.h" +#include "mlir/Dialect/Linalg/Transforms/Hoisting.h" #include "mlir/Dialect/MemRef/IR/MemRef.h" #include "mlir/Dialect/StandardOps/IR/Ops.h" #include "mlir/Dialect/Vector/VectorOps.h" #include "mlir/Dialect/Vector/VectorTransforms.h" #include "mlir/Pass/Pass.h" #include "mlir/Transforms/GreedyPatternRewriteDriver.h" +#include "mlir/Transforms/LoopUtils.h" namespace mlir { namespace iree_compiler { @@ -73,6 +75,16 @@ } }; +static void loopInvariantCodeMotion(FuncOp funcOp) { + // Walk through all loops in a function in innermost-loop-first order. This + // way, we first LICM from the inner loop, and place the ops in + // the outer loop, which in turn can be further LICM'ed. + funcOp.walk([&](LoopLikeOpInterface loopLike) { + if (failed(moveLoopInvariantCode(loopLike))) + llvm_unreachable("Unexpected failure to move invariant code out of loop"); + }); +} + struct OptimizeVectorTransferPass : public OptimizeVectorTransferBase<OptimizeVectorTransferPass> { void runOnOperation() override { @@ -84,7 +96,11 @@ mlir::vector::populateCastAwayVectorLeadingOneDimPatterns(patterns); patterns.add<TransposeUnitDimToShapeCast>(&getContext()); (void)applyPatternsAndFoldGreedily(funcOp, std::move(patterns)); - + // Workaround, run loop invariant code motion before hoist redudant vector + // transfer to workaround a bug upstream. + // TODO(thomasraoux): Remove it once the fix is merged. + loopInvariantCodeMotion(funcOp); + linalg::hoistRedundantVectorTransfers(funcOp); vector::transferOpflowOpt(funcOp); // Delete potential dead alloc and associated ops after store to load // forwarding.
diff --git a/iree/compiler/Codegen/Common/test/f32Tof16.mlir b/iree/compiler/Codegen/Common/test/f32Tof16.mlir index 6cd8325..32bec08 100644 --- a/iree/compiler/Codegen/Common/test/f32Tof16.mlir +++ b/iree/compiler/Codegen/Common/test/f32Tof16.mlir
@@ -2,14 +2,14 @@ // CHECK: flow.variable {{.*}} : tensor<4xf16> // CHECK-LABEL: func @simple_f32() -> tensor<4xf16> -// CHECK-NEXT: %{{.*}} = flow.variable.address @__global : !iree.ptr<tensor<4xf16>> -// CHECK-NEXT: %{{.*}} = flow.variable.load.indirect %{{.*}} : !iree.ptr<tensor<4xf16>> -> tensor<4xf16> +// CHECK-NEXT: %{{.*}} = flow.variable.address @__global : !util.ptr<tensor<4xf16>> +// CHECK-NEXT: %{{.*}} = flow.variable.load.indirect %{{.*}} : !util.ptr<tensor<4xf16>> -> tensor<4xf16> // CHECK-NEXT: return %{{.*}} : tensor<4xf16> module { flow.variable @"__global" dense<"0x000020410000A040000020410000A040"> : tensor<4xf32> attributes {sym_visibility = "private"} func @simple_f32() -> (tensor<4xf32>) { - %0 = flow.variable.address @"__global" : !iree.ptr<tensor<4xf32>> - %1 = flow.variable.load.indirect %0 : !iree.ptr<tensor<4xf32>> -> tensor<4xf32> + %0 = flow.variable.address @"__global" : !util.ptr<tensor<4xf32>> + %1 = flow.variable.load.indirect %0 : !util.ptr<tensor<4xf32>> -> tensor<4xf32> return %1 : tensor<4xf32> } } @@ -24,8 +24,8 @@ module { flow.variable @"__global" dense<"0x000020410000A040000020410000A040"> : tensor<4xf32> attributes {sym_visibility = "private"} func @nested_region_f32() -> (tensor<4xf32>) { - %0 = flow.variable.address @"__iree_flow_bert/embeddings/FakeLayerNorm/beta" : !iree.ptr<tensor<4xf32>> - %1 = flow.variable.load.indirect %0 : !iree.ptr<tensor<4xf32>> -> tensor<4xf32> + %0 = flow.variable.address @"__iree_flow_bert/embeddings/FakeLayerNorm/beta" : !util.ptr<tensor<4xf32>> + %1 = flow.variable.load.indirect %0 : !util.ptr<tensor<4xf32>> -> tensor<4xf32> %2 = "mhlo.broadcast_in_dim"(%1) {broadcast_dimensions = dense<0> : tensor<1xi64>} : (tensor<4xf32>) -> tensor<4x4xf32> %4 = mhlo.constant dense<0xFF800000> : tensor<f32> %3 = "mhlo.reduce"(%2, %4) ( {
diff --git a/iree/compiler/Codegen/LLVMCPU/BUILD b/iree/compiler/Codegen/LLVMCPU/BUILD index b5237aa..ad514f9 100644 --- a/iree/compiler/Codegen/LLVMCPU/BUILD +++ b/iree/compiler/Codegen/LLVMCPU/BUILD
@@ -36,11 +36,11 @@ "//iree/compiler/Dialect/Flow/IR", "//iree/compiler/Dialect/HAL/IR", "//iree/compiler/Dialect/HAL/IR:HALDialect", - "//iree/compiler/Dialect/IREE/IR", "//iree/compiler/Dialect/LinalgExt/IR", "//iree/compiler/Dialect/LinalgExt/Transforms", "//iree/compiler/Dialect/Shape/IR", "//iree/compiler/Dialect/Shape/Transforms", + "//iree/compiler/Dialect/Util/IR", "@llvm-project//llvm:Support", "@llvm-project//mlir:AffineToStandardTransforms", "@llvm-project//mlir:Analysis",
diff --git a/iree/compiler/Codegen/LLVMCPU/CMakeLists.txt b/iree/compiler/Codegen/LLVMCPU/CMakeLists.txt index 514169e..de9fc28 100644 --- a/iree/compiler/Codegen/LLVMCPU/CMakeLists.txt +++ b/iree/compiler/Codegen/LLVMCPU/CMakeLists.txt
@@ -63,11 +63,11 @@ iree::compiler::Dialect::Flow::IR iree::compiler::Dialect::HAL::IR iree::compiler::Dialect::HAL::IR::HALDialect - iree::compiler::Dialect::IREE::IR iree::compiler::Dialect::LinalgExt::IR iree::compiler::Dialect::LinalgExt::Transforms iree::compiler::Dialect::Shape::IR iree::compiler::Dialect::Shape::Transforms + iree::compiler::Dialect::Util::IR PUBLIC )
diff --git a/iree/compiler/Codegen/LLVMCPU/ConvertToLLVM.cpp b/iree/compiler/Codegen/LLVMCPU/ConvertToLLVM.cpp index dd67846..7882c97 100644 --- a/iree/compiler/Codegen/LLVMCPU/ConvertToLLVM.cpp +++ b/iree/compiler/Codegen/LLVMCPU/ConvertToLLVM.cpp
@@ -9,9 +9,9 @@ #include "iree/compiler/Codegen/Utils/Utils.h" #include "iree/compiler/Dialect/HAL/IR/HALDialect.h" #include "iree/compiler/Dialect/HAL/IR/HALOps.h" -#include "iree/compiler/Dialect/IREE/IR/IREEDialect.h" -#include "iree/compiler/Dialect/IREE/IR/IREEOps.h" #include "iree/compiler/Dialect/Shape/IR/ShapeDialect.h" +#include "iree/compiler/Dialect/Util/IR/UtilDialect.h" +#include "iree/compiler/Dialect/Util/IR/UtilOps.h" #include "llvm/Support/raw_ostream.h" #include "mlir/Analysis/DataLayoutAnalysis.h" #include "mlir/Conversion/AffineToStandard/AffineToStandard.h" @@ -654,9 +654,9 @@ // rest of the IR. target.addLegalOp<ModuleOp, IREE::HAL::InterfaceOp, IREE::HAL::InterfaceBindingOp, IREE::HAL::InterfaceEndOp>(); - target.addIllegalDialect<ShapeDialect, StandardOpsDialect, IREEDialect, - IREE::HAL::HALDialect, math::MathDialect, - tosa::TosaDialect>(); + target.addIllegalDialect<ShapeDialect, StandardOpsDialect, + IREE::Util::UtilDialect, IREE::HAL::HALDialect, + math::MathDialect, tosa::TosaDialect>(); target.addIllegalOp<UnrealizedConversionCastOp>(); // Don't apply patterns to private function (e.g num_workgroups func). @@ -664,15 +664,15 @@ if (isEntryPoint(funcOp)) return false; return true; }); - target - .addDynamicallyLegalDialect<ShapeDialect, StandardOpsDialect, IREEDialect, - IREE::HAL::HALDialect, math::MathDialect>( - [&](Operation *op) { - auto funcParent = op->getParentOfType<FuncOp>(); - if (!funcParent) return false; - if (isEntryPoint(funcParent)) return false; - return true; - }); + target.addDynamicallyLegalDialect<ShapeDialect, StandardOpsDialect, + IREE::Util::UtilDialect, + IREE::HAL::HALDialect, math::MathDialect>( + [&](Operation *op) { + auto funcParent = op->getParentOfType<FuncOp>(); + if (!funcParent) return false; + if (isEntryPoint(funcParent)) return false; + return true; + }); if (failed(applyPartialConversion(module, target, std::move(patterns)))) { signalPassFailure();
diff --git a/iree/compiler/Codegen/LLVMCPU/KernelDispatch.cpp b/iree/compiler/Codegen/LLVMCPU/KernelDispatch.cpp index 4e1c81c..25a50f8 100644 --- a/iree/compiler/Codegen/LLVMCPU/KernelDispatch.cpp +++ b/iree/compiler/Codegen/LLVMCPU/KernelDispatch.cpp
@@ -60,6 +60,21 @@ "iree-codegen-llvm-batch-matmul-vector-size", llvm::cl::desc("linalg.batch_matmul vector tile size"), llvm::cl::init(4)); +static llvm::cl::list<int> mmt4dWorkgroupTileSizes( + "iree-codegen-llvm-mmt4d-workgroup-tile-sizes", + llvm::cl::desc("linalg.mmt4d workgroup tile size"), llvm::cl::ZeroOrMore, + llvm::cl::MiscFlags::CommaSeparated); + +static llvm::cl::list<int> mmt4dL1TileSizes( + "iree-codegen-llvm-mmt4d-l1-tile-size", + llvm::cl::desc("linalg.mmt4d L1 tile size"), llvm::cl::ZeroOrMore, + llvm::cl::MiscFlags::CommaSeparated); + +static llvm::cl::list<int> mmt4dVectorSizes( + "iree-codegen-llvm-mmt4d-vector-size", + llvm::cl::desc("linalg.mmt4d vector tile size"), llvm::cl::ZeroOrMore, + llvm::cl::MiscFlags::CommaSeparated); + static llvm::cl::opt<int> defaultWorkgroupTileSize( "iree-codegen-llvm-generic-ops-workgroup-size", llvm::cl::desc( @@ -125,6 +140,46 @@ return success(); } +/// Sets the lowering configuration for dispatch region for linalg.mmt4d root op +static LogicalResult setRootConfig(FuncOp entryPointFn, + linalg::Mmt4DOp mmt4dOp) { + // TODO(ataei): These are hand tuned for some performance benchmarks for now, + // we want to adapt the same strategy as matmul that dynamically sets tile + // size. 
+ auto getWorkgroupTileSizes = [&]() -> SmallVector<int64_t> { + if (!mmt4dWorkgroupTileSizes.empty()) { + return SmallVector<int64_t>(mmt4dWorkgroupTileSizes.begin(), + mmt4dWorkgroupTileSizes.end()); + } + return {64, 32}; + }; + + auto getL1TileSizes = [&]() -> SmallVector<int64_t> { + if (!mmt4dL1TileSizes.empty()) { + return SmallVector<int64_t>(mmt4dL1TileSizes.begin(), + mmt4dL1TileSizes.end()); + } + return {32, 32, 4, 4, 4, 4}; + }; + + auto getVectorSizes = [&]() -> SmallVector<int64_t> { + if (!mmt4dVectorSizes.empty()) { + return SmallVector<int64_t>(mmt4dVectorSizes.begin(), + mmt4dVectorSizes.end()); + } + return {1, 1, 4, 4, 1, 4}; + }; + + SmallVector<int64_t, 4> nativeVectorSize = getVectorSizes(); + + TileSizesListType tileSizes = {getWorkgroupTileSizes(), getL1TileSizes(), + nativeVectorSize}; + + return setOpConfigAndEntryPointFnTranslation( + entryPointFn, mmt4dOp, tileSizes, nativeVectorSize, + IREE::HAL::DispatchLoweringPassPipeline::CPUVectorization); +} + /// Sets the lowering configuration for dispatch region with root op being a /// generic op. static LogicalResult setDefaultRootConfig(FuncOp entryPointFn, Operation *op) { @@ -157,12 +212,18 @@ Operation *rootOp = nullptr; for (auto computeOp : computeOps) { if (!hasMarker(computeOp, getWorkgroupMarker())) continue; - if (auto contractionOp = - dyn_cast<linalg::ContractionOpInterface>(computeOp)) { - if (failed(setRootConfig(entryPointFn, contractionOp))) { - return failure(); - } + + auto setRootConfigFn = [&](Operation *op) -> LogicalResult { + return TypeSwitch<Operation *, LogicalResult>(op) + .Case<linalg::Mmt4DOp, linalg::ContractionOpInterface>( + [&](auto op) { return setRootConfig(entryPointFn, op); }) + .Default([&](Operation *op) { return success(); }); + }; + + if (failed(setRootConfigFn(computeOp))) { + return failure(); } + if (getLoweringConfig(computeOp)) { if (rootOp) { return computeOp->emitError(
diff --git a/iree/compiler/Codegen/LLVMGPU/BUILD b/iree/compiler/Codegen/LLVMGPU/BUILD index fd00b70..6064bea 100644 --- a/iree/compiler/Codegen/LLVMGPU/BUILD +++ b/iree/compiler/Codegen/LLVMGPU/BUILD
@@ -17,6 +17,7 @@ "ConvertToNVVM.cpp", "ConvertToROCDL.cpp", "KernelConfig.cpp", + "LLVMGPUDistributeSharedMemoryCopy.cpp", "LLVMGPULowerExecutableTarget.cpp", "LLVMGPURemoveTrivialLoops.cpp", "LLVMGPUTileAndDistribute.cpp", @@ -27,6 +28,7 @@ hdrs = [ "ConvertToLLVM.h", "KernelConfig.h", + "LLVMGPUUtils.h", ], deps = [ "//iree/compiler/Codegen:PassHeaders", @@ -34,10 +36,10 @@ "//iree/compiler/Codegen/Transforms", "//iree/compiler/Codegen/Utils", "//iree/compiler/Dialect/HAL/IR", - "//iree/compiler/Dialect/IREE/IR", "//iree/compiler/Dialect/LinalgExt/IR", "//iree/compiler/Dialect/LinalgExt/Transforms", "//iree/compiler/Dialect/Shape/Transforms", + "//iree/compiler/Dialect/Util/IR", "@llvm-project//llvm:Support", "@llvm-project//mlir:Affine", "@llvm-project//mlir:AffineToStandard",
diff --git a/iree/compiler/Codegen/LLVMGPU/CMakeLists.txt b/iree/compiler/Codegen/LLVMGPU/CMakeLists.txt index 20ae473..7101dc2 100644 --- a/iree/compiler/Codegen/LLVMGPU/CMakeLists.txt +++ b/iree/compiler/Codegen/LLVMGPU/CMakeLists.txt
@@ -16,11 +16,13 @@ HDRS "ConvertToLLVM.h" "KernelConfig.h" + "LLVMGPUUtils.h" SRCS "ConvertToLLVM.cpp" "ConvertToNVVM.cpp" "ConvertToROCDL.cpp" "KernelConfig.cpp" + "LLVMGPUDistributeSharedMemoryCopy.cpp" "LLVMGPULowerExecutableTarget.cpp" "LLVMGPURemoveTrivialLoops.cpp" "LLVMGPUTileAndDistribute.cpp" @@ -60,10 +62,10 @@ iree::compiler::Codegen::Transforms iree::compiler::Codegen::Utils iree::compiler::Dialect::HAL::IR - iree::compiler::Dialect::IREE::IR iree::compiler::Dialect::LinalgExt::IR iree::compiler::Dialect::LinalgExt::Transforms iree::compiler::Dialect::Shape::Transforms + iree::compiler::Dialect::Util::IR tensorflow::mlir_hlo PUBLIC )
diff --git a/iree/compiler/Codegen/LLVMGPU/ConvertToLLVM.cpp b/iree/compiler/Codegen/LLVMGPU/ConvertToLLVM.cpp index 6d1edf2..5a40949 100644 --- a/iree/compiler/Codegen/LLVMGPU/ConvertToLLVM.cpp +++ b/iree/compiler/Codegen/LLVMGPU/ConvertToLLVM.cpp
@@ -9,7 +9,7 @@ #include "iree/compiler/Codegen/PassDetail.h" #include "iree/compiler/Codegen/Passes.h" #include "iree/compiler/Codegen/Utils/Utils.h" -#include "iree/compiler/Dialect/IREE/IR/IREEOps.h" +#include "iree/compiler/Dialect/Util/IR/UtilOps.h" #include "mlir/Conversion/LLVMCommon/Pattern.h" #include "mlir/Conversion/LLVMCommon/TypeConverter.h" #include "mlir/Conversion/VectorToLLVM/ConvertVectorToLLVM.h"
diff --git a/iree/compiler/Codegen/LLVMGPU/ConvertToNVVM.cpp b/iree/compiler/Codegen/LLVMGPU/ConvertToNVVM.cpp index 13588a2..7b5b008 100644 --- a/iree/compiler/Codegen/LLVMGPU/ConvertToNVVM.cpp +++ b/iree/compiler/Codegen/LLVMGPU/ConvertToNVVM.cpp
@@ -8,7 +8,7 @@ #include "iree/compiler/Codegen/PassDetail.h" #include "iree/compiler/Codegen/Passes.h" #include "iree/compiler/Codegen/Utils/Utils.h" -#include "iree/compiler/Dialect/IREE/IR/IREEOps.h" +#include "iree/compiler/Dialect/Util/IR/UtilOps.h" #include "mlir/Conversion/GPUToNVVM/GPUToNVVMPass.h" #include "mlir/Conversion/LLVMCommon/ConversionTarget.h" #include "mlir/Conversion/LLVMCommon/LoweringOptions.h"
diff --git a/iree/compiler/Codegen/LLVMGPU/ConvertToROCDL.cpp b/iree/compiler/Codegen/LLVMGPU/ConvertToROCDL.cpp index b6cf257..671b2ad 100644 --- a/iree/compiler/Codegen/LLVMGPU/ConvertToROCDL.cpp +++ b/iree/compiler/Codegen/LLVMGPU/ConvertToROCDL.cpp
@@ -8,7 +8,7 @@ #include "iree/compiler/Codegen/PassDetail.h" #include "iree/compiler/Codegen/Passes.h" #include "iree/compiler/Codegen/Utils/Utils.h" -#include "iree/compiler/Dialect/IREE/IR/IREEOps.h" +#include "iree/compiler/Dialect/Util/IR/UtilOps.h" #include "mlir/Conversion/GPUToROCDL/GPUToROCDLPass.h" #include "mlir/Conversion/LLVMCommon/ConversionTarget.h" #include "mlir/Conversion/LLVMCommon/LoweringOptions.h"
diff --git a/iree/compiler/Codegen/LLVMGPU/KernelConfig.cpp b/iree/compiler/Codegen/LLVMGPU/KernelConfig.cpp index c7643e6..a1e60f7 100644 --- a/iree/compiler/Codegen/LLVMGPU/KernelConfig.cpp +++ b/iree/compiler/Codegen/LLVMGPU/KernelConfig.cpp
@@ -27,13 +27,12 @@ /// Return the best combination of tile size and wg size. It will then used to /// pick the best size aligned with the shape dimension. static void getMatmulConfig(SmallVectorImpl<TileWorkgroupSizePair> &tileSizes) { - tileSizes.push_back(TileWorkgroupSizePair({{16, 128, 4}, {32, 1, 1}})); + tileSizes.push_back(TileWorkgroupSizePair({{64, 128, 8}, {32, 4, 1}})); tileSizes.push_back(TileWorkgroupSizePair({{8, 128, 4}, {32, 1, 1}})); tileSizes.push_back(TileWorkgroupSizePair({{16, 64, 4}, {16, 2, 1}})); } static LogicalResult setContractConfig(FuncOp entryPoint, linalg::LinalgOp op) { - if (getLoweringConfig(op)) return success(); TileSizesListType tileSizes; // Infer the MxN size of the matmul based on operands and indexing maps. auto lhsShape = getUntiledShape(op.getInputOperand(0)->get()); @@ -58,6 +57,7 @@ // Default tile size and workgroup size. int64_t tileX = 2; int64_t tileY = 256; + int64_t tileK = 4; SmallVector<int64_t, 3> workgroupSize = {2 * cudaWarpSize, 1, 1}; SmallVector<TileWorkgroupSizePair> tileSizeConfig; // Query the best configuration. @@ -68,6 +68,7 @@ if (sizeN % config.tileSize[1] == 0 && sizeM % config.tileSize[0] == 0) { tileX = config.tileSize[0]; tileY = config.tileSize[1]; + tileK = config.tileSize[2]; workgroupSize.assign(config.workgroupSize.begin(), config.workgroupSize.end()); break; @@ -80,8 +81,8 @@ // inner dimension with the tileX/tileY size. ts.append(op.getNumParallelLoops() - 2, 1); ts.append({tileX, tileY}); - // Tile all the reduction dimension with a size of 4. - ts.append(op.getNumReductionLoops(), 4); + // Tile all the reduction dimensions. + ts.append(op.getNumReductionLoops(), tileK); tileSizes.push_back(ts); // Workgroup level. tileSizes.push_back({}); // Subgroup level. // At the thread level only tile parallel loops. @@ -90,8 +91,9 @@ {tileX / workgroupSize[1], tileY / workgroupSize[0]}); tileSizes.push_back(invocationLevelTs); // Thread level. 
return setOpConfigAndEntryPointFnTranslation( - entryPoint, op, tileSizes, /*nativeVectorSizes=*/ArrayRef<int64_t>{}, - IREE::HAL::DispatchLoweringPassPipeline::LLVMGPUVectorize, workgroupSize); + entryPoint, op, tileSizes, /*nativeVectorSize=*/ArrayRef<int64_t>{}, + IREE::HAL::DispatchLoweringPassPipeline::LLVMGPUMatmulSimt, + workgroupSize); } // Basic default properties for linalg ops that haven't been tuned. @@ -130,13 +132,18 @@ break; } } + if (auto linalgOp = dyn_cast<linalg::LinalgOp>(op)) { + // Tile reduction dimension to 1. Using a large tile size may allow better + // scheduling and could help in case one of the input has transpose. + // TODO(thomasraoux): improve the heuristic. + workgroupTileSizes.append(linalgOp.getNumReductionLoops(), 1); + } tileSizes.emplace_back(std::move(workgroupTileSizes)); // Workgroup level tileSizes.push_back({}); // Subgroup level. tileSizes.emplace_back(std::move(threadTileSizes)); // Thread level return setOpConfigAndEntryPointFnTranslation( - entryPoint, op, tileSizes, /*nativeVectorSizes=*/ArrayRef<int64_t>{}, - IREE::HAL::DispatchLoweringPassPipeline::LLVMGPUDistribute, - workgroupSize); + entryPoint, op, tileSizes, /*nativeVectorSize=*/ArrayRef<int64_t>{}, + IREE::HAL::DispatchLoweringPassPipeline::LLVMGPUVectorize, workgroupSize); } static LogicalResult setRootConfig(FuncOp entryPointFn, Operation *computeOp) {
diff --git a/iree/compiler/Codegen/LLVMGPU/LLVMGPUDistributeSharedMemoryCopy.cpp b/iree/compiler/Codegen/LLVMGPU/LLVMGPUDistributeSharedMemoryCopy.cpp new file mode 100644 index 0000000..91ff071 --- /dev/null +++ b/iree/compiler/Codegen/LLVMGPU/LLVMGPUDistributeSharedMemoryCopy.cpp
@@ -0,0 +1,278 @@ +// Copyright 2021 The IREE Authors +// +// Licensed under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + +#include <algorithm> +#include <numeric> + +#include "iree/compiler/Codegen/LLVMGPU/LLVMGPUUtils.h" +#include "iree/compiler/Codegen/PassDetail.h" +#include "iree/compiler/Codegen/Passes.h" +#include "iree/compiler/Codegen/Transforms/Transforms.h" +#include "iree/compiler/Codegen/Utils/MarkerUtils.h" +#include "mlir/Dialect/GPU/Passes.h" +#include "mlir/Dialect/Vector/VectorTransforms.h" +#include "mlir/IR/Builders.h" +#include "mlir/IR/MLIRContext.h" +#include "mlir/Support/MathExtras.h" +#include "mlir/Transforms/GreedyPatternRewriteDriver.h" +#include "mlir/Transforms/Passes.h" + +//====---------------------------------------------------------------------===// +// Pass to lower workgroup memory copy to distibuted +// transfer_read/transfer_write ops. +//====---------------------------------------------------------------------===// + +namespace mlir { +namespace iree_compiler { + +/// Patterns for copy to shared memory mapping. Copy to shared memory are not +/// part of the launch config but needs to be distributed on the workgroup +/// picked by the root op. +static void populateTilingCopyToWorkgroupMemPatterns( + OwningRewritePatternList &patterns, ArrayRef<int64_t> workgroupSize) { + // Tile and distribute copy to workgroup memory. + linalg::TileSizeComputationFunction wgCopyTileSizeFn = + [](OpBuilder &builder, Operation *operation) { + const int64_t copyTileSize = 4; + // We tile to 4 as we want each thread to load 4 element in a cyclic + // distribution. + SmallVector<Value, 4> tileSizesVal; + unsigned rank = + cast<linalg::CopyOp>(operation).getOutputBufferTypes()[0].getRank(); + for (unsigned i = 0; i < rank - 1; i++) { + int64_t t = (rank - i) <= kNumGPUDims ? 
1 : 0; + tileSizesVal.push_back( + builder.create<ConstantIndexOp>(operation->getLoc(), t)); + } + tileSizesVal.push_back( + builder.create<ConstantIndexOp>(operation->getLoc(), copyTileSize)); + return tileSizesVal; + }; + auto getCopyThreadProcInfoFn = [workgroupSize]( + OpBuilder &builder, Location loc, + ArrayRef<Range> parallelLoopRanges) { + return getGPUThreadIdsAndCounts(builder, loc, parallelLoopRanges.size(), + workgroupSize); + }; + linalg::LinalgLoopDistributionOptions copyInvocationDistributionOptions; + copyInvocationDistributionOptions.procInfo = getCopyThreadProcInfoFn; + copyInvocationDistributionOptions.distributionMethod = { + {linalg::DistributionMethod::Cyclic, linalg::DistributionMethod::Cyclic, + linalg::DistributionMethod::Cyclic}}; + + auto tilingOptions = + linalg::LinalgTilingOptions() + .setLoopType(linalg::LinalgTilingLoopType::Loops) + .setTileSizeComputationFunction(wgCopyTileSizeFn) + .setDistributionOptions(copyInvocationDistributionOptions); + patterns.insert<linalg::LinalgTilingPattern<linalg::CopyOp>>( + patterns.getContext(), tilingOptions, + linalg::LinalgTransformationFilter( + {Identifier::get(getCopyToWorkgroupMemoryMarker(), + patterns.getContext())}, + Identifier::get(getVectorizeMarker(), patterns.getContext()))); +} + +static void populateVectorizationPatterns(RewritePatternSet &patterns) { + linalg::insertVectorizationPatterns<linalg::CopyOp>( + patterns, linalg::LinalgVectorizationOptions(), + linalg::LinalgTransformationFilter(Identifier::get( + getCopyToWorkgroupMemoryMarker(), patterns.getContext()))); +} + +// TODO(thomasraoux): Extend this to support smaller vector size as well. +static constexpr int targetVectorSize = 4; + +/// Compute a vector size so that the numer of elements is equal to the flat +/// workgroup size. 
+static Optional<SmallVector<int64_t, 4>> getGPUNativeVectorSize( + Operation *op, int64_t flatWorkgroupSize) { + auto vt = dyn_cast<VectorTransferOpInterface>(op); + if (!vt) return llvm::None; + if (!vt.permutation_map().isMinorIdentity()) return llvm::None; + ArrayRef<int64_t> shape = vt.getVectorType().getShape(); + SmallVector<int64_t, 4> unroll; + assert(shape.back() % targetVectorSize == 0); + int64_t threadsAvailable = flatWorkgroupSize; + for (auto &dim : llvm::enumerate(llvm::reverse(shape))) { + int64_t numElementPerThread = dim.index() == 0 ? targetVectorSize : 1; + int64_t numThreads = dim.value() / numElementPerThread; + numThreads = std::min(numThreads, threadsAvailable); + unroll.push_back(numThreads * numElementPerThread); + assert(threadsAvailable % numThreads == 0); + threadsAvailable = threadsAvailable / numThreads; + if (threadsAvailable == 1) break; + } + assert(threadsAvailable == 1); + unroll.resize(shape.size(), 1); + std::reverse(unroll.begin(), unroll.end()); + if (unroll == shape) return llvm::None; + return unroll; +} + +static void populateVectorUnrollPatterns(RewritePatternSet &patterns, + int64_t flatWorkgroupSize) { + auto getShape = [flatWorkgroupSize](Operation *op) { + return getGPUNativeVectorSize(op, flatWorkgroupSize); + }; + vector::populateVectorUnrollPatterns( + patterns, vector::UnrollVectorOptions().setNativeShapeFn(getShape)); +} + +/// Return a flattened Id Value by combining the 3D gpu thread IDs. 
+static Value createFlatId(FuncOp funcOp, std::array<int64_t, 3> workgroupSize) { + OpBuilder b(funcOp.getBody()); + Type indexType = b.getIndexType(); + AffineExpr d0 = getAffineDimExpr(0, b.getContext()); + AffineExpr d1 = getAffineDimExpr(1, b.getContext()); + AffineExpr d2 = getAffineDimExpr(2, b.getContext()); + Value threadX = b.create<gpu::ThreadIdOp>(funcOp.getLoc(), indexType, + b.getStringAttr("x")); + Value threadY = b.create<gpu::ThreadIdOp>(funcOp.getLoc(), indexType, + b.getStringAttr("y")); + Value threadZ = b.create<gpu::ThreadIdOp>(funcOp.getLoc(), indexType, + b.getStringAttr("z")); + Value flatThreadId = makeComposedAffineApply( + b, funcOp.getLoc(), + d0 + workgroupSize[0] * d1 + (workgroupSize[0] * workgroupSize[1]) * d2, + {threadX, threadY, threadZ}); + return flatThreadId; +} + +/// Distribute a transfer read operations on the given thread ids. +static void distributeTransferRead(FuncOp funcOp, Value flatThreadId, + int64_t flatWorkgroupSize) { + funcOp.walk([&](vector::TransferReadOp readOp) { + OpBuilder b(readOp); + Value id = flatThreadId; + SmallVector<int64_t, 2> multiplier; + auto shape = readOp.getVectorType().getShape(); + SmallVector<Value> ids; + SmallVector<AffineExpr> exprs; + AffineExpr d0 = getAffineDimExpr(0, b.getContext()); + int64_t numThreads = flatWorkgroupSize; + for (auto &dim : llvm::enumerate(llvm::reverse(shape))) { + int64_t threads = + dim.index() == 0 ? (dim.value() / targetVectorSize) : dim.value(); + // If we don't need to distribute the dimension, skip it. 
+ if (threads == 1) continue; + exprs.push_back(getAffineDimExpr(shape.size() - dim.index() - 1, + funcOp->getContext())); + multiplier.push_back(threads); + Value dimId = id; + assert(numThreads % threads == 0); + if (numThreads / threads > 1) + dimId = + makeComposedAffineApply(b, funcOp.getLoc(), d0 % threads, {dimId}); + ids.push_back(dimId); + numThreads = numThreads / threads; + id = makeComposedAffineApply(b, funcOp.getLoc(), d0.floorDiv(threads), + {id}); + if (numThreads <= 1) break; + } + std::reverse(ids.begin(), ids.end()); + Optional<mlir::vector::DistributeOps> ops = + vector::distributPointwiseVectorOp( + b, readOp, ids, multiplier, + AffineMap::get(shape.size(), 0, exprs, funcOp.getContext())); + if (ops.hasValue()) { + SmallPtrSet<Operation *, 1> extractOp({ops->extract, ops->insert}); + readOp.getResult().replaceAllUsesExcept(ops->insert.getResult(), + extractOp); + } + }); +} + +namespace { + +class LLVMGPUDistributeSharedMemoryCopyPass + : public LLVMGPUDistributeSharedMemoryCopyBase< + LLVMGPUDistributeSharedMemoryCopyPass> { + void getDependentDialects(DialectRegistry ®istry) const override { + registry.insert<vector::VectorDialect>(); + } + void runOnOperation() override { + FuncOp funcOp = getOperation(); + std::array<int64_t, 3> workgroupSize = getWorkgroupSize(funcOp); + MLIRContext *context = &getContext(); + SmallVector<linalg::CopyOp> copiesToWorkgroupMem; + funcOp.walk([&](linalg::CopyOp copyOp) { + if (hasMarker(copyOp, getCopyToWorkgroupMemoryMarker())) + copiesToWorkgroupMem.push_back(copyOp); + }); + if (copiesToWorkgroupMem.empty()) return; + int64_t flatWorkgroupSize = + workgroupSize[0] * workgroupSize[1] * workgroupSize[2]; + bool isAligned = llvm::all_of( + copiesToWorkgroupMem, [flatWorkgroupSize](linalg::CopyOp copyOp) { + auto shape = copyOp.output().getType().cast<MemRefType>().getShape(); + // Verify that each dimension of the shape can be distributed on the + // threads + int64_t threadsAvailable = flatWorkgroupSize; + 
for (auto &dim : llvm::enumerate(llvm::reverse(shape))) { + int64_t numElementPerThread = + dim.index() == 0 ? targetVectorSize : 1; + int64_t numThreads = dim.value() / numElementPerThread; + if (numThreads == 0) return false; + numThreads = std::min(numThreads, threadsAvailable); + if (threadsAvailable % numThreads != 0) return false; + threadsAvailable = threadsAvailable / numThreads; + if (threadsAvailable == 1) break; + } + return threadsAvailable == 1; + }); + if (isAligned) { + // Step 1. Vectorize the shared memory copy. + RewritePatternSet vectorizationPatterns(context); + populateVectorizationPatterns(vectorizationPatterns); + (void)applyPatternsAndFoldGreedily(funcOp, + std::move(vectorizationPatterns)); + + // Step 2. Unroll transfer_read/transfer_write to a vector with the number + // of element equal to `targetVectorSize * targetVectorSize`. The. + // transfer op generated can. then be distributed to a single op of target + // size. + RewritePatternSet vectorUnrollPatterns(context); + populateVectorUnrollPatterns(vectorUnrollPatterns, flatWorkgroupSize); + (void)applyPatternsAndFoldGreedily(funcOp, + std::move(vectorUnrollPatterns)); + // Step 3. Distribute the transfer ops onto the flat ids. + Value flatId = createFlatId(funcOp, workgroupSize); + distributeTransferRead(funcOp, flatId, flatWorkgroupSize); + // Propagate vector distribution to the chain of ops. + RewritePatternSet distributePatterns(context); + vector::populatePropagateVectorDistributionPatterns(distributePatterns); + (void)applyPatternsAndFoldGreedily(funcOp, std::move(distributePatterns)); + } else { + // Fall back to basic tiling for cases where workgroup memory size is not + // well aligned on the number of threads. + // TODO(thomasraoux): Handle this case with padding instead so that we get + // good performance for more complex shapes. 
+ OwningRewritePatternList threadLevelTilingPatterns(context); + populateTilingCopyToWorkgroupMemPatterns(threadLevelTilingPatterns, + workgroupSize); + (void)applyPatternsAndFoldGreedily(funcOp, + std::move(threadLevelTilingPatterns)); + // Apply canonicalization patterns. + RewritePatternSet threadTilingCanonicalizationPatterns = + linalg::getLinalgTilingCanonicalizationPatterns(context); + populateAffineMinSCFCanonicalizationPattern( + threadTilingCanonicalizationPatterns); + (void)applyPatternsAndFoldGreedily( + funcOp, std::move(threadTilingCanonicalizationPatterns)); + } + } +}; + +} // namespace + +std::unique_ptr<OperationPass<FuncOp>> +createLLVMGPUDistributeSharedMemoryCopy() { + return std::make_unique<LLVMGPUDistributeSharedMemoryCopyPass>(); +} + +} // namespace iree_compiler +} // namespace mlir
diff --git a/iree/compiler/Codegen/LLVMGPU/LLVMGPULowerExecutableTarget.cpp b/iree/compiler/Codegen/LLVMGPU/LLVMGPULowerExecutableTarget.cpp index 5bcb04c..4a8c67b 100644 --- a/iree/compiler/Codegen/LLVMGPU/LLVMGPULowerExecutableTarget.cpp +++ b/iree/compiler/Codegen/LLVMGPU/LLVMGPULowerExecutableTarget.cpp
@@ -103,6 +103,9 @@ case IREE::HAL::DispatchLoweringPassPipeline::LLVMGPUVectorize: addGPUVectorizationPassPipeline(nestedModulePM); break; + case IREE::HAL::DispatchLoweringPassPipeline::LLVMGPUMatmulSimt: + addGPUMatmulSimtPassPipeline(nestedModulePM); + break; default: llvm_unreachable("Unsupported pipeline on GPU target."); }
diff --git a/iree/compiler/Codegen/LLVMGPU/LLVMGPURemoveTrivialLoops.cpp b/iree/compiler/Codegen/LLVMGPU/LLVMGPURemoveTrivialLoops.cpp index bc7ef40..aa6ed43 100644 --- a/iree/compiler/Codegen/LLVMGPU/LLVMGPURemoveTrivialLoops.cpp +++ b/iree/compiler/Codegen/LLVMGPU/LLVMGPURemoveTrivialLoops.cpp
@@ -4,6 +4,7 @@ // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +#include "iree/compiler/Codegen/LLVMGPU/LLVMGPUUtils.h" #include "iree/compiler/Codegen/PassDetail.h" #include "iree/compiler/Codegen/Passes.h" #include "iree/compiler/Codegen/Transforms/Transforms.h" @@ -20,7 +21,7 @@ /// If the value is a threadID return the range [0, workgroupSize-1]. static Optional<std::pair<AffineExpr, AffineExpr>> threadIdMinMax( Value value, SmallVectorImpl<Value> &dims, SmallVectorImpl<Value> &symbols, - ArrayRef<int32_t> workgroupSize) { + ArrayRef<int64_t> workgroupSize) { if (auto idOp = value.getDefiningOp<gpu::ThreadIdOp>()) { unsigned index = StringSwitch<unsigned>(idOp.dimension()) .Case("x", 0) @@ -41,14 +42,7 @@ LLVMGPURemoveSingleIterationLoopPass> { void runOnOperation() override { FuncOp funcOp = getOperation(); - auto entryPointOp = getEntryPoint(funcOp); - Optional<ArrayAttr> workgroupSizeAttr = entryPointOp.workgroup_size(); - if (!workgroupSizeAttr) return; - std::array<int32_t, 3> workgroupSize; - for (auto it : llvm::enumerate(workgroupSizeAttr.getValue())) { - workgroupSize[it.index()] = - it.value().cast<IntegerAttr>().getValue().getZExtValue(); - } + std::array<int64_t, 3> workgroupSize = getWorkgroupSize(funcOp); auto getThreadIdMinMax = [&workgroupSize](Value value, SmallVectorImpl<Value> &dims, SmallVectorImpl<Value> &symbols) {
diff --git a/iree/compiler/Codegen/LLVMGPU/LLVMGPUTileAndDistribute.cpp b/iree/compiler/Codegen/LLVMGPU/LLVMGPUTileAndDistribute.cpp index 535ebe8..18a69b2 100644 --- a/iree/compiler/Codegen/LLVMGPU/LLVMGPUTileAndDistribute.cpp +++ b/iree/compiler/Codegen/LLVMGPU/LLVMGPUTileAndDistribute.cpp
@@ -5,15 +5,16 @@ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception #include "iree/compiler/Codegen/LLVMGPU/KernelConfig.h" +#include "iree/compiler/Codegen/LLVMGPU/LLVMGPUUtils.h" #include "iree/compiler/Codegen/PassDetail.h" #include "iree/compiler/Codegen/Passes.h" #include "iree/compiler/Codegen/Transforms/Transforms.h" #include "iree/compiler/Codegen/Utils/MarkerUtils.h" #include "iree/compiler/Codegen/Utils/Utils.h" #include "iree/compiler/Dialect/HAL/IR/LoweringConfig.h" -#include "iree/compiler/Dialect/IREE/IR/IREEOps.h" #include "iree/compiler/Dialect/LinalgExt/IR/LinalgExtOps.h" #include "iree/compiler/Dialect/LinalgExt/Transforms/Transforms.h" +#include "iree/compiler/Dialect/Util/IR/UtilOps.h" #include "mlir/Conversion/GPUToNVVM/GPUToNVVMPass.h" #include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h" #include "mlir/Dialect/GPU/Passes.h" @@ -29,23 +30,6 @@ namespace mlir { namespace iree_compiler { -static SmallVector<linalg::ProcInfo, 2> getGPUThreadIdsAndCounts( - OpBuilder &builder, Location loc, unsigned numDims, - ArrayRef<int64_t> workgroupSize) { - assert(numDims <= kNumMaxParallelDims); - SmallVector<linalg::ProcInfo, 2> procInfo(numDims); - std::array<StringRef, kNumMaxParallelDims> dimAttr{"x", "y", "z"}; - Type indexType = builder.getIndexType(); - for (unsigned i = 0; i < numDims; ++i) { - StringAttr attr = builder.getStringAttr(dimAttr[i]); - procInfo[numDims - 1 - i] = { - builder.create<gpu::ThreadIdOp>(loc, indexType, attr), - builder.create<ConstantOp>(loc, - builder.getIndexAttr(workgroupSize[i]))}; - } - return procInfo; -} - /// Patterns for workgroup level tiling. Workgroup tiling is done at the flow /// level but we may have extra tiling for the reduction dimension. Therefore we /// tile again without distributing. @@ -142,111 +126,9 @@ Identifier::get(getVectorizeMarker(), context))); } -/// Patterns for copy to shared memory mapping. 
Copy to shared memory are not -/// part of the launch config but needs to be distributed on the workgroup -/// picked by the root op. -static void populateTilingCopyToWorkgroupMemPatterns( - MLIRContext *context, OwningRewritePatternList &patterns, - ArrayRef<int64_t> workgroupSize) { - // Tile and distribute copy to workgroup memory. - linalg::TileSizeComputationFunction wgCopyTileSizeFn = - [](OpBuilder &builder, Operation *operation) { - const int64_t copyTileSize = 4; - // We tile to 4 as we want each thread to load 4 element in a cyclic - // distribution. - SmallVector<Value, 4> tileSizesVal; - unsigned rank = - cast<linalg::CopyOp>(operation).getOutputBufferTypes()[0].getRank(); - for (unsigned i = 0; i < rank - 1; i++) { - int64_t t = (rank - i) <= kNumMaxParallelDims ? 1 : 0; - tileSizesVal.push_back( - builder.create<ConstantIndexOp>(operation->getLoc(), t)); - } - tileSizesVal.push_back( - builder.create<ConstantIndexOp>(operation->getLoc(), copyTileSize)); - return tileSizesVal; - }; - auto getCopyThreadProcInfoFn = [workgroupSize]( - OpBuilder &builder, Location loc, - ArrayRef<Range> parallelLoopRanges) { - SmallVector<std::array<int64_t, 3>, 2> staticRanges; - bool hasDynamicRange = false; - // If the ranges are not constant fall back to naive disribution. - for (auto range : parallelLoopRanges) { - auto cstOffset = range.offset.getDefiningOp<ConstantIndexOp>(); - auto cstSize = range.size.getDefiningOp<ConstantIndexOp>(); - auto cstStride = range.stride.getDefiningOp<ConstantIndexOp>(); - if (!cstOffset || !cstSize || !cstStride) { - hasDynamicRange = true; - break; - } - staticRanges.push_back( - {cstOffset.getValue(), cstSize.getValue(), cstStride.getValue()}); - } - // Only support static dimension with 1D workgroups for now. Fall back to - // the naive distribution for other cases. 
- if (hasDynamicRange || workgroupSize[1] != 1 || workgroupSize[2] != 1) - return getGPUThreadIdsAndCounts(builder, loc, parallelLoopRanges.size(), - workgroupSize); - Value serializedId = - builder.create<gpu::ThreadIdOp>(loc, builder.getIndexType(), "x"); - int64_t numIds = workgroupSize[0]; - int numDims = parallelLoopRanges.size(); - SmallVector<linalg::ProcInfo, 2> procInfo(numDims); - assert(numDims <= kNumMaxParallelDims); - // Distribute the available Ids on the loop dimensions. - for (int i = numDims - 1; i >= 0; i--) { - std::array<int64_t, 3> &range = staticRanges[i]; - Value id = serializedId; - int64_t interval = ceilDiv(range[1] - range[0], range[2]); - Value intervalValue = builder.create<ConstantIndexOp>(loc, interval); - int64_t count = 0; - if (numIds <= 1) { - count = 1; - id = builder.create<ConstantIndexOp>(loc, 0); - } else if (numIds > interval) { - AffineExpr d0 = getAffineDimExpr(0, builder.getContext()); - AffineExpr s0 = getAffineSymbolExpr(0, builder.getContext()); - if (i > 0) - id = makeComposedAffineApply(builder, loc, d0 % s0, - {id, intervalValue}); - count = interval; - } else { - count = numIds; - } - numIds = numIds / interval; - AffineExpr d0 = getAffineDimExpr(0, builder.getContext()); - AffineExpr s0 = getAffineSymbolExpr(0, builder.getContext()); - serializedId = makeComposedAffineApply(builder, loc, d0.floorDiv(s0), - {serializedId, intervalValue}); - procInfo[i] = {id, builder.create<ConstantIndexOp>(loc, count)}; - } - return procInfo; - }; - linalg::LinalgLoopDistributionOptions copyInvocationDistributionOptions; - copyInvocationDistributionOptions.procInfo = getCopyThreadProcInfoFn; - copyInvocationDistributionOptions.distributionMethod = { - {linalg::DistributionMethod::Cyclic, linalg::DistributionMethod::Cyclic, - linalg::DistributionMethod::Cyclic}}; - - auto tilingOptions = - linalg::LinalgTilingOptions() - .setLoopType(linalg::LinalgTilingLoopType::Loops) - .setTileSizeComputationFunction(wgCopyTileSizeFn) - 
.setDistributionOptions(copyInvocationDistributionOptions); - patterns.insert<linalg::LinalgTilingPattern<linalg::CopyOp>>( - context, tilingOptions, - linalg::LinalgTransformationFilter( - {Identifier::get(getCopyToWorkgroupMemoryMarker(), context)}, - Identifier::get(getVectorizeMarker(), context))); -} - static LogicalResult copyToWorkgroupMemory(OpBuilder &b, Value src, Value dst) { - // TODO(thomasraoux): Improve barrier placement. - b.create<gpu::BarrierOp>(src.getLoc()); auto copyOp = b.create<linalg::CopyOp>(src.getLoc(), src, dst); setMarker(copyOp, getCopyToWorkgroupMemoryMarker()); - b.create<gpu::BarrierOp>(src.getLoc()); return success(); } @@ -350,6 +232,23 @@ OwningRewritePatternList promotionPatterns(&getContext()); populatePromotionPatterns(context, promotionPatterns); (void)applyPatternsAndFoldGreedily(funcOp, std::move(promotionPatterns)); + // Insert barriers before and after copies to workgroup memory and skip + // insert barriers between back to back copy to workgroup memory. + OpBuilder builder(&getContext()); + funcOp.walk([&builder](linalg::CopyOp copyOp) { + if (hasMarker(copyOp, getCopyToWorkgroupMemoryMarker())) { + Operation *prevOp = copyOp->getPrevNode(); + if (!prevOp || !hasMarker(prevOp, getCopyToWorkgroupMemoryMarker())) { + builder.setInsertionPoint(copyOp); + builder.create<gpu::BarrierOp>(copyOp.getLoc()); + } + Operation *nextOp = copyOp->getNextNode(); + if (!nextOp || !hasMarker(nextOp, getCopyToWorkgroupMemoryMarker())) { + builder.setInsertionPointAfter(copyOp); + builder.create<gpu::BarrierOp>(copyOp.getLoc()); + } + } + }); } { @@ -372,8 +271,6 @@ OwningRewritePatternList threadLevelTilingPatterns(context); populateTilingToInvocationPatterns(context, threadLevelTilingPatterns, workgroupSize); - populateTilingCopyToWorkgroupMemPatterns( - context, threadLevelTilingPatterns, workgroupSize); (void)applyPatternsAndFoldGreedily(funcOp, std::move(threadLevelTilingPatterns)); }
diff --git a/iree/compiler/Codegen/LLVMGPU/LLVMGPUUtils.h b/iree/compiler/Codegen/LLVMGPU/LLVMGPUUtils.h new file mode 100644 index 0000000..16ddcab --- /dev/null +++ b/iree/compiler/Codegen/LLVMGPU/LLVMGPUUtils.h
@@ -0,0 +1,46 @@ +// Copyright 2021 The IREE Authors +// +// Licensed under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + +#ifndef IREE_COMPILER_CODEGEN_LLVMGPU_LLVMGPUUTILS_H_ +#define IREE_COMPILER_CODEGEN_LLVMGPU_LLVMGPUUTILS_H_ + +#include "iree/compiler/Codegen/Transforms/Transforms.h" +#include "iree/compiler/Codegen/Utils/Utils.h" +#include "mlir/Dialect/GPU/Passes.h" + +static constexpr int32_t kNumGPUDims = 3; + +static llvm::SmallVector<mlir::linalg::ProcInfo, 2> getGPUThreadIdsAndCounts( + mlir::OpBuilder &builder, mlir::Location loc, unsigned numDims, + llvm::ArrayRef<int64_t> workgroupSize) { + assert(numDims <= kNumGPUDims); + llvm::SmallVector<mlir::linalg::ProcInfo, 2> procInfo(numDims); + std::array<llvm::StringRef, kNumGPUDims> dimAttr{"x", "y", "z"}; + mlir::Type indexType = builder.getIndexType(); + for (unsigned i = 0; i < numDims; ++i) { + mlir::StringAttr attr = builder.getStringAttr(dimAttr[i]); + procInfo[numDims - 1 - i] = { + builder.create<mlir::gpu::ThreadIdOp>(loc, indexType, attr), + builder.create<mlir::ConstantOp>( + loc, builder.getIndexAttr(workgroupSize[i]))}; + } + return procInfo; +} + +static std::array<int64_t, 3> getWorkgroupSize(mlir::FuncOp funcOp) { + std::array<int64_t, 3> workgroupSize; + auto entryPointOp = mlir::iree_compiler::getEntryPoint(funcOp); + llvm::Optional<mlir::ArrayAttr> workgroupSizeAttr = + entryPointOp.workgroup_size(); + assert(workgroupSizeAttr.hasValue()); + for (auto it : llvm::enumerate(workgroupSizeAttr.getValue())) { + workgroupSize[it.index()] = + it.value().cast<mlir::IntegerAttr>().getValue().getZExtValue(); + } + return workgroupSize; +} + +#endif // IREE_COMPILER_CODEGEN_LLVMGPU_LLVMGPUUTILS_H_
diff --git a/iree/compiler/Codegen/LLVMGPU/LLVMGPUVectorization.cpp b/iree/compiler/Codegen/LLVMGPU/LLVMGPUVectorization.cpp index cdf55d6..c5c4c7e 100644 --- a/iree/compiler/Codegen/LLVMGPU/LLVMGPUVectorization.cpp +++ b/iree/compiler/Codegen/LLVMGPU/LLVMGPUVectorization.cpp
@@ -25,30 +25,11 @@ //====---------------------------------------------------------------------===// static void populateVectorizationPatterns(RewritePatternSet &patterns) { - // We currently don't support vectorization of generic ops with reduction. - // TODO(thomasraoux): Add lowering for vector.multireduce ops. - auto filterReduction = [](Operation *op) { - if (auto genericOp = llvm::dyn_cast<linalg::GenericOp>(op)) { - auto linalgOp = cast<linalg::LinalgOp>(op); - // TODO(thomasraoux): Disable vectorization if the output indexing map has - // permutation to workaround a bug in MLIR core. This will be removed once - // the fix is integrated. - bool vectorizeContract = - linalg::isaContractionOpInterface(linalgOp) && - compressUnusedDims( - linalgOp.getTiedIndexingMap(linalgOp.getOutputOperand(0))) - .isIdentity(); - if (!vectorizeContract && genericOp.getNumReductionLoops() > 0) - return failure(); - } - return success(); - }; linalg::insertVectorizationPatterns<linalg::FillOp, linalg::CopyOp, linalg::GenericOp, linalg::ContractionOpInterface>( patterns, linalg::LinalgVectorizationOptions(), linalg::LinalgTransformationFilter( - filterReduction, Identifier::get(getVectorizeMarker(), patterns.getContext()))); } @@ -111,7 +92,6 @@ populateVectorUnrollPatterns(vectorUnrollPatterns); (void)applyPatternsAndFoldGreedily(funcOp, std::move(vectorUnrollPatterns)); - linalg::hoistRedundantVectorTransfers(funcOp); } { // Step 2. Lower transfer op to canonical form. @@ -144,6 +124,8 @@ contractLoweringPatterns, vector::VectorTransformsOptions().setVectorTransformsOptions( vector::VectorContractLowering::OuterProduct)); + vector::populateVectorMultiReductionLoweringPatterns( + contractLoweringPatterns); (void)applyPatternsAndFoldGreedily(funcOp, std::move(contractLoweringPatterns)); }
diff --git a/iree/compiler/Codegen/LLVMGPU/Passes.cpp b/iree/compiler/Codegen/LLVMGPU/Passes.cpp index c64e6fa..e9f9cbd 100644 --- a/iree/compiler/Codegen/LLVMGPU/Passes.cpp +++ b/iree/compiler/Codegen/LLVMGPU/Passes.cpp
@@ -52,6 +52,30 @@ pm.addNestedPass<FuncOp>(createOptimizeVectorTransferPass()); } +void addGPUMatmulSimtPassPipeline(OpPassManager &pm) { + // Convert tensor to buffers. + addLinalgBufferizePasses(pm, gpuAllocationFunction); + //===--------------------------------------------------------------------===// + // Initial clean up. + //===--------------------------------------------------------------------===// + pm.addPass(createCanonicalizerPass()); + pm.addPass(createCSEPass()); + + // Distribute linalg onto threads within the workgroup. + pm.addNestedPass<FuncOp>(createLLVMGPUTileAndDistributeToThreads()); + pm.addNestedPass<FuncOp>(createLLVMGPUDistributeSharedMemoryCopy()); + pm.addPass(createCanonicalizerPass()); + pm.addPass(createCSEPass()); + + pm.addNestedPass<FuncOp>(createLLVMGPURemoveSingleIterationLoopPass()); + + // Linalg -> vector + pm.addNestedPass<FuncOp>(createLLVMGPUVectorizationPass()); + pm.addNestedPass<FuncOp>(createCanonicalizerPass()); + pm.addNestedPass<FuncOp>(createCSEPass()); + pm.addNestedPass<FuncOp>(createOptimizeVectorTransferPass()); +} + void addGPUSimpleDistributePassPipeline(OpPassManager &pm) { // Convert tensor to buffers. addLinalgBufferizePasses(pm, gpuAllocationFunction);
diff --git a/iree/compiler/Codegen/LLVMGPU/test/BUILD b/iree/compiler/Codegen/LLVMGPU/test/BUILD index 4f88a04..d30dc05 100644 --- a/iree/compiler/Codegen/LLVMGPU/test/BUILD +++ b/iree/compiler/Codegen/LLVMGPU/test/BUILD
@@ -22,6 +22,7 @@ "convert_to_nvvm.mlir", "convert_to_rocdl.mlir", "distribute_to_thread.mlir", + "distribute_wg_copy.mlir", "gpu_set_num_workgroups.mlir", "nvvm_pipeline_test.mlir", "remove_loops.mlir",
diff --git a/iree/compiler/Codegen/LLVMGPU/test/CMakeLists.txt b/iree/compiler/Codegen/LLVMGPU/test/CMakeLists.txt index 4da0394..fd907e2 100644 --- a/iree/compiler/Codegen/LLVMGPU/test/CMakeLists.txt +++ b/iree/compiler/Codegen/LLVMGPU/test/CMakeLists.txt
@@ -17,6 +17,7 @@ "convert_to_nvvm.mlir" "convert_to_rocdl.mlir" "distribute_to_thread.mlir" + "distribute_wg_copy.mlir" "gpu_set_num_workgroups.mlir" "legalize.mlir" "nvvm_pipeline_test.mlir"
diff --git a/iree/compiler/Codegen/LLVMGPU/test/distribute_to_thread.mlir b/iree/compiler/Codegen/LLVMGPU/test/distribute_to_thread.mlir index 17fafcc..1d5faba 100644 --- a/iree/compiler/Codegen/LLVMGPU/test/distribute_to_thread.mlir +++ b/iree/compiler/Codegen/LLVMGPU/test/distribute_to_thread.mlir
@@ -64,27 +64,15 @@ // CHECK: memref.global "private" @{{.*}} : memref<4x256xf32, 3> // CHECK: memref.global "private" @{{.*}} : memref<2x4xf32, 3> // CHECK-DAG: %[[C0:.+]] = constant 0 : index -// CHECK-DAG: %[[C1:.+]] = constant 1 : index // CHECK-DAG: %[[C2:.+]] = constant 2 : index // CHECK-DAG: %[[C4:.+]] = constant 4 : index // CHECK-DAG: %[[C256:.+]] = constant 256 : index // CHECK-DAG: %[[C1024:.+]] = constant 1024 : index // CHECK: scf.for %[[K:.+]] = %[[C0]] to %[[C1024]] step %[[C4]] { // CHECK: gpu.barrier -// CHECK: %[[TIDX:.+]] = "gpu.thread_id"() {dimension = "x"} : () -> index -// CHECK: scf.for %[[IND:.+]] = %[[TIDX]] to %[[C2]] step %[[C2]] { -// CHECK-DAG: %[[SRC:.+]] = memref.subview %{{.*}}[%[[IND]], 0] [1, 4] [1, 1] : memref<2x4xf32, #{{.*}}> to memref<1x4xf32, #{{.*}}> -// CHECK-DAG: %[[DST:.+]] = memref.subview %{{.*}}[%[[IND]], 0] [1, 4] [1, 1] : memref<2x4xf32, #{{.*}}, 3> to memref<1x4xf32, #{{.*}}, 3> -// CHECK: linalg.copy(%[[SRC]], %[[DST]]) {__internal_linalg_transform__ = "vectorize"} : memref<1x4xf32, #{{.*}}>, memref<1x4xf32, #{{.*}}, 3> -// CHECK: } -// CHECK: %[[TIDX:.+]] = "gpu.thread_id"() {dimension = "x"} : () -> index -// CHECK: scf.for %[[IND0:.+]] = %[[C0]] to %[[C4]] step %[[C1]] { -// CHECK: scf.for %[[IND1:.+]] = %{{.*}} to %[[C256]] step %[[C256]] { -// CHECK-DAG: %[[SRC:.+]] = memref.subview %{{.*}}[%[[IND0]], %[[IND1]]] [1, 4] [1, 1] : memref<4x256xf32, #{{.*}}> to memref<1x4xf32, #{{.*}}> -// CHECK-DAG: %[[DST:.+]] = memref.subview %{{.*}}[%[[IND0]], %[[IND1]]] [1, 4] [1, 1] : memref<4x256xf32, #{{.*}}, 3> to memref<1x4xf32, #{{.*}}, 3> -// CHECK: linalg.copy(%[[SRC]], %[[DST]]) {__internal_linalg_transform__ = "vectorize"} : memref<1x4xf32, #{{.*}}>, memref<1x4xf32, #{{.*}}, 3> -// CHECK: } -// CHECK: } +// CHECK: linalg.copy(%{{.*}}, %{{.*}}) {__internal_linalg_transform__ = "copy_to_workgroup_memory"} : memref<2x4xf32, #{{.*}}>, memref<2x4xf32, #{{.*}}, 3> +// CHECK-NOT: gpu.barrier +// CHECK: 
linalg.copy(%{{.*}}, %{{.*}}) {__internal_linalg_transform__ = "copy_to_workgroup_memory"} : memref<4x256xf32, #{{.*}}>, memref<4x256xf32, #{{.*}}, 3> // CHECK: gpu.barrier // CHECK: scf.for %[[IND0:.+]] = %{{.*}} to %[[C2]] step %[[C2]] { // CHECK: scf.for %[[IND1:.+]] = %{{.*}} to %[[C256]] step %[[C256]] { @@ -97,89 +85,6 @@ // ----- -// Test that non aligned sizes compile correctly. -#config = {tileSizes = [[2, 256, 4], [], [2, 4]]} -#executable_target_cuda_nvptx_fb = #hal.executable.target<"cuda", "cuda-nvptx-fb"> -#map0 = affine_map<()[s0] -> (s0 * 2)> -#map1 = affine_map<()[s0] -> (s0 * 256)> -#map3 = affine_map<(d0) -> (256, -d0 + 4)> -#map4 = affine_map<(d0, d1)[s0] -> (d0 * 3 + s0 + d1)> -#map5 = affine_map<(d0, d1)[s0] -> (d0 * 4 + s0 + d1)> -hal.executable @dot_dispatch_1 attributes {sym_visibility = "private"} { - hal.executable.variant @cuda, target = #executable_target_cuda_nvptx_fb { - hal.executable.entry_point @dot_dispatch_1 attributes { - interface = @legacy_io, - ordinal = 0 : index, - translation.info = { - passPipeline = 3 : i32, - workloadPerWorkgroup = [256, 2]}, - workgroup_size = [64: index, 1: index, 1: index]} - builtin.module { - builtin.func @dot_dispatch_1() { - %c0 = constant 0 : index - %c4 = constant 4 : index - %c2 = constant 2 : index - %cst = constant 0.000000e+00 : f32 - %0 = hal.interface.binding.subspan @io::@ro0[%c0] : memref<2x3xf32> - %1 = hal.interface.binding.subspan @io::@ro1[%c0] : memref<3x4xf32> - %2 = hal.interface.binding.subspan @io::@wo2[%c0] : memref<2x4xf32> - %workgroup_size_x = hal.interface.workgroup.size[0] : index - %workgroup_size_y = hal.interface.workgroup.size[1] : index - %workgroup_id_x = hal.interface.workgroup.id[0] : index - %workgroup_count_x = hal.interface.workgroup.count[0] : index - %workgroup_id_y = hal.interface.workgroup.id[1] : index - %workgroup_count_y = hal.interface.workgroup.count[1] : index - %3 = affine.apply #map0()[%workgroup_id_y] - %4 = affine.apply #map0()[%workgroup_count_y] 
- scf.for %arg0 = %3 to %c2 step %4 { - %5 = affine.apply #map1()[%workgroup_id_x] - %6 = affine.apply #map1()[%workgroup_count_x] - scf.for %arg1 = %5 to %c4 step %6 { - %8 = memref.subview %0[%arg0, 0] [2, 3] [1, 1] : memref<2x3xf32> to memref<2x3xf32, #map4> - %9 = affine.min #map3(%arg1)[] - %10 = memref.subview %1[0, %arg1] [3, %9] [1, 1] : memref<3x4xf32> to memref<3x?xf32, #map5> - %11 = memref.subview %2[%arg0, %arg1] [2, %9] [1, 1] : memref<2x4xf32> to memref<2x?xf32, #map5> - linalg.fill(%cst, %11) { - __internal_linalg_transform__ = "workgroup", - lowering.config = #config} : f32, memref<2x?xf32, #map5> - linalg.matmul { - __internal_linalg_transform__ = "workgroup", - lowering.config = #config} - ins(%8, %10 : memref<2x3xf32, #map4>, memref<3x?xf32, #map5>) - outs(%11 : memref<2x?xf32, #map5>) - } - } - return - } - hal.interface @legacy_io attributes {sym_visibility = "private"} { - hal.interface.binding @ro0, set=0, binding=0, type="StorageBuffer", access="Read" - hal.interface.binding @ro1, set=0, binding=1, type="StorageBuffer", access="Read" - hal.interface.binding @wo2, set=0, binding=2, type="StorageBuffer", access="Write|Discard" - } - } - } -} -// CHECK-LABEL: hal.executable @dot_dispatch_1 -// CHECK-DAG: %[[C0:.+]] = constant 0 : index -// CHECK-DAG: %[[C1:.+]] = constant 1 : index -// CHECK-DAG: %[[C2:.+]] = constant 2 : index -// CHECK-DAG: %[[C3:.+]] = constant 3 : index -// CHECK-DAG: %[[C256:.+]] = constant 256 : index -// CHECK: gpu.barrier -// CHECK: scf.for %[[IND:.+]] = %{{.*}} to %[[C2]] step %[[C2]] { -// CHECK-DAG: %[[SRC:.+]] = memref.subview %{{.*}}[%[[IND]], 0] [1, 3] [1, 1] : memref<2x3xf32, #{{.*}}> to memref<1x3xf32, #{{.*}}> -// CHECK-DAG: %[[DST:.+]] = memref.subview %{{.*}}[%[[IND]], 0] [1, 3] [1, 1] : memref<2x3xf32, #{{.*}}, 3> to memref<1x3xf32, #{{.*}}, 3> -// CHECK: linalg.copy(%[[SRC]], %[[DST]]) {__internal_linalg_transform__ = "vectorize"} : memref<1x3xf32, #{{.*}}>, memref<1x3xf32, #{{.*}}, 3> -// CHECK: } -// 
CHECK: scf.for %[[IND0:.+]] = %{{.*}} to %[[C3]] step %[[C1]] { -// CHECK: scf.for %[[IND1:.+]] = %{{.*}} to %{{.*}} step %[[C256]] { -// CHECK-DAG: %[[SRC:.+]] = memref.subview %{{.*}}[%[[IND0]], %[[IND1]]] [1, 4] [1, 1] : memref<{{.*}}> to memref<1x4xf32, #{{.*}}> -// CHECK-DAG: %[[DST:.+]] = memref.subview %{{.*}}[%[[IND0]], %[[IND1]]] [1, 4] [1, 1] : memref<{{.*}}, 3> to memref<1x4xf32, #{{.*}}, 3> -// CHECK: linalg.copy(%[[SRC]], %[[DST]]) {__internal_linalg_transform__ = "vectorize"} : memref<1x4xf32, #{{.*}}>, memref<1x4xf32, #{{.*}}, 3> -// CHECK: } -// CHECK: } -// ----- - #config = {tileSizes = [[]]} // Pure reducion case, skip tiling. hal.executable @reduction_dispatch {
diff --git a/iree/compiler/Codegen/LLVMGPU/test/distribute_wg_copy.mlir b/iree/compiler/Codegen/LLVMGPU/test/distribute_wg_copy.mlir new file mode 100644 index 0000000..5a65678 --- /dev/null +++ b/iree/compiler/Codegen/LLVMGPU/test/distribute_wg_copy.mlir
@@ -0,0 +1,67 @@ +// RUN: iree-opt -pass-pipeline='hal.executable(hal.executable.variant(builtin.module(builtin.func(iree-llvmgpu-distribute-shared-memory-copy))))' -cse %s | IreeFileCheck %s + +// CHECK-DAG: #[[$MAP0:.*]] = affine_map<()[s0, s1, s2] -> (s1 * 8 + s2 * 32 + s0 floordiv 4)> +// CHECK-DAG: #[[$MAP1:.*]] = affine_map<()[s0] -> (s0 * 4 - (s0 floordiv 4) * 16)> +// CHECK-DAG: #[[$MAP2:.*]] = affine_map<()[s0, s1, s2] -> (s1 * 8 + s2 * 32 + s0 floordiv 4 + 32)> +// CHECK-DAG: #[[$MAP3:.*]] = affine_map<()[s0, s1, s2] -> (s0 + s1 * 32 + s2 * 128)> +// CHECK-DAG: #[[$MAP4:.*]] = affine_map<()[s0, s1, s2] -> (s0 + s1 * 32 + s2 * 128 + 128)> +// CHECK-DAG: #[[$MAP5:.*]] = affine_map<()[s0, s1, s2] -> (s0 * 4 + s1 * 128 + s2 * 512)> + +hal.executable @shared_mem_cpy attributes {sym_visibility = "private"} { + hal.executable.variant @cuda, target = #hal.executable.target<"cuda", "cuda-nvptx-fb"> { + hal.executable.entry_point @shared_mem_cpy attributes { + interface = @io, + ordinal = 0 : index, + workgroup_size = [32: index, 4: index, 1:index]} + builtin.module { + memref.global "private" @__shared_memory___1 : memref<3x512xf32, 3> + memref.global "private" @__shared_memory___0 : memref<256x4xf32, 3> + memref.global "private" @__shared_memory__ : memref<64x16xf32, 3> + // CHECK-LABEL: builtin.func @shared_mem_cpy( + builtin.func @shared_mem_cpy( + %m0 : memref<64x16xf32>, %m1 : memref<256x4xf32>, %m2 : memref<3x512xf32>) { + %sm0 = memref.get_global @__shared_memory__ : memref<64x16xf32, 3> + %sm1 = memref.get_global @__shared_memory___0 : memref<256x4xf32, 3> + %sm2 = memref.get_global @__shared_memory___1 : memref<3x512xf32, 3> + gpu.barrier + // CHECK-DAG: %[[C2:.*]] = constant 2 : index + // CHECK-DAG: %[[C1:.*]] = constant 1 : index + // CHECK-DAG: %[[C0:.*]] = constant 0 : index + // CHECK-DAG: %[[TX:.*]] = "gpu.thread_id"() {dimension = "x"} : () -> index + // CHECK-DAG: %[[TY:.*]] = "gpu.thread_id"() {dimension = "y"} : () -> index + // CHECK-DAG: 
%[[TZ:.*]] = "gpu.thread_id"() {dimension = "z"} : () -> index + + // CHECK-DAG: %[[Y0:.*]] = affine.apply #[[$MAP0]]()[%[[TX]], %[[TY]], %[[TZ]]] + // CHECK-DAG: %[[X0:.*]] = affine.apply #[[$MAP1]]()[%[[TX]]] + // CHECK: %[[R0:.*]] = vector.transfer_read %{{.*}}[%[[Y0]], %[[X0]]], %{{.*}} {in_bounds = [true, true]} : memref<64x16xf32>, vector<1x4xf32> + // CHECK-DAG: %[[Y1:.*]] = affine.apply #[[$MAP2]]()[%[[TX]], %[[TY]], %[[TZ]]] + // CHECK: %[[R1:.*]] = vector.transfer_read %{{.*}}[%[[Y1]], %[[X0]]], %{{.*}} {in_bounds = [true, true]} : memref<64x16xf32>, vector<1x4xf32> + // CHECK: vector.transfer_write %[[R0]], %{{.*}}[%[[Y0]], %[[X0]]] {in_bounds = [true, true]} : vector<1x4xf32>, memref<64x16xf32, 3> + // CHECK: vector.transfer_write %[[R1]], %{{.*}}[%[[Y1]], %[[X0]]] {in_bounds = [true, true]} : vector<1x4xf32>, memref<64x16xf32, 3> + + linalg.copy(%m0, %sm0) {__internal_linalg_transform__ = "copy_to_workgroup_memory"} : memref<64x16xf32>, memref<64x16xf32, 3> + + // CHECK: %[[Y1:.*]] = affine.apply #[[$MAP3]]()[%[[TX]], %[[TY]], %[[TZ]]] + // CHECK: %[[R2:.*]] = vector.transfer_read %{{.*}}[%[[Y1]], %[[C0]]], %{{.*}} {in_bounds = [true, true]} : memref<256x4xf32>, vector<1x4xf32> + // CHECK: %[[Y2:.*]] = affine.apply #[[$MAP4]]()[%[[TX]], %[[TY]], %[[TZ]]] + // CHECK: %[[R3:.*]] = vector.transfer_read %{{.*}}[%[[Y2]], %[[C0]]], %{{.*}} {in_bounds = [true, true]} : memref<256x4xf32>, vector<1x4xf32> + // CHECK: vector.transfer_write %[[R2]], %{{.*}}[%[[Y1]], %[[C0]]] {in_bounds = [true, true]} : vector<1x4xf32>, memref<256x4xf32, 3> + // CHECK: vector.transfer_write %[[R3]], %{{.*}}[%[[Y2]], %[[C0]]] {in_bounds = [true, true]} : vector<1x4xf32>, memref<256x4xf32, 3> + + linalg.copy(%m1, %sm1) {__internal_linalg_transform__ = "copy_to_workgroup_memory"} : memref<256x4xf32>, memref<256x4xf32, 3> + + // CHECK: %[[X1:.*]] = affine.apply #[[$MAP5]]()[%[[TX]], %[[TY]], %[[TZ]]] + // CHECK: %[[R4:.*]] = vector.transfer_read %{{.*}}[%[[C0]], %[[X1]]], %{{.*}} 
{in_bounds = [true, true]} : memref<3x512xf32>, vector<1x4xf32> + // CHECK: %[[R5:.*]] = vector.transfer_read %{{.*}}[%[[C1]], %[[X1]]], %{{.*}} {in_bounds = [true, true]} : memref<3x512xf32>, vector<1x4xf32> + // CHECK: %[[R6:.*]] = vector.transfer_read %{{.*}}[%[[C2]], %[[X1]]], %{{.*}} {in_bounds = [true, true]} : memref<3x512xf32>, vector<1x4xf32> + // CHECK: vector.transfer_write %[[R4]], %{{.*}}[%c0, %15] {in_bounds = [true, true]} : vector<1x4xf32>, memref<3x512xf32, 3> + // CHECK: vector.transfer_write %[[R5]], %{{.*}}[%c1, %15] {in_bounds = [true, true]} : vector<1x4xf32>, memref<3x512xf32, 3> + // CHECK: vector.transfer_write %[[R6]], %{{.*}}[%c2, %15] {in_bounds = [true, true]} : vector<1x4xf32>, memref<3x512xf32, 3> + + linalg.copy(%m2, %sm2) {__internal_linalg_transform__ = "copy_to_workgroup_memory"} : memref<3x512xf32>, memref<3x512xf32, 3> + gpu.barrier + return + } + } + } +}
diff --git a/iree/compiler/Codegen/LLVMGPU/test/gpu_set_num_workgroups.mlir b/iree/compiler/Codegen/LLVMGPU/test/gpu_set_num_workgroups.mlir index 65818ae..4f8c6c6 100644 --- a/iree/compiler/Codegen/LLVMGPU/test/gpu_set_num_workgroups.mlir +++ b/iree/compiler/Codegen/LLVMGPU/test/gpu_set_num_workgroups.mlir
@@ -46,7 +46,7 @@ // CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0)[s0] -> (d0 + s0)> // CHECK-DAG: #[[MAP3:.+]] = affine_map<(d0) -> (d0)> // CHECK: hal.executable.entry_point @add_dispatch_0 -// CHECK-SAME: passPipeline = 2 : i32 +// CHECK-SAME: passPipeline = 3 : i32 // CHECK-SAME: workloadPerWorkgroup = [128] // CHECK-SAME: workgroup_size = [32 : index, 1 : index, 1 : index] // CHECK-NEXT: ^bb0(%[[ARG0:[a-zA-Z0-9]+]]: index, @@ -106,7 +106,7 @@ // CHECK-DAG: #[[MAP0:.+]] = affine_map<()[s0] -> (s0 ceildiv 256)> // CHECK-DAG: #[[MAP1:.+]] = affine_map<()[s0] -> (s0 ceildiv 2)> // CHECK: hal.executable.entry_point @dot_dispatch_1 -// CHECK-SAME: passPipeline = 3 : i32 +// CHECK-SAME: passPipeline = 4 : i32 // CHECK-SAME: workloadPerWorkgroup = [256, 2] // CHECK-SAME: workgroup_size = [64 : index, 1 : index, 1 : index] // CHECK-NEXT: ^bb0(%[[ARG0:[a-zA-Z0-9]+]]: index, %[[ARG1:[a-zA-Z0-9]+]]: index,
diff --git a/iree/compiler/Codegen/LLVMGPU/test/nvvm_pipeline_test.mlir b/iree/compiler/Codegen/LLVMGPU/test/nvvm_pipeline_test.mlir index 73bd058..986a8bf 100644 --- a/iree/compiler/Codegen/LLVMGPU/test/nvvm_pipeline_test.mlir +++ b/iree/compiler/Codegen/LLVMGPU/test/nvvm_pipeline_test.mlir
@@ -99,15 +99,15 @@ } } -// CHECK-LABEL: hal.executable @dot_dispatch_0 -// CHECK: hal.executable.variant @cuda -// CHECK-NOT: llvm.store -// CHECK-COUNT-2: llvm.load {{.*}} : !llvm.ptr<vector<4xf32>> -// CHECK: llvm.br -// CHECK-COUNT-6: llvm.load {{.*}} : !llvm.ptr<vector<4xf32>, 3> -// CHECK-COUNT-8: "llvm.intr.fmuladd"({{.*}}) : (vector<4xf32>, vector<4xf32>, vector<4xf32>) -> vector<4xf32> -// CHECK: llvm.br -// CHECK-COUNT-2: llvm.store {{.*}} : !llvm.ptr<vector<4xf32>> +// CHECK-LABEL: hal.executable @dot_dispatch_0 +// CHECK: hal.executable.variant @cuda +// CHECK-NOT: llvm.store +// CHECK: llvm.br +// CHECK-COUNT-3: llvm.load {{.*}} : !llvm.ptr<vector<4xf32>> +// CHECK-COUNT-32: llvm.load {{.*}} : !llvm.ptr<vector<4xf32>, 3> +// CHECK-COUNT-128: "llvm.intr.fmuladd"({{.*}}) : (vector<4xf32>, vector<4xf32>, vector<4xf32>) -> vector<4xf32> +// CHECK: llvm.br +// CHECK-COUNT-16: llvm.store {{.*}} : !llvm.ptr<vector<4xf32>> // ----- @@ -350,3 +350,102 @@ // CHECK-LABEL: hal.executable @reduction_dispatch // CHECK: hal.executable.variant @cuda // CHECK: llvm.fadd + +// ----- + +hal.executable @vector_add_dispatch { +hal.executable.variant @cuda, target = #hal.executable.target<"cuda", "cuda-nvptx-fb"> { + hal.executable.entry_point @vector_add_dispatch attributes {interface = @io, ordinal = 0 : index} + builtin.module { + builtin.func @vector_add_dispatch() { + %c0 = constant 0 : index + %c128 = constant 128 : index + %0 = hal.interface.binding.subspan @io::@s0b0_ro_external[%c0] : !flow.dispatch.tensor<readonly:128xf32> + %1 = hal.interface.binding.subspan @io::@s0b1_ro_external[%c0] : !flow.dispatch.tensor<readonly:128xf32> + %2 = hal.interface.binding.subspan @io::@s0b2_xw_external[%c0] : !flow.dispatch.tensor<writeonly:128xf32> + %workgroup_size_x = hal.interface.workgroup.size[0] : index + %workgroup_id_x = hal.interface.workgroup.id[0] : index + %workgroup_count_x = hal.interface.workgroup.count[0] : index + %3 = affine.apply affine_map<()[s0, s1] -> (s0 * 
s1)>()[%workgroup_id_x, %workgroup_size_x] + %4 = affine.apply affine_map<()[s0, s1] -> (s0 * s1)>()[%workgroup_count_x, %workgroup_size_x] + scf.for %arg0 = %3 to %c128 step %4 { + %5 = affine.min affine_map<(d0, d1) -> (d1, -d0 + 128)>(%arg0)[%workgroup_size_x] + %6 = flow.dispatch.tensor.load %0, offsets = [%arg0], sizes = [%5], strides = [1] : !flow.dispatch.tensor<readonly:128xf32> -> tensor<?xf32> + %7 = affine.min affine_map<(d0, d1) -> (d1, -d0 + 128)>(%arg0)[%workgroup_size_x] + %8 = flow.dispatch.tensor.load %1, offsets = [%arg0], sizes = [%7], strides = [1] : !flow.dispatch.tensor<readonly:128xf32> -> tensor<?xf32> + %9 = affine.min affine_map<(d0, d1) -> (d1, -d0 + 128)>(%arg0)[%workgroup_size_x] + %10 = linalg.init_tensor [%9] : tensor<?xf32> + %11 = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>], iterator_types = ["parallel"]} ins(%6, %8 : tensor<?xf32>, tensor<?xf32>) outs(%10 : tensor<?xf32>) attrs = {__internal_linalg_transform__ = "workgroup"} { + ^bb0(%arg1: f32, %arg2: f32, %arg3: f32): // no predecessors + %12 = addf %arg1, %arg2 : f32 + linalg.yield %12 : f32 + } -> tensor<?xf32> + flow.dispatch.tensor.store %11, %2, offsets = [%arg0], sizes = [%9], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:128xf32> + } + return + } + hal.interface @io attributes {sym_visibility = "private"} { + hal.interface.binding @s0b0_ro_external, set=0, binding=0, type="StorageBuffer", access="Read" + hal.interface.binding @s0b1_ro_external, set=0, binding=1, type="StorageBuffer", access="Read" + hal.interface.binding @s0b2_xw_external, set=0, binding=2, type="StorageBuffer", access="Write|Discard" + } + } +} +} + +// CHECK-LABEL: hal.executable @vector_add_dispatch +// CHECK: hal.executable.variant @cuda +// CHECK: llvm.fadd %{{.*}}, %{{.*}} : vector<4xf32 +// CHECK: llvm.store %{{.*}} : !llvm.ptr<vector<4xf32>> + +// ----- + +#map0 = affine_map<()[s0, s1] -> (s0 * s1)> +#map1 = 
affine_map<(d0)[s0] -> (s0, -d0 + 768)> +#map2 = affine_map<(d0)[s0] -> (-d0 + 768, s0)> +#map3 = affine_map<(d0, d1) -> (d1, d0)> +#map4 = affine_map<(d0, d1) -> (d0)> + +hal.executable @vector_reduction_dispatch { +hal.executable.variant @cuda, target = #hal.executable.target<"cuda", "cuda-nvptx-fb"> { + hal.executable.entry_point @vector_reduction_dispatch attributes {interface = @io, ordinal = 0 : index} + builtin.module { + builtin.func @vector_reduction_dispatch() { + %c0 = constant 0 : index + %c768 = constant 768 : index + %cst = constant 1.000000e+00 : f32 + %0 = hal.interface.binding.subspan @io::@s0b0_ro_external[%c0] : !flow.dispatch.tensor<readonly:512x768xf32> + %1 = hal.interface.binding.subspan @io::@s0b1_xw_external[%c0] : !flow.dispatch.tensor<writeonly:768xf32> + %workgroup_size_x = hal.interface.workgroup.size[0] : index + %workgroup_id_x = hal.interface.workgroup.id[0] : index + %workgroup_count_x = hal.interface.workgroup.count[0] : index + %2 = affine.apply #map0()[%workgroup_id_x, %workgroup_size_x] + %3 = affine.apply #map0()[%workgroup_count_x, %workgroup_size_x] + scf.for %arg0 = %2 to %c768 step %3 { + %4 = affine.min #map1(%arg0)[%workgroup_size_x] + %5 = flow.dispatch.tensor.load %0, offsets = [0, %arg0], sizes = [512, %4], strides = [1, 1] : !flow.dispatch.tensor<readonly:512x768xf32> -> tensor<512x?xf32> + %6 = affine.min #map1(%arg0)[%workgroup_size_x] + %7 = affine.min #map2(%arg0)[%workgroup_size_x] + %8 = linalg.init_tensor [%7] : tensor<?xf32> + %9 = linalg.fill(%cst, %8) : f32, tensor<?xf32> -> tensor<?xf32> + %10 = linalg.generic {indexing_maps = [#map3, #map4], iterator_types = ["parallel", "reduction"]} ins(%5 : tensor<512x?xf32>) outs(%9 : tensor<?xf32>) attrs = {__internal_linalg_transform__ = "workgroup"} { + ^bb0(%arg1: f32, %arg2: f32): // no predecessors + %11 = addf %arg1, %arg2 : f32 + linalg.yield %11 : f32 + } -> tensor<?xf32> + flow.dispatch.tensor.store %10, %1, offsets = [%arg0], sizes = [%6], strides = [1] : 
tensor<?xf32> -> !flow.dispatch.tensor<writeonly:768xf32> + } + return + } + hal.interface @io attributes {sym_visibility = "private"} { + hal.interface.binding @s0b0_ro_external, set=0, binding=0, type="StorageBuffer", access="Read" + hal.interface.binding @s0b1_xw_external, set=0, binding=1, type="StorageBuffer", access="Write|Discard" + } + } + } +} + +// CHECK-LABEL: hal.executable @vector_reduction_dispatch +// CHECK: hal.executable.variant @cuda +// CHECK-COUNT-4: llvm.fadd +// CHECK: llvm.store %{{.*}} : !llvm.ptr<vector<4xf32>>
diff --git a/iree/compiler/Codegen/LLVMGPU/test/rocdl_pipeline_test.mlir b/iree/compiler/Codegen/LLVMGPU/test/rocdl_pipeline_test.mlir index 80d283c..64511a0 100644 --- a/iree/compiler/Codegen/LLVMGPU/test/rocdl_pipeline_test.mlir +++ b/iree/compiler/Codegen/LLVMGPU/test/rocdl_pipeline_test.mlir
@@ -101,9 +101,10 @@ // CHECK-LABEL: hal.executable @dot_dispatch_0 // CHECK: hal.executable.variant @rocm -// CHECK-COUNT-2: llvm.load {{.*}} : !llvm.ptr<vector<4xf32>> -// CHECK: llvm.br -// CHECK-COUNT-6: llvm.load {{.*}} : !llvm.ptr<vector<4xf32>, 3> -// CHECK-COUNT-8: "llvm.intr.fmuladd"({{.*}}) : (vector<4xf32>, vector<4xf32>, vector<4xf32>) -> vector<4xf32> -// CHECK: llvm.br -// CHECK-COUNT-2: llvm.store {{.*}} : !llvm.ptr<vector<4xf32>> +// CHECK-NOT: llvm.store +// CHECK: llvm.br +// CHECK-COUNT-3: llvm.load {{.*}} : !llvm.ptr<vector<4xf32>> +// CHECK-COUNT-32: llvm.load {{.*}} : !llvm.ptr<vector<4xf32>, 3> +// CHECK-COUNT-128: "llvm.intr.fmuladd"({{.*}}) : (vector<4xf32>, vector<4xf32>, vector<4xf32>) -> vector<4xf32> +// CHECK: llvm.br +// CHECK-COUNT-16: llvm.store {{.*}} : !llvm.ptr<vector<4xf32>>
diff --git a/iree/compiler/Codegen/Passes.h b/iree/compiler/Codegen/Passes.h index 6d9ae4b..4ecc1f4 100644 --- a/iree/compiler/Codegen/Passes.h +++ b/iree/compiler/Codegen/Passes.h
@@ -201,12 +201,15 @@ /// Lowering calling vectorization patterns. Expects pass manager to be a /// module-level pass manager. -void addGPUVectorizationPassPipeline(OpPassManager &passManager); +void addGPUVectorizationPassPipeline(OpPassManager &pm); + +/// Lowering calling vectorization patterns. +void addGPUMatmulSimtPassPipeline(OpPassManager &pm); /// Simple lowering only distributute linalg ops on blocks and threads. This /// will result in scalar operations. Expects pass manager to be a module-level /// pass manager. -void addGPUSimpleDistributePassPipeline(OpPassManager &passManager); +void addGPUSimpleDistributePassPipeline(OpPassManager &pm); /// Populates passes needed to lower a XLA HLO op to NVVM/ROCDL dialect via the /// structured ops path. The pass manager `pm` in here should operate on the @@ -236,6 +239,10 @@ /// Lower vector ops before convertion to LLVM. std::unique_ptr<OperationPass<FuncOp>> createLLVMGPUVectorLoweringPass(); +/// Convert shared memory copies to distributed transfer_read/transfer_write. +std::unique_ptr<OperationPass<FuncOp>> +createLLVMGPUDistributeSharedMemoryCopy(); + //------------------------------------------------------------------------------ // SPIRV Passes //------------------------------------------------------------------------------ @@ -281,9 +288,8 @@ std::unique_ptr<OperationPass<FuncOp>> createSPIRVVectorToCooperativeMatrixPass(); -/// Pass to convert vector operations to GPU level operations. Instructions of -/// vector size equal to subgroup size are distributed across the subgroup. -std::unique_ptr<OperationPass<FuncOp>> createSPIRVVectorToGPUPass(); +/// Pass to lower linalg.copy for copying data to workgroup memory. +std::unique_ptr<OperationPass<FuncOp>> createSPIRVCopyToWorkgroupMemoryPass(); /// Converts memref of scalar to memref of vector of efficent size. This will /// allow to convert memory accesses to vector load/store in SPIR-V without @@ -312,10 +318,6 @@ // SPIRV Codegen specific patterns. 
//----------------------------------------------------------------------------// -/// Populates patterns to tile and distribute linalg.copy operations. -void populateTileAndDistributeLinalgCopyPatterns( - MLIRContext *context, OwningRewritePatternList &patterns); - /// Populates patterns to fold processor ID uses by using processor counts /// information where possible. void populateFoldGPUProcessorIDUsesPatterns(MLIRContext *context,
diff --git a/iree/compiler/Codegen/Passes.td b/iree/compiler/Codegen/Passes.td index 328515c..4481d70 100644 --- a/iree/compiler/Codegen/Passes.td +++ b/iree/compiler/Codegen/Passes.td
@@ -193,6 +193,12 @@ let constructor = "mlir::iree_compiler::createLLVMGPUVectorLoweringPass()"; } +def LLVMGPUDistributeSharedMemoryCopy : + Pass<"iree-llvmgpu-distribute-shared-memory-copy", "FuncOp"> { + let summary = "Pass to distribute shared memory copies to threads."; + let constructor = "mlir::iree_compiler::createLLVMGPUDistributeSharedMemoryCopy()"; +} + //------------------------------------------------------------------------------ // SPIRV //------------------------------------------------------------------------------ @@ -255,10 +261,10 @@ } // TODO: Rename argument to be fully qualified. -def SPIRVVectorToGPU : - Pass<"iree-spirv-vector-to-gpu", "FuncOp"> { +def SPIRVCopyToWorkgroupMemory : + Pass<"iree-spirv-copy-to-workgroup-memory", "FuncOp"> { let summary = "Convert vector dialect to gpu subgroup level GPU instructions"; - let constructor = "mlir::iree_compiler::createSPIRVVectorToGPUPass()"; + let constructor = "mlir::iree_compiler::createSPIRVCopyToWorkgroupMemoryPass()"; } //------------------------------------------------------------------------------
diff --git a/iree/compiler/Codegen/SPIRV/BUILD b/iree/compiler/Codegen/SPIRV/BUILD index 5ec14cf..d9533b1 100644 --- a/iree/compiler/Codegen/SPIRV/BUILD +++ b/iree/compiler/Codegen/SPIRV/BUILD
@@ -20,10 +20,10 @@ "Passes.cpp", "SPIRVConcretizeWorkgroupTiles.cpp", "SPIRVConvertToGPU.cpp", + "SPIRVCopyToWorkgroupMemory.cpp", "SPIRVFoldGPUProcessorIDUses.cpp", "SPIRVTileAndVectorize.cpp", "SPIRVVectorToCooperativeMatrix.cpp", - "SPIRVVectorToGPU.cpp", "SPIRVVectorizeLoadStore.cpp", "Utils.cpp", ], @@ -41,9 +41,9 @@ "//iree/compiler/Dialect/Flow/IR", "//iree/compiler/Dialect/HAL/IR", "//iree/compiler/Dialect/HAL/IR:HALDialect", - "//iree/compiler/Dialect/IREE/IR", "//iree/compiler/Dialect/Shape/IR", "//iree/compiler/Dialect/Shape/Transforms", + "//iree/compiler/Dialect/Util/IR", "@llvm-project//llvm:Support", "@llvm-project//mlir:Affine", "@llvm-project//mlir:AffineToStandardTransforms",
diff --git a/iree/compiler/Codegen/SPIRV/CMakeLists.txt b/iree/compiler/Codegen/SPIRV/CMakeLists.txt index 31c4888..d5411b6 100644 --- a/iree/compiler/Codegen/SPIRV/CMakeLists.txt +++ b/iree/compiler/Codegen/SPIRV/CMakeLists.txt
@@ -26,10 +26,10 @@ "Passes.cpp" "SPIRVConcretizeWorkgroupTiles.cpp" "SPIRVConvertToGPU.cpp" + "SPIRVCopyToWorkgroupMemory.cpp" "SPIRVFoldGPUProcessorIDUses.cpp" "SPIRVTileAndVectorize.cpp" "SPIRVVectorToCooperativeMatrix.cpp" - "SPIRVVectorToGPU.cpp" "SPIRVVectorizeLoadStore.cpp" "Utils.cpp" DEPS @@ -70,9 +70,9 @@ iree::compiler::Dialect::Flow::IR iree::compiler::Dialect::HAL::IR iree::compiler::Dialect::HAL::IR::HALDialect - iree::compiler::Dialect::IREE::IR iree::compiler::Dialect::Shape::IR iree::compiler::Dialect::Shape::Transforms + iree::compiler::Dialect::Util::IR tensorflow::mlir_hlo PUBLIC )
diff --git a/iree/compiler/Codegen/SPIRV/ConvertToSPIRVPass.cpp b/iree/compiler/Codegen/SPIRV/ConvertToSPIRVPass.cpp index c3aa239..52736e8 100644 --- a/iree/compiler/Codegen/SPIRV/ConvertToSPIRVPass.cpp +++ b/iree/compiler/Codegen/SPIRV/ConvertToSPIRVPass.cpp
@@ -19,7 +19,7 @@ #include "iree/compiler/Codegen/Passes.h" #include "iree/compiler/Codegen/Utils/MarkerUtils.h" #include "iree/compiler/Dialect/HAL/IR/HALOps.h" -#include "iree/compiler/Dialect/IREE/IR/IREEOps.h" +#include "iree/compiler/Dialect/Util/IR/UtilOps.h" #include "llvm/ADT/DenseMapInfo.h" #include "llvm/ADT/DenseSet.h" #include "llvm/ADT/STLExtras.h"
diff --git a/iree/compiler/Codegen/SPIRV/KernelDispatchUtils.cpp b/iree/compiler/Codegen/SPIRV/KernelDispatchUtils.cpp index 98b065c..0296257 100644 --- a/iree/compiler/Codegen/SPIRV/KernelDispatchUtils.cpp +++ b/iree/compiler/Codegen/SPIRV/KernelDispatchUtils.cpp
@@ -19,8 +19,8 @@ #include "iree/compiler/Codegen/SPIRV/LaunchConfig.h" #include "iree/compiler/Codegen/SPIRV/Utils.h" #include "iree/compiler/Codegen/Utils/Utils.h" -#include "iree/compiler/Dialect/IREE/IR/IREEOps.h" #include "iree/compiler/Dialect/Shape/IR/ShapeOps.h" +#include "iree/compiler/Dialect/Util/IR/UtilOps.h" #include "llvm/Support/Debug.h" #include "mlir/Analysis/SliceAnalysis.h" #include "mlir/Dialect/Linalg/Analysis/DependenceAnalysis.h"
diff --git a/iree/compiler/Codegen/SPIRV/Passes.cpp b/iree/compiler/Codegen/SPIRV/Passes.cpp index 806e205..3c122c7 100644 --- a/iree/compiler/Codegen/SPIRV/Passes.cpp +++ b/iree/compiler/Codegen/SPIRV/Passes.cpp
@@ -65,8 +65,9 @@ // vectorization flow. Only perform one level of distribution to map them to // GPU global invocation IDs for distribution. // TODO(antiagainst): Handle all the cases uniformly and remove this pass. + pm.nest<ModuleOp>().addNestedPass<FuncOp>( + createSPIRVCopyToWorkgroupMemoryPass()); pm.addPass(createSPIRVConvertToGPUPass()); - pm.nest<ModuleOp>().addNestedPass<FuncOp>(createSPIRVVectorToGPUPass()); pm.nest<ModuleOp>().addPass(createLowerAffinePass()); pm.nest<ModuleOp>().addPass(createCanonicalizerPass()); pm.nest<ModuleOp>().addPass(createCSEPass());
diff --git a/iree/compiler/Codegen/SPIRV/SPIRVConvertToGPU.cpp b/iree/compiler/Codegen/SPIRV/SPIRVConvertToGPU.cpp index e154864..ed4aad3 100644 --- a/iree/compiler/Codegen/SPIRV/SPIRVConvertToGPU.cpp +++ b/iree/compiler/Codegen/SPIRV/SPIRVConvertToGPU.cpp
@@ -16,7 +16,6 @@ #include "iree/compiler/Codegen/PassDetail.h" #include "iree/compiler/Codegen/Passes.h" #include "iree/compiler/Codegen/SPIRV/KernelDispatchUtils.h" -#include "iree/compiler/Codegen/SPIRV/MemorySpace.h" #include "iree/compiler/Codegen/SPIRV/Utils.h" #include "iree/compiler/Codegen/Transforms/Transforms.h" #include "iree/compiler/Codegen/Utils/MarkerUtils.h" @@ -30,11 +29,11 @@ #include "mlir/Dialect/Linalg/Transforms/Transforms.h" #include "mlir/Dialect/MemRef/IR/MemRef.h" #include "mlir/Dialect/SCF/SCF.h" -#include "mlir/Dialect/SPIRV/IR/SPIRVOps.h" #include "mlir/Dialect/SPIRV/IR/TargetAndABI.h" #include "mlir/Dialect/StandardOps/IR/Ops.h" #include "mlir/IR/AffineMap.h" #include "mlir/IR/FunctionSupport.h" +#include "mlir/IR/PatternMatch.h" #include "mlir/Support/LLVM.h" #include "mlir/Transforms/DialectConversion.h" #include "mlir/Transforms/LoopUtils.h" @@ -46,102 +45,6 @@ // Loop utilities //===----------------------------------------------------------------------===// -/// Builds an empty scf.for operation. The default builder adds an entry basic -/// block which needs to be avoided here. -static scf::ForOp buildEmptyForOp(Location loc, OpBuilder &builder, Value lb, - Value ub, Value step) { - OperationState state(loc, scf::ForOp::getOperationName()); - state.addOperands({lb, ub, step}); - state.addRegion(); - return cast<scf::ForOp>(builder.createOperation(state)); -} - -/// Builds an empty scf.if operation without the then and else blocks. -static scf::IfOp buildEmptyIfOp(Location loc, OpBuilder &builder, Value cond) { - OperationState state(loc, scf::IfOp::getOperationName()); - state.addOperands(cond); - state.addRegion(); - state.addRegion(); - return cast<scf::IfOp>(builder.createOperation(state)); -} - -namespace { -struct LoopBounds { - Value lb; - Value ub; - Value step; -}; -} // namespace - -/// Replaces a scf.parallelOp with an optional scf.parallel op and nested -/// scf.for operations. 
To create the scf.parallel op as the outermost loop, -/// pass the lower bound, upper bound and steps in `newPLoopLbs`, `newPLoopUbs`, -/// and `newPLoopStep` respectively. The bounds of the inner scf.for operations -/// to be created are passed in `forLbs`, `forUbs`, and `forStep`. The -/// `permutation` vector contains a mapping from the original loop order, to the -/// loop order to be generated. -static Operation *replacePLoopOp(ConversionPatternRewriter &rewriter, - scf::ParallelOp pLoopOp, - ArrayRef<LoopBounds> newPLoopBounds, - ArrayRef<LoopBounds> forBounds, - ArrayRef<unsigned> permutation) { - assert(!forBounds.empty() && "unhandled case of no scf.for created"); - unsigned numLoops = pLoopOp.getNumLoops(); - Location loc = pLoopOp.getLoc(); - assert(forBounds.size() + newPLoopBounds.size() == numLoops && - "cannot drop loops when splitting scf.parallel operation"); - assert(permutation.size() == numLoops); - OpBuilder::InsertionGuard guard(rewriter); - - // Need a signature conversion for the body of the scf.parallel operation, - // before can it can be used as the body of the innermost loop created here. - TypeConverter::SignatureConversion signatureConverter(numLoops); - Operation *outermostLoop = nullptr; - auto permuteIt = permutation.begin(); - - // Create the scf.parallel operation as the outermost loop, if specified. 
- if (!newPLoopBounds.empty()) { - auto lbs = llvm::to_vector<2>(llvm::map_range( - newPLoopBounds, [](LoopBounds bounds) -> Value { return bounds.lb; })); - auto ubs = llvm::to_vector<2>(llvm::map_range( - newPLoopBounds, [](LoopBounds bounds) { return bounds.ub; })); - auto steps = llvm::to_vector<2>(llvm::map_range( - newPLoopBounds, [](LoopBounds bounds) { return bounds.step; })); - auto newPLoop = rewriter.create<scf::ParallelOp>(loc, lbs, ubs, steps); - for (auto iv : newPLoop.getInductionVars()) { - signatureConverter.remapInput(*permuteIt, iv); - permuteIt++; - } - rewriter.setInsertionPointToStart(newPLoop.getBody()); - outermostLoop = newPLoop.getOperation(); - } - - // Generate the nested scf.for operations with the bounds passed. - for (auto it : enumerate(forBounds)) { - Value lb = it.value().lb, ub = it.value().ub, step = it.value().step; - if (it.index() != forBounds.size() - 1) { - auto forOp = rewriter.create<scf::ForOp>(loc, lb, ub, step); - if (!outermostLoop) outermostLoop = forOp.getOperation(); - signatureConverter.remapInput(*permuteIt, forOp.getInductionVar()); - rewriter.setInsertionPointToStart(forOp.getBody()); - } else { - // For the last loop, move the body of the scf.parallel op as the body of - // the loop after signature conversion. - auto forOp = buildEmptyForOp(loc, rewriter, lb, ub, step); - if (!outermostLoop) outermostLoop = forOp.getOperation(); - signatureConverter.addInputs(*permuteIt, rewriter.getIndexType()); - Region &pLoopOpRegion = pLoopOp.getLoopBody(); - rewriter.applySignatureConversion(&pLoopOpRegion, signatureConverter); - Region &forOpRegion = forOp.getLoopBody(); - rewriter.inlineRegionBefore(pLoopOpRegion, forOpRegion, - forOpRegion.begin()); - } - permuteIt++; - } - rewriter.eraseOp(pLoopOp); - return outermostLoop; -} - /// Serializes the dimensions of the scf.parallel specified in /// `serializedDimensions`, by creating an nested scf.for operation for each /// dimension. 
@@ -188,200 +91,10 @@ return serializeDimensions(rewriter, pLoopOp, serializedDimensions); } -/// Collapses all loops in a scf.parallel into one scf.parallel operation. This -/// is done by -/// 1) Normalize the loop bounds to be [0, (ub - lb) / step) -/// 2) Compute the total number of iterations. -/// 3) From the induction variable of the modified loop, compute the values of -/// the original induction variables by de-linearization. -scf::ParallelOp collapseParallelLoops(ConversionPatternRewriter &rewriter, - scf::ParallelOp pLoopOp) { - if (pLoopOp.getNumReductions()) return nullptr; - - unsigned numLoops = pLoopOp.getNumLoops(); - if (numLoops == 1) return pLoopOp; - - // Compute the number of iterations of each loops starting from the innermost. - Location loc = pLoopOp.getLoc(); - Value totalNumIterations = rewriter.create<ConstantIndexOp>(loc, 1); - - // Track the "stride" of each loop, i.e. product of the total number of - // iterations of the inner loops. - SmallVector<Value, 2> iterationStride; - iterationStride.resize(pLoopOp.getNumLoops()); - auto lbs = pLoopOp.lowerBound(); - auto ubs = pLoopOp.upperBound(); - auto steps = pLoopOp.step(); - for (int i = numLoops - 1; i >= 0; --i) { - Value lb = lbs[i], ub = ubs[i], step = steps[i]; - Value iterCount = rewriter.create<SignedDivIOp>( - loc, rewriter.create<SubIOp>(loc, ub, lb), step); - iterationStride[i] = totalNumIterations; - totalNumIterations = - rewriter.create<MulIOp>(loc, totalNumIterations, iterCount); - } - - // Create the collapsed parallel loop op with lowerbound 0, step 1 and upper - // bound being the totalNumIterations. - Value newLb = rewriter.create<ConstantIndexOp>(loc, 0); - Value newStep = rewriter.create<ConstantIndexOp>(loc, 1); - scf::ParallelOp newPLoopOp = - rewriter.create<scf::ParallelOp>(loc, newLb, totalNumIterations, newStep); - - // Build the body of the collapsed loop by cloning the original loop body. 
The - // replacement value of the induction variables of the original loop body, - // from the induction variable of the new loop, using - // origLoopIv[i] = loopIv / iterationStride[i] - // loopIv = loopIv % iterationStride[i] - OpBuilder::InsertionGuard guard(rewriter); - Block &pLoopBody = pLoopOp.getLoopBody().front(); - rewriter.setInsertionPointToStart(&newPLoopOp.getLoopBody().front()); - Value loopIv = *newPLoopOp.getInductionVars().begin(); - BlockAndValueMapping map; - for (int i : llvm::seq<int>(0, numLoops)) { - Value iterNum = - rewriter.create<SignedDivIOp>(loc, loopIv, iterationStride[i]); - AffineExpr d0, d1; - bindDims(rewriter.getContext(), d0, d1); - AffineExpr s0 = getAffineSymbolExpr(0, rewriter.getContext()); - Value newIv = makeComposedAffineApply(rewriter, loc, d0 + d1 * s0, - {lbs[i], iterNum, steps[i]}); - map.map(pLoopBody.getArgument(i), newIv); - loopIv = rewriter.create<SignedRemIOp>(loc, loopIv, iterationStride[i]); - } - for (Operation &op : pLoopBody.without_terminator()) { - rewriter.clone(op, map); - } - rewriter.eraseOp(pLoopOp); - return newPLoopOp; -} - //===----------------------------------------------------------------------===// // GPU processor ID mapping utilities //===----------------------------------------------------------------------===// -/// Distributes scf.parallel to processors with the processors logically -/// arranged with same dimensionality as the number of loops, i.e. a -/// scf.parallel with 2 loops to a 2D grid of processors. `processorIDs` and -/// `numProcessors` must be of same size as the number of loops and are the -/// values to use for process ID and number of processors along each dimension -/// in the distributed code. -/// This method accounts for the case where the number of processors is not -/// enough to execute the entire iteration space with one iteration mapped to -/// each processor. So implements a cyclic distribution of iterations to -/// processors. 
-static LogicalResult distributeCyclicallyToProcessors( - ConversionPatternRewriter &rewriter, scf::ParallelOp pLoopOp, - ArrayRef<linalg::ProcInfo> procInfo) { - unsigned numLoops = pLoopOp.getNumLoops(); - assert(numLoops == procInfo.size() && - "expected as many ids as number of loops"); - SmallVector<LoopBounds, 2> forBounds; - SmallVector<unsigned, 2> permutation; - forBounds.reserve(numLoops); - permutation.reserve(numLoops); - Location loc = pLoopOp.getLoc(); - auto lbs = pLoopOp.lowerBound(), ubs = pLoopOp.upperBound(), - steps = pLoopOp.step(); - for (unsigned i : llvm::seq<unsigned>(0, procInfo.size())) { - Value mappedLb = rewriter.create<AddIOp>( - loc, lbs[i], - rewriter.create<MulIOp>(loc, steps[i], procInfo[i].procId)); - Value mappedStep = - rewriter.create<MulIOp>(loc, steps[i], procInfo[i].nprocs); - forBounds.push_back({mappedLb, ubs[i], mappedStep}); - permutation.push_back(i); - } - replacePLoopOp(rewriter, pLoopOp, /*newPLoopBounds=*/{}, forBounds, - permutation); - return success(); -} - -/// Distributes scf.parallel to processors with the processors logically -/// arranged with same dimensionality as the number of loops, i.e. a -/// scf.parallel with 2 loops to a 2D grid of processors. `processorIDs` must be -/// of same size as the number of loops and are the values to use for process ID -/// and number of processors along each dimension in the distributed code. This -/// method assumes that the number of processors is greater than or equal to the -/// number of iterations. So just generates an if statement to mask of -/// processors with no work. When the number of processors is known to be -/// exactly equal to the number of iterations, the if statement is not needed as -/// well. In such cases, `generateGuard` can be set to `false` to avoid -/// generating the if statement. 
-static LogicalResult distributeSingleIterationPerProcessor( - ConversionPatternRewriter &rewriter, scf::ParallelOp pLoopOp, - ArrayRef<linalg::ProcInfo> procInfo, bool generateGuard = false) { - unsigned numLoops = pLoopOp.getNumLoops(); - Location loc = pLoopOp.getLoc(); - assert(numLoops == procInfo.size() && - "expected as many ids as number of loops"); - - auto lbs = pLoopOp.lowerBound(); - auto step = pLoopOp.step(); - SmallVector<Value, 2> ivReplacements; - for (unsigned i : llvm::seq<unsigned>(0, numLoops)) { - Value iterValue = rewriter.create<AddIOp>( - loc, lbs[i], rewriter.create<MulIOp>(loc, procInfo[i].procId, step[i])); - ivReplacements.push_back(iterValue); - } - Region &pLoopOpRegion = pLoopOp.getLoopBody(); - - if (generateGuard) { - TypeConverter::SignatureConversion signatureConverter(numLoops); - Value cond = nullptr; - auto ubs = pLoopOp.upperBound(); - for (unsigned i : llvm::seq<unsigned>(0, numLoops)) { - Value cmp = rewriter.create<CmpIOp>(loc, CmpIPredicate::slt, - ivReplacements[i], ubs[i]); - cond = (cond ? rewriter.create<AndOp>(loc, cond, cmp) : cmp); - signatureConverter.remapInput(i, ivReplacements[i]); - } - rewriter.applySignatureConversion(&pLoopOpRegion, signatureConverter); - scf::IfOp ifOp = buildEmptyIfOp(loc, rewriter, cond); - Region &ifOpRegion = ifOp.getRegion(0); - rewriter.inlineRegionBefore(pLoopOpRegion, ifOpRegion, ifOpRegion.begin()); - } else { - // The body of the scf.parallel needs to be moved into its parent - // operation. - // - Split the block just before the scf.parallel operation. - // - Move the only block of scf.parallel before the newly created block - // (after signature conversion). - // - Add branch from the original block to the moved block of the - // scf.parallel's region, and from the latter to the block created by the - // split operation. - // - Canonicalization will fold these branches away. 
- Block *destBlock = pLoopOp.getOperation()->getBlock(); - Block *remainingInst = - rewriter.splitBlock(destBlock, Block::iterator(pLoopOp)); - Block *sourceBlock = &pLoopOpRegion.front(); - rewriter.eraseOp(sourceBlock->getTerminator()); - rewriter.mergeBlocks(&pLoopOpRegion.front(), destBlock, ivReplacements); - rewriter.mergeBlocks(remainingInst, destBlock, {}); - } - rewriter.eraseOp(pLoopOp); - return success(); -} - -template <typename GPUIdOp, typename GPUCountOp> -static linalg::ProcInfo getLinearizedGPUProcessorIdAndCount( - Location loc, ConversionPatternRewriter &rewriter) { - SmallVector<linalg::ProcInfo, 3> procInfo = - getGPUProcessorIdsAndCounts<GPUIdOp, GPUCountOp>(rewriter, loc, - kNumGPUDims); - linalg::ProcInfo linearized; - linearized.procId = procInfo[0].procId; - linearized.nprocs = procInfo[0].nprocs; - for (unsigned i = 0; i < kNumGPUDims - 1; ++i) { - linearized.procId = - rewriter.create<MulIOp>(loc, linearized.procId, procInfo[i + 1].nprocs); - linearized.procId = - rewriter.create<AddIOp>(loc, linearized.procId, procInfo[i + 1].procId); - linearized.nprocs = - rewriter.create<MulIOp>(loc, linearized.nprocs, procInfo[i + 1].nprocs); - } - return linearized; -} - /// Distributes scf.parallel to processors where `IdOp` is used to get the /// processor ID and `DimOp` is used to get the number of processors along a /// dimension. Assumes that the number of processors will be less than equal to @@ -402,38 +115,6 @@ generateGuard); } -/// Returns the number of bytes copied when loading to/storing from workgorup -/// memory. It is approximated to be the size of the underlying allocation being -/// copied into/from. 
-static Optional<int64_t> getLinearizedCopySize(linalg::CopyOp copyOp) { - Value src = copyOp.input(); - Value dst = copyOp.output(); - MemRefType srcType = src.getType().cast<MemRefType>(); - MemRefType dstType = dst.getType().cast<MemRefType>(); - - Value workgroupMemoryView; - MemRefType workgroupMemoryType; - if (srcType.getMemorySpaceAsInt() == getWorkgroupMemorySpace()) { - workgroupMemoryView = src; - workgroupMemoryType = srcType; - } else if (dstType.getMemorySpaceAsInt() == getWorkgroupMemorySpace()) { - workgroupMemoryView = dst; - workgroupMemoryType = dstType; - } else { - return {}; - } - - memref::SubViewOp workgroupMemorySubviewOp = - dyn_cast_or_null<memref::SubViewOp>(workgroupMemoryView.getDefiningOp()); - if (!workgroupMemorySubviewOp) return {}; - memref::AllocOp allocOp = dyn_cast_or_null<memref::AllocOp>( - workgroupMemorySubviewOp.source().getDefiningOp()); - if (!allocOp) return {}; - - MemRefType allocOpType = allocOp.getType(); - if (!allocOpType.hasStaticShape()) return {}; - return allocOpType.getNumElements(); -} //===----------------------------------------------------------------------===// // Pass and patterns. 
@@ -450,72 +131,6 @@ void runOnOperation() override; }; -static LogicalResult distributeCopyOp(linalg::CopyOp copyOp, - scf::ParallelOp pLoopOp, - ConversionPatternRewriter &rewriter) { - pLoopOp = collapseParallelLoops(rewriter, pLoopOp); - if (!pLoopOp) return failure(); - - Optional<int64_t> copyLength = getLinearizedCopySize(copyOp); - linalg::ProcInfo idAndCount = - getLinearizedGPUProcessorIdAndCount<gpu::ThreadIdOp, gpu::BlockDimOp>( - copyOp.getLoc(), rewriter); - auto workgroupSize = - spirv::lookupLocalWorkGroupSize(copyOp).getValues<APInt>(); - int64_t linearizedWorkgroupSize = std::accumulate( - workgroupSize.begin(), workgroupSize.end(), 1, - [](int64_t total, APInt value) { return total * value.getSExtValue(); }); - - if (copyLength.hasValue() && !workgroupSize.empty() && - copyLength.getValue() <= linearizedWorkgroupSize) { - return distributeSingleIterationPerProcessor(rewriter, pLoopOp, idAndCount, - /*generateGuard=*/true); - } - return distributeCyclicallyToProcessors(rewriter, pLoopOp, idAndCount); -} - -/// CopyOp that are loading to/storing from workgroup memory are special cased -/// to use all workitems to do a copy. This is done by linearizing the copy -/// operation. -// TODO(ravishankarm): This linearization is achieved through collapsing the -// generated parallel loops from a multi-dimensional copy. Such lowering results -// in mods/divs in the collapsed loop body. This can be removed by reshaping the -// copy to be a 1D copy. This seems to be hitting an error in reshape -// canonicalization. Investigate this further. 
-struct SerializeAndDistributeCopy : public OpConversionPattern<linalg::CopyOp> { - using OpConversionPattern::OpConversionPattern; - - LogicalResult matchAndRewrite( - linalg::CopyOp copyOp, ArrayRef<Value> operands, - ConversionPatternRewriter &rewriter) const override { - if (!hasMarker(copyOp, {getCopyToWorkgroupMemoryMarker()})) - return failure(); - - Optional<linalg::LinalgLoops> loops = - linalg::linalgOpToParallelLoops(rewriter, copyOp); - if (!loops) return failure(); - if (!loops.getValue().empty()) { - auto pLoopOp = cast<scf::ParallelOp>(loops.getValue()[0]); - if (failed(distributeCopyOp(copyOp, pLoopOp, rewriter))) return failure(); - } - - // If the `copyOp` writes to workgroup memory insert barrier after the - // op. - if (llvm::any_of(copyOp.getOperands(), [](Value output) { - MemRefType outputType = output.getType().dyn_cast<MemRefType>(); - return outputType && - outputType.getMemorySpaceAsInt() == getWorkgroupMemorySpace(); - })) { - rewriter.create<spirv::ControlBarrierOp>( - copyOp.getLoc(), spirv::Scope::Workgroup, spirv::Scope::Workgroup, - spirv::MemorySemantics::AcquireRelease); - } - - rewriter.eraseOp(copyOp); - return success(); - } -}; - /// Given the workload return the workgroup count along X obtained by /// linearizing the workload and dividing by the workgroup size. static Value getWorkgroupCountX(OpBuilder &builder, Location loc, @@ -587,62 +202,6 @@ } // namespace -// Applies tiling followed to load/store optimized size then distribute on -// incovations. -static LogicalResult tileAndDistributeCopy( - linalg::CopyOp copyOp, ArrayRef<Value> operands, - ConversionPatternRewriter &rewriter) { - linalg::LinalgTilingOptions options; - // Tile to memory access of 128bits as those tend to be optimal on most GPUs. 
- constexpr unsigned vecLoadBits = 128; - unsigned elementBits = - copyOp.getSource().getType().cast<MemRefType>().getElementTypeBitWidth(); - if (elementBits == 0 || vecLoadBits % elementBits != 0) return failure(); - unsigned numElement = vecLoadBits / elementBits; - options.setTileSizes({1, numElement}) - .setLoopType(linalg::LinalgTilingLoopType::ParallelLoops); - Optional<linalg::TiledLinalgOp> tiledOp = - linalg::tileLinalgOp(rewriter, copyOp, options); - if (!tiledOp) return failure(); - if (tiledOp->loops.empty()) return success(); - setMarker(tiledOp->op, getVectorizeMarker()); - auto pLoopOp = cast<scf::ParallelOp>(tiledOp->loops[0]); - return distributeCopyOp(copyOp, pLoopOp, rewriter); -} - -namespace { -// Pattern to tile and distribute linalg::CopyOp. -struct TileAndDistributeCopyOp : public OpConversionPattern<linalg::CopyOp> { - using OpConversionPattern<linalg::CopyOp>::OpConversionPattern; - LogicalResult matchAndRewrite( - linalg::CopyOp linalgOp, ArrayRef<Value> operands, - ConversionPatternRewriter &rewriter) const override { - if (!hasMarker(linalgOp, getCopyToWorkgroupMemoryMarker())) { - return failure(); - } - if (failed(tileAndDistributeCopy(linalgOp, operands, rewriter))) { - return failure(); - } - - // Insert a barrier if read or write shared memory. 
- if (llvm::any_of(linalgOp.getOperands(), [](Value output) { - return output.getType().cast<MemRefType>().getMemorySpaceAsInt() == - getWorkgroupMemorySpace(); - })) { - rewriter.create<spirv::ControlBarrierOp>( - linalgOp.getLoc(), spirv::Scope::Workgroup, spirv::Scope::Workgroup, - spirv::MemorySemantics::AcquireRelease); - } - rewriter.eraseOp(linalgOp); - return success(); - } -}; -} // namespace - -void populateTileAndDistributeLinalgCopyPatterns( - MLIRContext *context, OwningRewritePatternList &patterns) { - patterns.insert<TileAndDistributeCopyOp>(context); -} void SPIRVConvertToGPUPass::runOnOperation() { MLIRContext *context = &getContext(); @@ -661,8 +220,7 @@ patterns.insert<MapLinalgOpToGlobalInvocationId<linalg::CopyOp>, MapLinalgOpToGlobalInvocationId<linalg::FillOp>, - MapLinalgOpToGlobalInvocationId<linalg::GenericOp>, - SerializeAndDistributeCopy>(context); + MapLinalgOpToGlobalInvocationId<linalg::GenericOp>>(context); FrozenRewritePatternSet frozenPatterns(std::move(patterns)); for (FuncOp funcOp : getOperation().getInnerModule().getOps<FuncOp>()) {
diff --git a/iree/compiler/Codegen/SPIRV/SPIRVCopyToWorkgroupMemory.cpp b/iree/compiler/Codegen/SPIRV/SPIRVCopyToWorkgroupMemory.cpp new file mode 100644 index 0000000..ac1e0ea --- /dev/null +++ b/iree/compiler/Codegen/SPIRV/SPIRVCopyToWorkgroupMemory.cpp
@@ -0,0 +1,319 @@ +// Copyright 2020 The IREE Authors +// +// Licensed under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + +//===---- SPIRVCopyToWorkgroupMemoryPass.cpp ------------------------------===// +// +// This pass lowers linalg.copy for copying data to the workgroup memory. +// +//===----------------------------------------------------------------------===// + +#include <memory> +#include <numeric> + +#include "iree/compiler/Codegen/PassDetail.h" +#include "iree/compiler/Codegen/Passes.h" +#include "iree/compiler/Codegen/SPIRV/MemorySpace.h" +#include "iree/compiler/Codegen/SPIRV/Utils.h" +#include "iree/compiler/Codegen/Transforms/Transforms.h" +#include "iree/compiler/Codegen/Utils/MarkerUtils.h" +#include "iree/compiler/Codegen/Utils/Utils.h" +#include "llvm/ADT/STLExtras.h" +#include "llvm/Support/FormatVariadic.h" +#include "mlir/Dialect/GPU/GPUDialect.h" +#include "mlir/Dialect/Linalg/IR/LinalgOps.h" +#include "mlir/Dialect/Linalg/Transforms/CodegenStrategy.h" +#include "mlir/Dialect/Linalg/Transforms/Transforms.h" +#include "mlir/Dialect/MemRef/IR/MemRef.h" +#include "mlir/Dialect/SCF/SCF.h" +#include "mlir/Dialect/SPIRV/IR/SPIRVOps.h" +#include "mlir/Dialect/SPIRV/IR/TargetAndABI.h" +#include "mlir/Dialect/StandardOps/IR/Ops.h" +#include "mlir/Dialect/Vector/VectorOps.h" +#include "mlir/IR/BuiltinOps.h" +#include "mlir/IR/BuiltinTypes.h" +#include "mlir/IR/OpDefinition.h" +#include "mlir/IR/PatternMatch.h" +#include "mlir/Pass/Pass.h" +#include "mlir/Support/LLVM.h" +#include "mlir/Support/LogicalResult.h" +#include "mlir/Transforms/DialectConversion.h" +#include "mlir/Transforms/GreedyPatternRewriteDriver.h" + +namespace mlir { +namespace iree_compiler { +namespace { + +template <typename GPUIdOp, typename GPUCountOp> +linalg::ProcInfo getLinearizedGPUProcessorIdAndCount( + Location loc, ConversionPatternRewriter &rewriter) { 
+ SmallVector<linalg::ProcInfo, 3> procInfo = + getGPUProcessorIdsAndCounts<GPUIdOp, GPUCountOp>(rewriter, loc, + kNumGPUDims); + linalg::ProcInfo linearized; + linearized.procId = procInfo[0].procId; + linearized.nprocs = procInfo[0].nprocs; + for (unsigned i = 0; i < kNumGPUDims - 1; ++i) { + linearized.procId = + rewriter.create<MulIOp>(loc, linearized.procId, procInfo[i + 1].nprocs); + linearized.procId = + rewriter.create<AddIOp>(loc, linearized.procId, procInfo[i + 1].procId); + linearized.nprocs = + rewriter.create<MulIOp>(loc, linearized.nprocs, procInfo[i + 1].nprocs); + } + return linearized; +} + +/// Distributes scf.parallel to processors with the processors logically +/// arranged with same dimensionality as the number of loops, i.e. a +/// scf.parallel with 2 loops to a 2D grid of processors. `processorIDs` and +/// `numProcessors` must be of same size as the number of loops and are the +/// values to use for process ID and number of processors along each dimension +/// in the distributed code. +/// This method accounts for the case where the number of processors is not +/// enough to execute the entire iteration space with one iteration mapped to +/// each processor. So implements a cyclic distribution of iterations to +/// processors. 
+LogicalResult distributeCyclicallyToProcessors(
+    ConversionPatternRewriter &rewriter, scf::ParallelOp pLoopOp,
+    ArrayRef<linalg::ProcInfo> procInfo) {
+  unsigned numLoops = pLoopOp.getNumLoops();
+  assert(numLoops == procInfo.size() &&
+         "expected as many ids as number of loops");
+  SmallVector<LoopBounds, 2> forBounds;
+  SmallVector<unsigned, 2> permutation;
+  forBounds.reserve(numLoops);
+  permutation.reserve(numLoops);
+  Location loc = pLoopOp.getLoc();
+  auto lbs = pLoopOp.lowerBound(), ubs = pLoopOp.upperBound(),
+       steps = pLoopOp.step();
+  for (unsigned i : llvm::seq<unsigned>(0, procInfo.size())) {
+    Value mappedLb = rewriter.create<AddIOp>(
+        loc, lbs[i],
+        rewriter.create<MulIOp>(loc, steps[i], procInfo[i].procId));
+    Value mappedStep =
+        rewriter.create<MulIOp>(loc, steps[i], procInfo[i].nprocs);
+    forBounds.push_back({mappedLb, ubs[i], mappedStep});
+    permutation.push_back(i);
+  }
+  replacePLoopOp(rewriter, pLoopOp, /*newPLoopBounds=*/{}, forBounds,
+                 permutation);
+  return success();
+}
+
+/// Returns the number of bytes copied when loading to/storing from workgroup
+/// memory. It is approximated to be the size of the underlying allocation being
+/// copied into/from. 
+Optional<int64_t> getLinearizedCopySize(linalg::CopyOp copyOp) { + Value src = copyOp.input(); + Value dst = copyOp.output(); + MemRefType srcType = src.getType().cast<MemRefType>(); + MemRefType dstType = dst.getType().cast<MemRefType>(); + + Value workgroupMemoryView; + MemRefType workgroupMemoryType; + if (srcType.getMemorySpaceAsInt() == getWorkgroupMemorySpace()) { + workgroupMemoryView = src; + workgroupMemoryType = srcType; + } else if (dstType.getMemorySpaceAsInt() == getWorkgroupMemorySpace()) { + workgroupMemoryView = dst; + workgroupMemoryType = dstType; + } else { + return {}; + } + + memref::SubViewOp workgroupMemorySubviewOp = + dyn_cast_or_null<memref::SubViewOp>(workgroupMemoryView.getDefiningOp()); + if (!workgroupMemorySubviewOp) return {}; + memref::AllocOp allocOp = dyn_cast_or_null<memref::AllocOp>( + workgroupMemorySubviewOp.source().getDefiningOp()); + if (!allocOp) return {}; + + MemRefType allocOpType = allocOp.getType(); + if (!allocOpType.hasStaticShape()) return {}; + return allocOpType.getNumElements(); +} + +LogicalResult distributeCopyOp(linalg::CopyOp copyOp, scf::ParallelOp pLoopOp, + ConversionPatternRewriter &rewriter) { + pLoopOp = collapseParallelLoops(rewriter, pLoopOp); + if (!pLoopOp) return failure(); + + Optional<int64_t> copyLength = getLinearizedCopySize(copyOp); + linalg::ProcInfo idAndCount = + getLinearizedGPUProcessorIdAndCount<gpu::ThreadIdOp, gpu::BlockDimOp>( + copyOp.getLoc(), rewriter); + auto workgroupSize = + spirv::lookupLocalWorkGroupSize(copyOp).getValues<APInt>(); + int64_t linearizedWorkgroupSize = std::accumulate( + workgroupSize.begin(), workgroupSize.end(), 1, + [](int64_t total, APInt value) { return total * value.getSExtValue(); }); + + if (copyLength.hasValue() && !workgroupSize.empty() && + copyLength.getValue() <= linearizedWorkgroupSize) { + return distributeSingleIterationPerProcessor(rewriter, pLoopOp, idAndCount, + /*generateGuard=*/true); + } + return 
distributeCyclicallyToProcessors(rewriter, pLoopOp, idAndCount);
+}
+
+// Applies tiling to a load/store optimized size, then distributes on
+// invocations.
+LogicalResult tileAndDistributeCopy(linalg::CopyOp copyOp,
+                                    ArrayRef<Value> operands,
+                                    ConversionPatternRewriter &rewriter) {
+  linalg::LinalgTilingOptions options;
+  // Tile to memory access of 128bits as those tend to be optimal on most GPUs.
+  constexpr unsigned vecLoadBits = 128;
+  unsigned elementBits =
+      copyOp.getSource().getType().cast<MemRefType>().getElementTypeBitWidth();
+  if (elementBits == 0 || vecLoadBits % elementBits != 0) return failure();
+  unsigned numElement = vecLoadBits / elementBits;
+  options.setTileSizes({1, numElement})
+      .setLoopType(linalg::LinalgTilingLoopType::ParallelLoops);
+  Optional<linalg::TiledLinalgOp> tiledOp =
+      linalg::tileLinalgOp(rewriter, copyOp, options);
+  if (!tiledOp) return failure();
+  if (tiledOp->loops.empty()) return success();
+  setMarker(tiledOp->op, getVectorizeMarker());
+  auto pLoopOp = cast<scf::ParallelOp>(tiledOp->loops[0]);
+  return distributeCopyOp(copyOp, pLoopOp, rewriter);
+}
+
+// Pattern to tile and distribute linalg::CopyOp.
+struct TileAndDistributeCopyOp : public OpConversionPattern<linalg::CopyOp> {
+  using OpConversionPattern<linalg::CopyOp>::OpConversionPattern;
+  LogicalResult matchAndRewrite(
+      linalg::CopyOp linalgOp, ArrayRef<Value> operands,
+      ConversionPatternRewriter &rewriter) const override {
+    if (!hasMarker(linalgOp, getCopyToWorkgroupMemoryMarker())) {
+      return failure();
+    }
+    if (failed(tileAndDistributeCopy(linalgOp, operands, rewriter))) {
+      return failure();
+    }
+
+    // Insert a barrier if read or write shared memory. 
+ if (llvm::any_of(linalgOp.getOperands(), [](Value output) { + return output.getType().cast<MemRefType>().getMemorySpaceAsInt() == + getWorkgroupMemorySpace(); + })) { + rewriter.create<spirv::ControlBarrierOp>( + linalgOp.getLoc(), spirv::Scope::Workgroup, spirv::Scope::Workgroup, + spirv::MemorySemantics::AcquireRelease); + } + rewriter.eraseOp(linalgOp); + return success(); + } +}; + +/// CopyOp that are loading to/storing from workgroup memory are special cased +/// to use all workitems to do a copy. This is done by linearizing the copy +/// operation. +// TODO(ravishankarm): This linearization is achieved through collapsing the +// generated parallel loops from a multi-dimensional copy. Such lowering results +// in mods/divs in the collapsed loop body. This can be removed by reshaping the +// copy to be a 1D copy. This seems to be hitting an error in reshape +// canonicalization. Investigate this further. +struct SerializeAndDistributeCopy : public OpConversionPattern<linalg::CopyOp> { + using OpConversionPattern::OpConversionPattern; + + LogicalResult matchAndRewrite( + linalg::CopyOp copyOp, ArrayRef<Value> operands, + ConversionPatternRewriter &rewriter) const override { + if (!hasMarker(copyOp, {getCopyToWorkgroupMemoryMarker()})) + return failure(); + + Optional<linalg::LinalgLoops> loops = + linalg::linalgOpToParallelLoops(rewriter, copyOp); + if (!loops) return failure(); + if (!loops.getValue().empty()) { + auto pLoopOp = cast<scf::ParallelOp>(loops.getValue()[0]); + if (failed(distributeCopyOp(copyOp, pLoopOp, rewriter))) return failure(); + } + + // If the `copyOp` writes to workgroup memory insert barrier after the + // op. 
+ if (llvm::any_of(copyOp.getOperands(), [](Value output) { + MemRefType outputType = output.getType().dyn_cast<MemRefType>(); + return outputType && + outputType.getMemorySpaceAsInt() == getWorkgroupMemorySpace(); + })) { + rewriter.create<spirv::ControlBarrierOp>( + copyOp.getLoc(), spirv::Scope::Workgroup, spirv::Scope::Workgroup, + spirv::MemorySemantics::AcquireRelease); + } + + rewriter.eraseOp(copyOp); + return success(); + } +}; + +struct SPIRVCopyToWorkgroupMemoryPass + : public SPIRVCopyToWorkgroupMemoryBase<SPIRVCopyToWorkgroupMemoryPass> { + void getDependentDialects(DialectRegistry ®istry) const override { + registry.insert<AffineDialect, gpu::GPUDialect, memref::MemRefDialect, + scf::SCFDialect, vector::VectorDialect>(); + } + + void runOnOperation() override; + + private: + void tileAndVectorizeLinalgCopy(FuncOp funcOp, MLIRContext *context); + void lowerVectorOps(FuncOp funcOp, MLIRContext *context); +}; + +void SPIRVCopyToWorkgroupMemoryPass::tileAndVectorizeLinalgCopy( + FuncOp funcOp, MLIRContext *context) { + // 1. Tile linalg and distribute it on invocations. + std::unique_ptr<ConversionTarget> target = + std::make_unique<ConversionTarget>(*context); + target->addDynamicallyLegalOp<linalg::CopyOp>([&](linalg::CopyOp copy) { + return !(hasMarker(copy, getCopyToWorkgroupMemoryMarker())); + }); + target->markUnknownOpDynamicallyLegal([](Operation *) { return true; }); + + OwningRewritePatternList patterns(&getContext()); + // TODO(antiagainst): Re-enable vectorizing workgroup memory copy once the + // whole pipeline is in a better state. + // patterns.add<TileAndDistributeCopyOp>(context); + patterns.add<SerializeAndDistributeCopy>(context); + if (failed(applyPartialConversion(funcOp, *target, std::move(patterns)))) { + return signalPassFailure(); + } + + // 2. Canonicalize the IR generated by tiling. 
+ RewritePatternSet canonicalizePatterns = + linalg::getLinalgTilingCanonicalizationPatterns(context); + populateAffineMinCanonicalizationPattern(canonicalizePatterns); + canonicalizePatterns.insert<linalg::AffineMinSCFCanonicalizationPattern>( + context); + (void)applyPatternsAndFoldGreedily(funcOp, std::move(canonicalizePatterns)); + + // 3. Vectorize the tiled linalg to be able to map it to load/store vector. + OwningRewritePatternList vectorizationPatterns(&getContext()); + linalg::insertVectorizationPatterns<linalg::CopyOp>( + vectorizationPatterns, linalg::LinalgVectorizationOptions(), + linalg::LinalgTransformationFilter( + Identifier::get(getVectorizeMarker(), context), {})); + (void)applyPatternsAndFoldGreedily(funcOp, std::move(vectorizationPatterns)); +} + +void SPIRVCopyToWorkgroupMemoryPass::runOnOperation() { + MLIRContext *context = &getContext(); + FuncOp funcOp = getOperation(); + tileAndVectorizeLinalgCopy(funcOp, context); +} +} // namespace + +//===----------------------------------------------------------------------===// +// Pass entry point and registration +//===----------------------------------------------------------------------===// +std::unique_ptr<OperationPass<FuncOp>> createSPIRVCopyToWorkgroupMemoryPass() { + return std::make_unique<SPIRVCopyToWorkgroupMemoryPass>(); +} + +} // namespace iree_compiler +} // namespace mlir
diff --git a/iree/compiler/Codegen/SPIRV/SPIRVVectorToGPU.cpp b/iree/compiler/Codegen/SPIRV/SPIRVVectorToGPU.cpp deleted file mode 100644 index e686e2b..0000000 --- a/iree/compiler/Codegen/SPIRV/SPIRVVectorToGPU.cpp +++ /dev/null
@@ -1,108 +0,0 @@ -// Copyright 2020 The IREE Authors -// -// Licensed under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception - -//===---- SPIRVVectorToGPUPass.cpp - Pass for the final SPIR-V conversion -===// -// -// This file implement a pass to convert vector dialect operations to GPU -// operations distributed across a subgroup. -// -//===----------------------------------------------------------------------===// -#include <memory> - -#include "iree/compiler/Codegen/PassDetail.h" -#include "iree/compiler/Codegen/Passes.h" -#include "iree/compiler/Codegen/Transforms/Transforms.h" -#include "iree/compiler/Codegen/Utils/MarkerUtils.h" -#include "iree/compiler/Codegen/Utils/Utils.h" -#include "llvm/ADT/STLExtras.h" -#include "llvm/Support/FormatVariadic.h" -#include "mlir/Dialect/GPU/GPUDialect.h" -#include "mlir/Dialect/Linalg/IR/LinalgOps.h" -#include "mlir/Dialect/Linalg/Transforms/CodegenStrategy.h" -#include "mlir/Dialect/Linalg/Transforms/Transforms.h" -#include "mlir/Dialect/MemRef/IR/MemRef.h" -#include "mlir/Dialect/SCF/SCF.h" -#include "mlir/Dialect/SPIRV/IR/TargetAndABI.h" -#include "mlir/Dialect/StandardOps/IR/Ops.h" -#include "mlir/Dialect/Vector/VectorOps.h" -#include "mlir/IR/BuiltinOps.h" -#include "mlir/IR/BuiltinTypes.h" -#include "mlir/IR/OpDefinition.h" -#include "mlir/IR/PatternMatch.h" -#include "mlir/Pass/Pass.h" -#include "mlir/Support/LLVM.h" -#include "mlir/Support/LogicalResult.h" -#include "mlir/Transforms/DialectConversion.h" -#include "mlir/Transforms/GreedyPatternRewriteDriver.h" - -namespace mlir { -namespace iree_compiler { -namespace { - -struct SPIRVVectorToGPUPass - : public SPIRVVectorToGPUBase<SPIRVVectorToGPUPass> { - void getDependentDialects(DialectRegistry ®istry) const override { - registry.insert<AffineDialect, gpu::GPUDialect, memref::MemRefDialect, - scf::SCFDialect, vector::VectorDialect>(); - } - - 
void runOnOperation() override; - - private: - void tileAndVectorizeLinalgCopy(FuncOp funcOp, MLIRContext *context); - void lowerVectorOps(FuncOp funcOp, MLIRContext *context); -}; - -void SPIRVVectorToGPUPass::tileAndVectorizeLinalgCopy(FuncOp funcOp, - MLIRContext *context) { - // 1. Tile linalg and distribute it on invocations. - std::unique_ptr<ConversionTarget> target = - std::make_unique<ConversionTarget>(*context); - target->addDynamicallyLegalOp<linalg::CopyOp>([&](linalg::CopyOp copy) { - return !(hasMarker(copy, getCopyToWorkgroupMemoryMarker())); - }); - target->markUnknownOpDynamicallyLegal([](Operation *) { return true; }); - OwningRewritePatternList tileAndDistributePattern(&getContext()); - populateTileAndDistributeLinalgCopyPatterns(context, - tileAndDistributePattern); - if (failed(applyPartialConversion(funcOp, *target, - std::move(tileAndDistributePattern)))) { - return signalPassFailure(); - } - - // 2. Canonicalize the IR generated by tiling. - RewritePatternSet canonicalizePatterns = - linalg::getLinalgTilingCanonicalizationPatterns(context); - populateAffineMinCanonicalizationPattern(canonicalizePatterns); - canonicalizePatterns.insert<linalg::AffineMinSCFCanonicalizationPattern>( - context); - (void)applyPatternsAndFoldGreedily(funcOp, std::move(canonicalizePatterns)); - - // 3. Vectorize the tiled linalg to be able to map it to load/store vector. 
- OwningRewritePatternList vectorizationPatterns(&getContext()); - linalg::insertVectorizationPatterns<linalg::CopyOp>( - vectorizationPatterns, linalg::LinalgVectorizationOptions(), - linalg::LinalgTransformationFilter( - Identifier::get(getVectorizeMarker(), context), {})); - (void)applyPatternsAndFoldGreedily(funcOp, std::move(vectorizationPatterns)); -} - -void SPIRVVectorToGPUPass::runOnOperation() { - MLIRContext *context = &getContext(); - FuncOp funcOp = getOperation(); - tileAndVectorizeLinalgCopy(funcOp, context); -} -} // namespace - -//===----------------------------------------------------------------------===// -// Pass entry point and registration -//===----------------------------------------------------------------------===// -std::unique_ptr<OperationPass<FuncOp>> createSPIRVVectorToGPUPass() { - return std::make_unique<SPIRVVectorToGPUPass>(); -} - -} // namespace iree_compiler -} // namespace mlir
diff --git a/iree/compiler/Codegen/SPIRV/SPIRVVectorizeLoadStore.cpp b/iree/compiler/Codegen/SPIRV/SPIRVVectorizeLoadStore.cpp index b821de9..9d27a9f 100644 --- a/iree/compiler/Codegen/SPIRV/SPIRVVectorizeLoadStore.cpp +++ b/iree/compiler/Codegen/SPIRV/SPIRVVectorizeLoadStore.cpp
@@ -15,7 +15,7 @@ #include "iree/compiler/Codegen/PassDetail.h" #include "iree/compiler/Codegen/Passes.h" #include "iree/compiler/Codegen/Utils/Utils.h" -#include "iree/compiler/Dialect/IREE/IR/IREEOps.h" +#include "iree/compiler/Dialect/Util/IR/UtilOps.h" #include "llvm/ADT/TypeSwitch.h" #include "mlir/Dialect/Linalg/IR/LinalgOps.h" #include "mlir/Dialect/MemRef/IR/MemRef.h"
diff --git a/iree/compiler/Codegen/SPIRV/Utils.cpp b/iree/compiler/Codegen/SPIRV/Utils.cpp index 2a47649..f18ecc7 100644 --- a/iree/compiler/Codegen/SPIRV/Utils.cpp +++ b/iree/compiler/Codegen/SPIRV/Utils.cpp
@@ -158,5 +158,203 @@ getGPUProcessorIdsAndCounts<GPUGlobalId, GPUGlobalCount>(OpBuilder &builder, Location loc, unsigned numDims); + +scf::ParallelOp collapseParallelLoops(PatternRewriter &rewriter, + scf::ParallelOp pLoopOp) { + if (pLoopOp.getNumReductions()) return nullptr; + + unsigned numLoops = pLoopOp.getNumLoops(); + if (numLoops == 1) return pLoopOp; + + // Compute the number of iterations of each loops starting from the innermost. + Location loc = pLoopOp.getLoc(); + Value totalNumIterations = rewriter.create<ConstantIndexOp>(loc, 1); + + // Track the "stride" of each loop, i.e. product of the total number of + // iterations of the inner loops. + SmallVector<Value, 2> iterationStride; + iterationStride.resize(pLoopOp.getNumLoops()); + auto lbs = pLoopOp.lowerBound(); + auto ubs = pLoopOp.upperBound(); + auto steps = pLoopOp.step(); + for (int i = numLoops - 1; i >= 0; --i) { + Value lb = lbs[i], ub = ubs[i], step = steps[i]; + Value iterCount = rewriter.create<SignedDivIOp>( + loc, rewriter.create<SubIOp>(loc, ub, lb), step); + iterationStride[i] = totalNumIterations; + totalNumIterations = + rewriter.create<MulIOp>(loc, totalNumIterations, iterCount); + } + + // Create the collapsed parallel loop op with lowerbound 0, step 1 and upper + // bound being the totalNumIterations. + Value newLb = rewriter.create<ConstantIndexOp>(loc, 0); + Value newStep = rewriter.create<ConstantIndexOp>(loc, 1); + scf::ParallelOp newPLoopOp = + rewriter.create<scf::ParallelOp>(loc, newLb, totalNumIterations, newStep); + + // Build the body of the collapsed loop by cloning the original loop body. 
The + // replacement value of the induction variables of the original loop body, + // from the induction variable of the new loop, using + // origLoopIv[i] = loopIv / iterationStride[i] + // loopIv = loopIv % iterationStride[i] + OpBuilder::InsertionGuard guard(rewriter); + Block &pLoopBody = pLoopOp.getLoopBody().front(); + rewriter.setInsertionPointToStart(&newPLoopOp.getLoopBody().front()); + Value loopIv = *newPLoopOp.getInductionVars().begin(); + BlockAndValueMapping map; + for (int i : llvm::seq<int>(0, numLoops)) { + Value iterNum = + rewriter.create<SignedDivIOp>(loc, loopIv, iterationStride[i]); + AffineExpr d0, d1; + bindDims(rewriter.getContext(), d0, d1); + AffineExpr s0 = getAffineSymbolExpr(0, rewriter.getContext()); + Value newIv = makeComposedAffineApply(rewriter, loc, d0 + d1 * s0, + {lbs[i], iterNum, steps[i]}); + map.map(pLoopBody.getArgument(i), newIv); + loopIv = rewriter.create<SignedRemIOp>(loc, loopIv, iterationStride[i]); + } + for (Operation &op : pLoopBody.without_terminator()) { + rewriter.clone(op, map); + } + rewriter.eraseOp(pLoopOp); + return newPLoopOp; +} + +/// Builds an empty scf.for operation. The default builder adds an entry basic +/// block which needs to be avoided here. 
+static scf::ForOp buildEmptyForOp(Location loc, OpBuilder &builder, Value lb, + Value ub, Value step) { + OperationState state(loc, scf::ForOp::getOperationName()); + state.addOperands({lb, ub, step}); + state.addRegion(); + return cast<scf::ForOp>(builder.createOperation(state)); +} + +Operation *replacePLoopOp(ConversionPatternRewriter &rewriter, + scf::ParallelOp pLoopOp, + ArrayRef<LoopBounds> newPLoopBounds, + ArrayRef<LoopBounds> forBounds, + ArrayRef<unsigned> permutation) { + assert(!forBounds.empty() && "unhandled case of no scf.for created"); + unsigned numLoops = pLoopOp.getNumLoops(); + Location loc = pLoopOp.getLoc(); + assert(forBounds.size() + newPLoopBounds.size() == numLoops && + "cannot drop loops when splitting scf.parallel operation"); + assert(permutation.size() == numLoops); + OpBuilder::InsertionGuard guard(rewriter); + + // Need a signature conversion for the body of the scf.parallel operation, + // before can it can be used as the body of the innermost loop created here. + TypeConverter::SignatureConversion signatureConverter(numLoops); + Operation *outermostLoop = nullptr; + auto permuteIt = permutation.begin(); + + // Create the scf.parallel operation as the outermost loop, if specified. + if (!newPLoopBounds.empty()) { + auto lbs = llvm::to_vector<2>(llvm::map_range( + newPLoopBounds, [](LoopBounds bounds) -> Value { return bounds.lb; })); + auto ubs = llvm::to_vector<2>(llvm::map_range( + newPLoopBounds, [](LoopBounds bounds) { return bounds.ub; })); + auto steps = llvm::to_vector<2>(llvm::map_range( + newPLoopBounds, [](LoopBounds bounds) { return bounds.step; })); + auto newPLoop = rewriter.create<scf::ParallelOp>(loc, lbs, ubs, steps); + for (auto iv : newPLoop.getInductionVars()) { + signatureConverter.remapInput(*permuteIt, iv); + permuteIt++; + } + rewriter.setInsertionPointToStart(newPLoop.getBody()); + outermostLoop = newPLoop.getOperation(); + } + + // Generate the nested scf.for operations with the bounds passed. 
+ for (auto it : enumerate(forBounds)) { + Value lb = it.value().lb, ub = it.value().ub, step = it.value().step; + if (it.index() != forBounds.size() - 1) { + auto forOp = rewriter.create<scf::ForOp>(loc, lb, ub, step); + if (!outermostLoop) outermostLoop = forOp.getOperation(); + signatureConverter.remapInput(*permuteIt, forOp.getInductionVar()); + rewriter.setInsertionPointToStart(forOp.getBody()); + } else { + // For the last loop, move the body of the scf.parallel op as the body of + // the loop after signature conversion. + auto forOp = buildEmptyForOp(loc, rewriter, lb, ub, step); + if (!outermostLoop) outermostLoop = forOp.getOperation(); + signatureConverter.addInputs(*permuteIt, rewriter.getIndexType()); + Region &pLoopOpRegion = pLoopOp.getLoopBody(); + rewriter.applySignatureConversion(&pLoopOpRegion, signatureConverter); + Region &forOpRegion = forOp.getLoopBody(); + rewriter.inlineRegionBefore(pLoopOpRegion, forOpRegion, + forOpRegion.begin()); + } + permuteIt++; + } + rewriter.eraseOp(pLoopOp); + return outermostLoop; +} + +/// Builds an empty scf.if operation without the then and else blocks. 
+static scf::IfOp buildEmptyIfOp(Location loc, OpBuilder &builder, Value cond) { + OperationState state(loc, scf::IfOp::getOperationName()); + state.addOperands(cond); + state.addRegion(); + state.addRegion(); + return cast<scf::IfOp>(builder.createOperation(state)); +} + +LogicalResult distributeSingleIterationPerProcessor( + ConversionPatternRewriter &rewriter, scf::ParallelOp pLoopOp, + ArrayRef<linalg::ProcInfo> procInfo, bool generateGuard) { + unsigned numLoops = pLoopOp.getNumLoops(); + Location loc = pLoopOp.getLoc(); + assert(numLoops == procInfo.size() && + "expected as many ids as number of loops"); + + auto lbs = pLoopOp.lowerBound(); + auto step = pLoopOp.step(); + SmallVector<Value, 2> ivReplacements; + for (unsigned i : llvm::seq<unsigned>(0, numLoops)) { + Value iterValue = rewriter.create<AddIOp>( + loc, lbs[i], rewriter.create<MulIOp>(loc, procInfo[i].procId, step[i])); + ivReplacements.push_back(iterValue); + } + Region &pLoopOpRegion = pLoopOp.getLoopBody(); + + if (generateGuard) { + TypeConverter::SignatureConversion signatureConverter(numLoops); + Value cond = nullptr; + auto ubs = pLoopOp.upperBound(); + for (unsigned i : llvm::seq<unsigned>(0, numLoops)) { + Value cmp = rewriter.create<CmpIOp>(loc, CmpIPredicate::slt, + ivReplacements[i], ubs[i]); + cond = (cond ? rewriter.create<AndOp>(loc, cond, cmp) : cmp); + signatureConverter.remapInput(i, ivReplacements[i]); + } + rewriter.applySignatureConversion(&pLoopOpRegion, signatureConverter); + scf::IfOp ifOp = buildEmptyIfOp(loc, rewriter, cond); + Region &ifOpRegion = ifOp.getRegion(0); + rewriter.inlineRegionBefore(pLoopOpRegion, ifOpRegion, ifOpRegion.begin()); + } else { + // The body of the scf.parallel needs to be moved into its parent + // operation. + // - Split the block just before the scf.parallel operation. + // - Move the only block of scf.parallel before the newly created block + // (after signature conversion). 
+ // - Add branch from the original block to the moved block of the + // scf.parallel's region, and from the latter to the block created by the + // split operation. + // - Canonicalization will fold these branches away. + Block *destBlock = pLoopOp.getOperation()->getBlock(); + Block *remainingInst = + rewriter.splitBlock(destBlock, Block::iterator(pLoopOp)); + Block *sourceBlock = &pLoopOpRegion.front(); + rewriter.eraseOp(sourceBlock->getTerminator()); + rewriter.mergeBlocks(&pLoopOpRegion.front(), destBlock, ivReplacements); + rewriter.mergeBlocks(remainingInst, destBlock, {}); + } + rewriter.eraseOp(pLoopOp); + return success(); +} + } // namespace iree_compiler } // namespace mlir
diff --git a/iree/compiler/Codegen/SPIRV/Utils.h b/iree/compiler/Codegen/SPIRV/Utils.h index f0d8635..b286642 100644 --- a/iree/compiler/Codegen/SPIRV/Utils.h +++ b/iree/compiler/Codegen/SPIRV/Utils.h
@@ -20,6 +20,7 @@ #include "mlir/IR/Value.h" #include "mlir/Support/LLVM.h" #include "mlir/Support/LogicalResult.h" +#include "mlir/Transforms/DialectConversion.h" #include "mlir/Transforms/FoldUtils.h" namespace mlir { @@ -27,6 +28,10 @@ static constexpr int kNumGPUDims = 3; +//===----------------------------------------------------------------------===// +// Workgroup memory utils +//===----------------------------------------------------------------------===// + /// Allocation callback for allocation workgroup local memory. Optional<Value> allocateWorkgroupMemory(OpBuilder &b, memref::SubViewOp subview, ArrayRef<Value> boundingSubViewSize, @@ -41,6 +46,10 @@ /// Deallocation callback for allocation workgroup local memory. LogicalResult deallocateWorkgroupMemory(OpBuilder &b, Value buffer); +//===----------------------------------------------------------------------===// +// Processor ID/size utils +//===----------------------------------------------------------------------===// + /// Generate the operations that compute the processor ID and number of /// processors. Used as the callback needed for LinalgDistributionOptions. class GPUGlobalId; @@ -53,6 +62,53 @@ LogicalResult updateWorkGroupSize(FuncOp funcOp, ArrayRef<int64_t> workGroupSize); +//===----------------------------------------------------------------------===// +// Loop utils +//===----------------------------------------------------------------------===// + +/// Collapses all loops in a scf.parallel into one scf.parallel operation. This +/// is done by +/// 1) Normalize the loop bounds to be [0, (ub - lb) / step) +/// 2) Compute the total number of iterations. +/// 3) From the induction variable of the modified loop, compute the values of +/// the original induction variables by de-linearization. 
+scf::ParallelOp collapseParallelLoops(PatternRewriter &rewriter, + scf::ParallelOp pLoopOp); + +struct LoopBounds { + Value lb; + Value ub; + Value step; +}; + +/// Replaces a scf.parallelOp with an optional scf.parallel op and nested +/// scf.for operations. To create the scf.parallel op as the outermost loop, +/// pass the lower bound, upper bound and steps in `newPLoopLbs`, `newPLoopUbs`, +/// and `newPLoopStep` respectively. The bounds of the inner scf.for operations +/// to be created are passed in `forLbs`, `forUbs`, and `forStep`. The +/// `permutation` vector contains a mapping from the original loop order, to the +/// loop order to be generated. +Operation *replacePLoopOp(ConversionPatternRewriter &rewriter, + scf::ParallelOp pLoopOp, + ArrayRef<LoopBounds> newPLoopBounds, + ArrayRef<LoopBounds> forBounds, + ArrayRef<unsigned> permutation); + +/// Distributes scf.parallel to processors with the processors logically +/// arranged with same dimensionality as the number of loops, i.e. a +/// scf.parallel with 2 loops to a 2D grid of processors. `processorIDs` must be +/// of same size as the number of loops and are the values to use for process ID +/// and number of processors along each dimension in the distributed code. This +/// method assumes that the number of processors is greater than or equal to the +/// number of iterations. So just generates an if statement to mask of +/// processors with no work. When the number of processors is known to be +/// exactly equal to the number of iterations, the if statement is not needed as +/// well. In such cases, `generateGuard` can be set to `false` to avoid +/// generating the if statement. +LogicalResult distributeSingleIterationPerProcessor( + ConversionPatternRewriter &rewriter, scf::ParallelOp pLoopOp, + ArrayRef<linalg::ProcInfo> procInfo, bool generateGuard = false); + } // namespace iree_compiler } // namespace mlir
diff --git a/iree/compiler/Codegen/SPIRV/test/BUILD b/iree/compiler/Codegen/SPIRV/test/BUILD index 55f3118..3b9ea7f 100644 --- a/iree/compiler/Codegen/SPIRV/test/BUILD +++ b/iree/compiler/Codegen/SPIRV/test/BUILD
@@ -34,7 +34,7 @@ "tile_and_vectorize_conv.mlir", "tile_and_vectorize_matmul.mlir", "vector_to_cooperative_matrix.mlir", - "vector_to_gpu.mlir", + "vectorize_copy_to_workgroup_memory.mlir", "vectorize_elementwise_ops.mlir", "vectorize_matmul.mlir", "vectorize_load_store.mlir",
diff --git a/iree/compiler/Codegen/SPIRV/test/CMakeLists.txt b/iree/compiler/Codegen/SPIRV/test/CMakeLists.txt index 3022f00..fa0818e 100644 --- a/iree/compiler/Codegen/SPIRV/test/CMakeLists.txt +++ b/iree/compiler/Codegen/SPIRV/test/CMakeLists.txt
@@ -29,7 +29,7 @@ "tile_and_vectorize_conv.mlir" "tile_and_vectorize_matmul.mlir" "vector_to_cooperative_matrix.mlir" - "vector_to_gpu.mlir" + "vectorize_copy_to_workgroup_memory.mlir" "vectorize_elementwise_ops.mlir" "vectorize_load_store.mlir" "vectorize_matmul.mlir"
diff --git a/iree/compiler/Codegen/SPIRV/test/vector_to_gpu.mlir b/iree/compiler/Codegen/SPIRV/test/vectorize_copy_to_workgroup_memory.mlir similarity index 92% rename from iree/compiler/Codegen/SPIRV/test/vector_to_gpu.mlir rename to iree/compiler/Codegen/SPIRV/test/vectorize_copy_to_workgroup_memory.mlir index e808be4..662890b 100644 --- a/iree/compiler/Codegen/SPIRV/test/vector_to_gpu.mlir +++ b/iree/compiler/Codegen/SPIRV/test/vectorize_copy_to_workgroup_memory.mlir
@@ -1,4 +1,7 @@ -// RUN: iree-opt -split-input-file -iree-spirv-vector-to-gpu %s | IreeFileCheck %s +// RUN: iree-opt -split-input-file -iree-spirv-copy-to-workgroup-memory %s +// TODO(antiagainst): Re-enable vectorizing workgroup memory copy once the +// whole pipeline is in a better state. +// | IreeFileCheck %s #map0 = affine_map<(d0, d1)[s0] -> (d0 * 4096 + s0 + d1)>
diff --git a/iree/compiler/Codegen/Utils/Utils.cpp b/iree/compiler/Codegen/Utils/Utils.cpp index a6e2ee5..1b9b9d3 100644 --- a/iree/compiler/Codegen/Utils/Utils.cpp +++ b/iree/compiler/Codegen/Utils/Utils.cpp
@@ -61,6 +61,9 @@ SmallVector<unsigned> getPartitionedLoops(Operation *op) { SmallVector<unsigned> partitionedLoops; + if (auto mmt4dOp = dyn_cast<linalg::Mmt4DOp>(op)) { + return {0, 1}; + } if (auto linalgOp = dyn_cast<linalg::LinalgOp>(op)) { size_t numOuterParallelLoops = getNumOuterParallelLoops(linalgOp); partitionedLoops =
diff --git a/iree/compiler/Dialect/Flow/Conversion/TensorToFlow/test/cast.mlir b/iree/compiler/Dialect/Flow/Conversion/TensorToFlow/test/cast.mlir index 10e1b58..5167626 100644 --- a/iree/compiler/Dialect/Flow/Conversion/TensorToFlow/test/cast.mlir +++ b/iree/compiler/Dialect/Flow/Conversion/TensorToFlow/test/cast.mlir
@@ -1,4 +1,4 @@ -// RUN: iree-opt -allow-unregistered-dialect -split-input-file -iree-flow-convert-to-flow-tensor-ops-pass %s | IreeFileCheck %s +// RUN: iree-opt -allow-unregistered-dialect -split-input-file -iree-flow-convert-tensor-ops-pass %s | IreeFileCheck %s func @static_tensor_cast_to_dynamic(%arg0: tensor<4x4xf32>) -> tensor<?x?xf32> { // CHECK-DAG: %[[C4:.*]] = constant 4 : index
diff --git a/iree/compiler/Dialect/Flow/Conversion/TensorToFlow/test/extract_slice.mlir b/iree/compiler/Dialect/Flow/Conversion/TensorToFlow/test/extract_slice.mlir index 93d3554..ce1bff7 100644 --- a/iree/compiler/Dialect/Flow/Conversion/TensorToFlow/test/extract_slice.mlir +++ b/iree/compiler/Dialect/Flow/Conversion/TensorToFlow/test/extract_slice.mlir
@@ -1,4 +1,4 @@ -// RUN: iree-opt -allow-unregistered-dialect -split-input-file -iree-flow-convert-to-flow-tensor-ops-pass %s | IreeFileCheck %s +// RUN: iree-opt -allow-unregistered-dialect -split-input-file -iree-flow-convert-tensor-ops-pass %s | IreeFileCheck %s func @extract_slice1(%arg0 : tensor<5x24x48xf32>) -> tensor<4xf32> { %0 = tensor.extract_slice %arg0[2, 3, 4] [1, 1, 4] [1, 1, 1]
diff --git a/iree/compiler/Dialect/Flow/Conversion/TensorToFlow/test/from_elements.mlir b/iree/compiler/Dialect/Flow/Conversion/TensorToFlow/test/from_elements.mlir index 4864467..d380abe 100644 --- a/iree/compiler/Dialect/Flow/Conversion/TensorToFlow/test/from_elements.mlir +++ b/iree/compiler/Dialect/Flow/Conversion/TensorToFlow/test/from_elements.mlir
@@ -1,4 +1,4 @@ -// RUN: iree-opt -allow-unregistered-dialect -split-input-file -iree-flow-convert-to-flow-tensor-ops-pass %s | IreeFileCheck %s +// RUN: iree-opt -allow-unregistered-dialect -split-input-file -iree-flow-convert-tensor-ops-pass %s | IreeFileCheck %s // CHECK: func @tensor.from_elements__to__flow.tensor.splat(%[[arg0:.*]]: i8) func @tensor.from_elements__to__flow.tensor.splat(%arg0: i8) -> (i8) {
diff --git a/iree/compiler/Dialect/Flow/Conversion/TensorToFlow/test/insert_slice.mlir b/iree/compiler/Dialect/Flow/Conversion/TensorToFlow/test/insert_slice.mlir index 5712ad7..383624e 100644 --- a/iree/compiler/Dialect/Flow/Conversion/TensorToFlow/test/insert_slice.mlir +++ b/iree/compiler/Dialect/Flow/Conversion/TensorToFlow/test/insert_slice.mlir
@@ -1,4 +1,4 @@ -// RUN: iree-opt -allow-unregistered-dialect -split-input-file -iree-flow-convert-to-flow-tensor-ops-pass %s | IreeFileCheck %s +// RUN: iree-opt -allow-unregistered-dialect -split-input-file -iree-flow-convert-tensor-ops-pass %s | IreeFileCheck %s func @insert_slice_convert (%arg0 : tensor<?x24x48xf32>, %arg1 : tensor<1x4x48xf32>) ->
diff --git a/iree/compiler/Dialect/Flow/IR/BUILD b/iree/compiler/Dialect/Flow/IR/BUILD index 82f85ba..23d7550 100644 --- a/iree/compiler/Dialect/Flow/IR/BUILD +++ b/iree/compiler/Dialect/Flow/IR/BUILD
@@ -25,8 +25,8 @@ include = ["*.td"], ), deps = [ - "//iree/compiler/Dialect/IREE/IR:td_files", "//iree/compiler/Dialect/Shape/IR:td_files", + "//iree/compiler/Dialect/Util/IR:td_files", "@llvm-project//mlir:InferTypeOpInterfaceTdFiles", "@llvm-project//mlir:OpBaseTdFiles", "@llvm-project//mlir:SideEffectTdFiles", @@ -40,28 +40,33 @@ srcs = [ "FlowDialect.cpp", "FlowEnums.cpp.inc", - "FlowInterfaces.cpp.inc", "FlowOpFolders.cpp", + "FlowOpInterfaces.cpp.inc", "FlowOpUtils.cpp", "FlowOps.cpp", "FlowOps.cpp.inc", + "FlowTypeInterfaces.cpp.inc", "FlowTypes.cpp", + "FlowTypes.cpp.inc", ], hdrs = [ "FlowDialect.h", "FlowEnums.h.inc", - "FlowInterfaces.h.inc", + "FlowOpInterfaces.h.inc", "FlowOpUtils.h", "FlowOps.h", "FlowOps.h.inc", + "FlowTypeInterfaces.h.inc", "FlowTypes.h", + "FlowTypes.h.inc", ], deps = [ ":FlowEnumsGen", ":FlowInterfacesGen", ":FlowOpsGen", - "//iree/compiler/Dialect/IREE/IR", + ":FlowTypesGen", "//iree/compiler/Dialect/Shape/IR", + "//iree/compiler/Dialect/Util/IR", "@llvm-project//llvm:Support", "@llvm-project//mlir:IR", "@llvm-project//mlir:InferTypeOpInterface", @@ -97,11 +102,19 @@ tbl_outs = [ ( ["-gen-op-interface-decls"], - "FlowInterfaces.h.inc", + "FlowOpInterfaces.h.inc", ), ( ["-gen-op-interface-defs"], - "FlowInterfaces.cpp.inc", + "FlowOpInterfaces.cpp.inc", + ), + ( + ["-gen-type-interface-decls"], + "FlowTypeInterfaces.h.inc", + ), + ( + ["-gen-type-interface-defs"], + "FlowTypeInterfaces.cpp.inc", ), ], tblgen = "@llvm-project//mlir:mlir-tblgen", @@ -126,6 +139,31 @@ deps = [":td_files"], ) +gentbl_cc_library( + name = "FlowTypesGen", + tbl_outs = [ + ( + ["-gen-attrdef-decls"], + "FlowAttrs.h.inc", + ), + ( + ["-gen-attrdef-defs"], + "FlowAttrs.cpp.inc", + ), + ( + ["-gen-typedef-decls"], + "FlowTypes.h.inc", + ), + ( + ["-gen-typedef-defs"], + "FlowTypes.cpp.inc", + ), + ], + tblgen = "@llvm-project//mlir:mlir-tblgen", + td_file = "FlowBase.td", + deps = [":td_files"], +) + iree_tablegen_doc( name = "FlowDialectDocGen", tbl_outs 
= [
diff --git a/iree/compiler/Dialect/Flow/IR/CMakeLists.txt b/iree/compiler/Dialect/Flow/IR/CMakeLists.txt index 177db75..245efb5 100644 --- a/iree/compiler/Dialect/Flow/IR/CMakeLists.txt +++ b/iree/compiler/Dialect/Flow/IR/CMakeLists.txt
@@ -16,24 +16,29 @@ HDRS "FlowDialect.h" "FlowEnums.h.inc" - "FlowInterfaces.h.inc" + "FlowOpInterfaces.h.inc" "FlowOpUtils.h" "FlowOps.h" "FlowOps.h.inc" + "FlowTypeInterfaces.h.inc" "FlowTypes.h" + "FlowTypes.h.inc" SRCS "FlowDialect.cpp" "FlowEnums.cpp.inc" - "FlowInterfaces.cpp.inc" "FlowOpFolders.cpp" + "FlowOpInterfaces.cpp.inc" "FlowOpUtils.cpp" "FlowOps.cpp" "FlowOps.cpp.inc" + "FlowTypeInterfaces.cpp.inc" "FlowTypes.cpp" + "FlowTypes.cpp.inc" DEPS ::FlowEnumsGen ::FlowInterfacesGen ::FlowOpsGen + ::FlowTypesGen LLVMSupport MLIRIR MLIRInferTypeOpInterface @@ -44,8 +49,8 @@ MLIRSupport MLIRTensor MLIRTransformUtils - iree::compiler::Dialect::IREE::IR iree::compiler::Dialect::Shape::IR + iree::compiler::Dialect::Util::IR PUBLIC ) @@ -65,8 +70,10 @@ TD_FILE "FlowInterfaces.td" OUTS - -gen-op-interface-decls FlowInterfaces.h.inc - -gen-op-interface-defs FlowInterfaces.cpp.inc + -gen-op-interface-decls FlowOpInterfaces.h.inc + -gen-op-interface-defs FlowOpInterfaces.cpp.inc + -gen-type-interface-decls FlowTypeInterfaces.h.inc + -gen-type-interface-defs FlowTypeInterfaces.cpp.inc ) iree_tablegen_library( @@ -79,6 +86,18 @@ -gen-op-defs FlowOps.cpp.inc ) +iree_tablegen_library( + NAME + FlowTypesGen + TD_FILE + "FlowBase.td" + OUTS + -gen-attrdef-decls FlowAttrs.h.inc + -gen-attrdef-defs FlowAttrs.cpp.inc + -gen-typedef-decls FlowTypes.h.inc + -gen-typedef-defs FlowTypes.cpp.inc +) + iree_tablegen_doc( NAME FlowDialectDocGen
diff --git a/iree/compiler/Dialect/Flow/IR/FlowBase.td b/iree/compiler/Dialect/Flow/IR/FlowBase.td index 746dfea..df488d2 100644 --- a/iree/compiler/Dialect/Flow/IR/FlowBase.td +++ b/iree/compiler/Dialect/Flow/IR/FlowBase.td
@@ -7,7 +7,8 @@ #ifndef IREE_DIALECT_FLOW_BASE #define IREE_DIALECT_FLOW_BASE -include "iree/compiler/Dialect/IREE/IR/IREEBase.td" +include "iree/compiler/Dialect/Flow/IR/FlowInterfaces.td" +include "iree/compiler/Dialect/Util/IR/UtilBase.td" include "iree/compiler/Dialect/Shape/IR/ShapeBase.td" //===----------------------------------------------------------------------===// @@ -139,4 +140,12 @@ }]; } +// TODO(benvanik): remove when we have real types using this. +def FLOW_Dummy0 : TypeDef<FLOW_Dialect, "Dummy", []> { + let mnemonic = "dummy"; +} +def FLOW_Dummy1 : AttrDef<FLOW_Dialect, "Dummy", []> { + let mnemonic = "dummy"; +} + #endif // IREE_DIALECT_FLOW_BASE
diff --git a/iree/compiler/Dialect/Flow/IR/FlowDialect.cpp b/iree/compiler/Dialect/Flow/IR/FlowDialect.cpp index fbe120f..60fa352 100644 --- a/iree/compiler/Dialect/Flow/IR/FlowDialect.cpp +++ b/iree/compiler/Dialect/Flow/IR/FlowDialect.cpp
@@ -23,8 +23,6 @@ namespace IREE { namespace Flow { -#include "iree/compiler/Dialect/Flow/IR/FlowInterfaces.cpp.inc" // IWYU pragma: keep - namespace { // Used to control inlining behavior. @@ -62,13 +60,15 @@ FlowDialect::FlowDialect(MLIRContext *context) : Dialect(getDialectNamespace(), context, TypeID::get<FlowDialect>()) { - addInterfaces<FlowInlinerInterface, FlowFolderInterface>(); - addTypes<DispatchTensorType>(); + registerAttributes(); + registerTypes(); #define GET_OP_LIST addOperations< #include "iree/compiler/Dialect/Flow/IR/FlowOps.cpp.inc" >(); + addInterfaces<FlowInlinerInterface, FlowFolderInterface>(); + context->getOrLoadDialect("shapex"); context->getOrLoadDialect<tensor::TensorDialect>(); } @@ -80,28 +80,6 @@ return nullptr; } -//===----------------------------------------------------------------------===// -// Type printing and parsing -//===----------------------------------------------------------------------===// - -Type FlowDialect::parseType(DialectAsmParser &parser) const { - llvm::StringRef spec = parser.getFullSymbolSpec(); - if (succeeded(parser.parseOptionalKeyword("dispatch.tensor"))) { - return DispatchTensorType::parse(parser); - } - parser.emitError(parser.getCurrentLocation()) - << "unknown Flow type: " << spec; - return {}; -} - -void FlowDialect::printType(Type type, DialectAsmPrinter &p) const { - if (auto inputType = type.dyn_cast<DispatchTensorType>()) { - IREE::Flow::printType(inputType, p); - } else { - llvm_unreachable("unknown Flow type"); - } -} - } // namespace Flow } // namespace IREE } // namespace iree_compiler
diff --git a/iree/compiler/Dialect/Flow/IR/FlowDialect.h b/iree/compiler/Dialect/Flow/IR/FlowDialect.h index e7cfc75..636678b 100644 --- a/iree/compiler/Dialect/Flow/IR/FlowDialect.h +++ b/iree/compiler/Dialect/Flow/IR/FlowDialect.h
@@ -9,7 +9,6 @@ #include "mlir/IR/Dialect.h" #include "mlir/IR/OpDefinition.h" -#include "mlir/IR/PatternMatch.h" #include "mlir/IR/SymbolTable.h" namespace mlir { @@ -17,8 +16,6 @@ namespace IREE { namespace Flow { -#include "iree/compiler/Dialect/Flow/IR/FlowInterfaces.h.inc" // IWYU pragma: export - class FlowDialect : public Dialect { public: explicit FlowDialect(MLIRContext *context); @@ -27,6 +24,9 @@ Operation *materializeConstant(OpBuilder &builder, Attribute value, Type type, Location loc) override; + Attribute parseAttribute(DialectAsmParser &parser, Type type) const override; + void printAttribute(Attribute attr, DialectAsmPrinter &p) const override; + Type parseType(DialectAsmParser &parser) const override; void printType(Type type, DialectAsmPrinter &p) const override; @@ -34,6 +34,10 @@ return op && op->getDialect() && op->getDialect()->getNamespace() == getDialectNamespace(); } + + private: + void registerAttributes(); + void registerTypes(); }; } // namespace Flow
diff --git a/iree/compiler/Dialect/Flow/IR/FlowInterfaces.td b/iree/compiler/Dialect/Flow/IR/FlowInterfaces.td index 5bf4fac..9fc93f8 100644 --- a/iree/compiler/Dialect/Flow/IR/FlowInterfaces.td +++ b/iree/compiler/Dialect/Flow/IR/FlowInterfaces.td
@@ -7,7 +7,7 @@ #ifndef IREE_DIALECT_FLOW_INTERFACES #define IREE_DIALECT_FLOW_INTERFACES -include "iree/compiler/Dialect/IREE/IR/IREEBase.td" +include "iree/compiler/Dialect/Util/IR/UtilBase.td" //===----------------------------------------------------------------------===// // IREE::Flow::ClosureOpInterface
diff --git a/iree/compiler/Dialect/Flow/IR/FlowOpFolders.cpp b/iree/compiler/Dialect/Flow/IR/FlowOpFolders.cpp index ce2f8cb..c0a34a0 100644 --- a/iree/compiler/Dialect/Flow/IR/FlowOpFolders.cpp +++ b/iree/compiler/Dialect/Flow/IR/FlowOpFolders.cpp
@@ -10,8 +10,8 @@ #include "iree/compiler/Dialect/Flow/IR/FlowDialect.h" #include "iree/compiler/Dialect/Flow/IR/FlowOpUtils.h" #include "iree/compiler/Dialect/Flow/IR/FlowOps.h" -#include "iree/compiler/Dialect/IREE/IR/IREETypes.h" #include "iree/compiler/Dialect/Shape/IR/ShapeOps.h" +#include "iree/compiler/Dialect/Util/IR/UtilTypes.h" #include "llvm/ADT/MapVector.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/STLExtras.h" @@ -142,11 +142,11 @@ LogicalResult matchAndRewrite(ExStreamFragmentOp op, PatternRewriter &rewriter) const override { - bool didClone = - insertTiedClones(cast<TiedOpInterface>(op.getOperation()), rewriter); + bool didClone = insertTiedClones( + cast<IREE::Util::TiedOpInterface>(op.getOperation()), rewriter); for (auto &block : op.getClosureBodyRegion()) { for (auto &innerOp : block) { - if (auto tiedOp = dyn_cast<TiedOpInterface>(innerOp)) { + if (auto tiedOp = dyn_cast<IREE::Util::TiedOpInterface>(innerOp)) { didClone |= insertTiedClones(tiedOp, rewriter); } } @@ -154,7 +154,7 @@ return success(didClone); } - bool insertTiedClones(TiedOpInterface tiedOp, + bool insertTiedClones(IREE::Util::TiedOpInterface tiedOp, PatternRewriter &rewriter) const { bool didClone = false; for (unsigned resultIndex = 0; resultIndex < tiedOp->getNumResults(); @@ -212,7 +212,7 @@ continue; // Already tied. } auto baseValue = - IREE::TiedOpInterface::findTiedBaseValue(result.value()); + IREE::Util::TiedOpInterface::findTiedBaseValue(result.value()); if (auto blockArg = baseValue.dyn_cast<BlockArgument>()) { unsigned operandIndex = blockArg.getArgNumber(); op.setTiedResultOperandIndex(result.index(), operandIndex); @@ -230,7 +230,7 @@ OwningRewritePatternList &results, MLIRContext *context) { results.insert<ClosureOptimizationPattern<ExStreamFragmentOp>>(context); results.insert<InsertImmutabilityPreservingStreamClones>(context); - // TODO(#6185): fix stream ties when types/shapes change. 
+ // TODO(#6420): fix HAL lowering of this (or wait until streams are gone). // results.insert<TieStreamResults>(context); }
diff --git a/iree/compiler/Dialect/Flow/IR/FlowOps.cpp b/iree/compiler/Dialect/Flow/IR/FlowOps.cpp index ee5fd47..d18360f 100644 --- a/iree/compiler/Dialect/Flow/IR/FlowOps.cpp +++ b/iree/compiler/Dialect/Flow/IR/FlowOps.cpp
@@ -7,8 +7,8 @@ #include "iree/compiler/Dialect/Flow/IR/FlowOps.h" #include "iree/compiler/Dialect/Flow/IR/FlowOpUtils.h" -#include "iree/compiler/Dialect/IREE/IR/IREETypes.h" #include "iree/compiler/Dialect/Shape/IR/Builders.h" +#include "iree/compiler/Dialect/Util/IR/UtilTypes.h" #include "llvm/ADT/BitVector.h" #include "llvm/ADT/StringExtras.h" #include "llvm/Support/CommandLine.h" @@ -142,46 +142,18 @@ return success(); } -// Ties the |tiedResult| parsed operand back to a previously parsed operand. -// The type and any dynamic dimensions of the operand will be used for the -// result values and the operand index will be appended to |tiedOperandIndices|. -static ParseResult tieOperand( - OpAsmParser::OperandType tiedResult, OpAsmParser &parser, - ArrayRef<OpAsmParser::OperandType> operands, TypeRange operandTypes, - ArrayRef<OpAsmParser::OperandType> operandDims, - SmallVectorImpl<Type> &resultTypes, - SmallVectorImpl<OpAsmParser::OperandType> &resultDims, - SmallVectorImpl<int64_t> &tiedOperandIndices) { - int64_t operandIndex = TiedOpInterface::kUntiedIndex; +// Finds the operand index in |operands| that |tiedResult| references. +// Returns TiedOpInterface::kUntiedIndex if no operand is found. 
+static int64_t findTiedOperand(OpAsmParser::OperandType tiedResult, + ArrayRef<OpAsmParser::OperandType> operands) { + int64_t operandIndex = IREE::Util::TiedOpInterface::kUntiedIndex; for (int64_t i = 0; i < operands.size(); ++i) { if (operands[i].name == tiedResult.name) { operandIndex = i; break; } } - if (operandIndex == TiedOpInterface::kUntiedIndex) { - return parser.emitError(tiedResult.location, - "tied operand not found for result reference ") - << tiedResult.name; - } - - auto resultType = operandTypes[operandIndex]; - resultTypes.push_back(resultType); - tiedOperandIndices.push_back(operandIndex); - - auto shapedType = resultType.dyn_cast<ShapedType>(); - if (shapedType) { - unsigned dimsIndex = 0; - for (unsigned i = 0; i < operandIndex; ++i) { - if (auto shapedType = operandTypes[i].dyn_cast<ShapedType>()) { - dimsIndex += shapedType.getNumDynamicDims(); - } - } - resultDims.append(llvm::to_vector<4>( - operandDims.slice(dimsIndex, shapedType.getNumDynamicDims()))); - } - - return success(); + return operandIndex; } static ParseResult parseShapedResultList( @@ -194,31 +166,40 @@ do { OpAsmParser::OperandType tiedResult; auto res = parser.parseOptionalOperand(tiedResult); + Type type; + int64_t tiedOperandIndex = IREE::Util::TiedOpInterface::kUntiedIndex; if (res.hasValue() && succeeded(res.getValue())) { - if (failed(tieOperand(tiedResult, parser, operands, operandTypes, - operandDims, resultTypes, resultDims, - tiedOperandIndices))) { - return failure(); + tiedOperandIndex = findTiedOperand(tiedResult, operands); + if (tiedOperandIndex == IREE::Util::TiedOpInterface::kUntiedIndex) { + return parser.emitError(tiedResult.location, + "tied operand not found for result reference ") + << tiedResult.name; } - } else { - Type type; - if (failed(parser.parseType(type))) return failure(); - if (auto shapedType = type.dyn_cast<ShapedType>()) { - if (!shapedType.hasStaticShape()) { - SmallVector<OpAsmParser::OperandType, 4> dynamicDims; - if 
(failed(parser.parseLBrace()) || - failed(parser.parseOperandList(dynamicDims, - shapedType.getNumDynamicDims(), - OpAsmParser::Delimiter::None)) || - failed(parser.parseRBrace())) { - return failure(); - } - resultDims.append(dynamicDims); - } + if (succeeded(parser.parseOptionalKeyword("as"))) { + // Type _may_ differ from the operand. + if (failed(parser.parseType(type))) return failure(); + } else { + // Use the operands type. + type = operandTypes[tiedOperandIndex]; } - resultTypes.push_back(type); - tiedOperandIndices.push_back(TiedOpInterface::kUntiedIndex); + } else if (failed(parser.parseType(type))) { + return failure(); } + if (auto shapedType = type.dyn_cast<ShapedType>()) { + if (!shapedType.hasStaticShape()) { + SmallVector<OpAsmParser::OperandType, 4> dynamicDims; + if (failed(parser.parseLBrace()) || + failed(parser.parseOperandList(dynamicDims, + shapedType.getNumDynamicDims(), + OpAsmParser::Delimiter::None)) || + failed(parser.parseRBrace())) { + return failure(); + } + resultDims.append(dynamicDims); + } + } + resultTypes.push_back(type); + tiedOperandIndices.push_back(tiedOperandIndex); } while (succeeded(parser.parseOptionalComma())); if (!tiedOperandIndices.empty()) { tiedOperands = parser.getBuilder().getIndexArrayAttr(tiedOperandIndices); @@ -284,27 +265,36 @@ }); p << ") -> "; if (resultTypes.size() != 1) p << "("; - auto tiedOp = cast<TiedOpInterface>(op); + auto tiedOp = cast<IREE::Util::TiedOpInterface>(op); for (unsigned i = 0; i < resultTypes.size(); ++i) { - auto tiedOperand = tiedOp.getTiedResultOperandIndex(i); - if (tiedOperand.hasValue()) { - p.printOperand(op->getOperand(tiedOperand.getValue())); - } else { - auto type = resultTypes[i]; - p.printType(type); - if (auto shapedType = type.dyn_cast<ShapedType>()) { - if (!shapedType.hasStaticShape()) { - if (resultDims.empty()) { - p << "{<<INVALID>>}"; - return; - } - p << "{"; - llvm::interleaveComma( - resultDims.take_front(shapedType.getNumDynamicDims()), p, - [&](Value value) { 
p.printOperand(value); }); - p << "}"; - resultDims = resultDims.drop_front(shapedType.getNumDynamicDims()); + auto resultType = resultTypes[i]; + auto tiedOperandIndex = tiedOp.getTiedResultOperandIndex(i); + bool printType = true; + if (tiedOperandIndex.hasValue()) { + auto tiedOperand = op->getOperand(tiedOperandIndex.getValue()); + p.printOperand(tiedOperand); + if (tiedOperand.getType() != resultType) { + p << " as "; + } else { + // Type elided as it matches the operand. + printType = false; + } + } + if (printType) { + p.printType(resultType); + } + if (auto shapedType = resultType.dyn_cast<ShapedType>()) { + if (!shapedType.hasStaticShape()) { + if (resultDims.empty()) { + p << "{<<INVALID>>}"; + return; } + p << "{"; + llvm::interleaveComma( + resultDims.take_front(shapedType.getNumDynamicDims()), p, + [&](Value value) { p.printOperand(value); }); + p << "}"; + resultDims = resultDims.drop_front(shapedType.getNumDynamicDims()); } } if (i < resultTypes.size() - 1) p << ", "; @@ -509,7 +499,7 @@ static LogicalResult verifyVariableLoadIndirectOp(VariableLoadIndirectOp &op) { auto variableType = - op.variable().getType().cast<IREE::PtrType>().getTargetType(); + op.variable().getType().cast<IREE::Util::PtrType>().getTargetType(); auto loadType = op.result().getType(); if (!isVariableTypeCompatible(variableType, loadType)) { return op.emitOpError() << "variable type mismatch; variable pointer is " @@ -548,7 +538,7 @@ static LogicalResult verifyVariableStoreIndirectOp( VariableStoreIndirectOp &op) { auto variableType = - op.variable().getType().cast<IREE::PtrType>().getTargetType(); + op.variable().getType().cast<IREE::Util::PtrType>().getTargetType(); auto storeType = op.value().getType(); if (!isVariableTypeCompatible(variableType, storeType)) { return op.emitOpError() << "variable type mismatch; variable pointer is " @@ -665,8 +655,8 @@ state.addOperands(operandDims); state.addOperands(resultDims); state.addAttributes(attributes); - 
state.attributes.erase(TiedOpInterface::getStorageAttrName()); - state.addAttribute(TiedOpInterface::getStorageAttrName(), + state.attributes.erase(IREE::Util::TiedOpInterface::getStorageAttrName()); + state.addAttribute(IREE::Util::TiedOpInterface::getStorageAttrName(), builder.getIndexArrayAttr(tiedOperands)); state.attributes.erase("operand_segment_sizes"); state.addAttribute("operand_segment_sizes", @@ -689,7 +679,7 @@ for (unsigned resultIndex = 0; resultIndex < tiedOperands.size(); ++resultIndex) { int64_t tiedOperandIndex = tiedOperands[resultIndex]; - if (tiedOperandIndex != TiedOpInterface::kUntiedIndex) { + if (tiedOperandIndex != IREE::Util::TiedOpInterface::kUntiedIndex) { operandAliases[tiedOperandIndex] = true; resultAliases[resultIndex] = true; } @@ -864,14 +854,14 @@ // operands. unsigned tiedOperandOffset = getTiedOperandsIndexAndLength().first; for (unsigned i = 0; i < newTiedOperandIndices.size(); ++i) { - if (newTiedOperandIndices[i] != TiedOpInterface::kUntiedIndex) { + if (newTiedOperandIndices[i] != IREE::Util::TiedOpInterface::kUntiedIndex) { newTiedOperandIndices[i] -= tiedOperandOffset; } } // This need to happen *after* accounting for tied operand offset, given that // all excluded operand/result indices are relative ranges. 
- excludeTiedOperandAndResultIndices( + IREE::Util::excludeTiedOperandAndResultIndices( excludedOperandIndices, excludedResultIndices, newTiedOperandIndices); auto newOp = rewriter.create<DispatchWorkgroupsOp>( @@ -1092,8 +1082,9 @@ state.addOperands(operandDims); state.addOperands(resultDims); state.addAttributes(attributes); - state.attributes.erase(TiedOpInterface::getStorageAttrName()); - state.addAttribute(TiedOpInterface::getStorageAttrName(), tiedOperands); + state.attributes.erase(IREE::Util::TiedOpInterface::getStorageAttrName()); + state.addAttribute(IREE::Util::TiedOpInterface::getStorageAttrName(), + tiedOperands); state.attributes.erase("operand_segment_sizes"); state.addAttribute("operand_segment_sizes", builder.getI32VectorAttr({ @@ -1153,7 +1144,7 @@ } Value TensorReshapeOp::getTiedResult(unsigned resultIndex) { - return IREE::TiedOpInterface::findTiedBaseValue(source()); + return IREE::Util::TiedOpInterface::findTiedBaseValue(source()); } ::llvm::Optional<unsigned> TensorReshapeOp::getTiedResultOperandIndex( @@ -1257,7 +1248,7 @@ } Value TensorUpdateOp::getTiedResult(unsigned resultIndex) { - return IREE::TiedOpInterface::findTiedBaseValue(target()); + return IREE::Util::TiedOpInterface::findTiedBaseValue(target()); } ::llvm::Optional<unsigned> TensorUpdateOp::getTiedResultOperandIndex( @@ -1283,8 +1274,8 @@ state.addOperands(operandDims); state.addOperands(resultDims); state.addAttributes(attributes); - state.attributes.erase(TiedOpInterface::getStorageAttrName()); - state.addAttribute(TiedOpInterface::getStorageAttrName(), + state.attributes.erase(IREE::Util::TiedOpInterface::getStorageAttrName()); + state.addAttribute(IREE::Util::TiedOpInterface::getStorageAttrName(), builder.getIndexArrayAttr(tiedOperands)); state.attributes.erase("operand_segment_sizes"); state.addAttribute("operand_segment_sizes", @@ -1425,7 +1416,7 @@ auto newTiedOperandIndices = llvm::to_vector<4>(getTiedResultOperandIndices()); - excludeTiedOperandAndResultIndices( + 
IREE::Util::excludeTiedOperandAndResultIndices( excludedOperandIndices, excludedResultIndices, newTiedOperandIndices); assert(getTiedOperandsIndexAndLength().first == 0 && "operands must be the first ODS group");
diff --git a/iree/compiler/Dialect/Flow/IR/FlowOps.h b/iree/compiler/Dialect/Flow/IR/FlowOps.h index aba3566..1da0eb9 100644 --- a/iree/compiler/Dialect/Flow/IR/FlowOps.h +++ b/iree/compiler/Dialect/Flow/IR/FlowOps.h
@@ -11,9 +11,10 @@ #include "iree/compiler/Dialect/Flow/IR/FlowDialect.h" #include "iree/compiler/Dialect/Flow/IR/FlowTypes.h" -#include "iree/compiler/Dialect/IREE/IR/IREETraits.h" #include "iree/compiler/Dialect/Shape/IR/ShapeDialect.h" #include "iree/compiler/Dialect/Shape/IR/ShapeTypes.h" +#include "iree/compiler/Dialect/Util/IR/UtilTraits.h" +#include "iree/compiler/Dialect/Util/IR/UtilTypes.h" #include "mlir/IR/Attributes.h" #include "mlir/IR/BuiltinOps.h" #include "mlir/IR/BuiltinTypes.h"
diff --git a/iree/compiler/Dialect/Flow/IR/FlowOps.td b/iree/compiler/Dialect/Flow/IR/FlowOps.td index cd1b2f4..bc83f12 100644 --- a/iree/compiler/Dialect/Flow/IR/FlowOps.td +++ b/iree/compiler/Dialect/Flow/IR/FlowOps.td
@@ -9,7 +9,7 @@ include "iree/compiler/Dialect/Flow/IR/FlowBase.td" include "iree/compiler/Dialect/Flow/IR/FlowInterfaces.td" -include "iree/compiler/Dialect/IREE/IR/IREEInterfaces.td" +include "iree/compiler/Dialect/Util/IR/UtilInterfaces.td" include "iree/compiler/Dialect/Shape/IR/ShapeInterfaces.td" include "mlir/IR/OpAsmInterface.td" include "mlir/IR/SymbolInterfaces.td" @@ -174,7 +174,7 @@ AttrSizedOperandSegments, SingleBlockImplicitTerminator<"IREE::Flow::ReturnOp">, DeclareOpInterfaceMethods<FLOW_ClosureOpInterface>, - DeclareOpInterfaceMethods<IREE_TiedOpInterface, [ + DeclareOpInterfaceMethods<Util_TiedOpInterface, [ "getTiedOperandsIndexAndLength", ]>, DeclareOpInterfaceMethods<Shape_ShapeCarryingOpInterface>, @@ -220,7 +220,7 @@ Variadic<AnyType>:$operands, FLOW_ShapeDynamicDims:$operand_dims, FLOW_ShapeDynamicDims:$result_dims, - OptionalAttr<IREE_TiedOpStorageAttr>:$tied_operands + OptionalAttr<Util_TiedOpStorageAttr>:$tied_operands ); let results = (outs Variadic<AnyType>:$results @@ -658,7 +658,7 @@ def FLOW_DispatchOp : FLOW_PureOp<"dispatch", [ AttrSizedOperandSegments, FLOW_StreamableOp, - DeclareOpInterfaceMethods<IREE_TiedOpInterface, [ + DeclareOpInterfaceMethods<Util_TiedOpInterface, [ "getTiedOperandsIndexAndLength", ]>, DeclareOpInterfaceMethods<Shape_ShapeCarryingOpInterface>, @@ -676,7 +676,7 @@ Variadic<AnyType>:$operands, FLOW_ShapeDynamicDims:$operand_dims, FLOW_ShapeDynamicDims:$result_dims, - OptionalAttr<IREE_TiedOpStorageAttr>:$tied_operands + OptionalAttr<Util_TiedOpStorageAttr>:$tied_operands ); let results = (outs Variadic<AnyType>:$results @@ -732,7 +732,7 @@ FLOW_StreamableOp, AllElementTypesMatch<["source", "result"]>, AttrSizedOperandSegments, - DeclareOpInterfaceMethods<IREE_TiedOpInterface, [ + DeclareOpInterfaceMethods<Util_TiedOpInterface, [ "getTiedResult", "getTiedResultOperandIndex", "getTiedResultOperandIndices", @@ -991,7 +991,7 @@ AllTypesMatch<["target", "result"]>, AllElementTypesMatch<["update", "target", 
"result"]>, AttrSizedOperandSegments, - DeclareOpInterfaceMethods<IREE_TiedOpInterface, [ + DeclareOpInterfaceMethods<Util_TiedOpInterface, [ "getTiedResult", "getTiedResultOperandIndex", "getTiedResultOperandIndices", @@ -1010,7 +1010,7 @@ Variadic<FLOW_Dim>:$start_indices, FLOW_Tensor:$update, FLOW_ShapeDynamicDims:$update_dims, - OptionalAttr<IREE_TiedOpStorageAttr>:$tied_operands + OptionalAttr<Util_TiedOpStorageAttr>:$tied_operands ); let results = (outs FLOW_Tensor:$result @@ -1067,7 +1067,7 @@ IsolatedFromAbove, AttrSizedOperandSegments, DeclareOpInterfaceMethods<FLOW_ClosureOpInterface>, - DeclareOpInterfaceMethods<IREE_TiedOpInterface>, + DeclareOpInterfaceMethods<Util_TiedOpInterface>, DeclareOpInterfaceMethods<Shape_ShapeCarryingOpInterface>, ]> { let summary = [{experimental op for defining formed stream regions}]; @@ -1081,7 +1081,7 @@ Variadic<AnyType>:$operands, FLOW_ShapeDynamicDims:$operand_dims, FLOW_ShapeDynamicDims:$result_dims, - OptionalAttr<IREE_TiedOpStorageAttr>:$tied_operands + OptionalAttr<Util_TiedOpStorageAttr>:$tied_operands ); let results = (outs Variadic<AnyType>:$results
diff --git a/iree/compiler/Dialect/Flow/IR/FlowTypes.cpp b/iree/compiler/Dialect/Flow/IR/FlowTypes.cpp index a443e45..4fdc6ad 100644 --- a/iree/compiler/Dialect/Flow/IR/FlowTypes.cpp +++ b/iree/compiler/Dialect/Flow/IR/FlowTypes.cpp
@@ -6,8 +6,16 @@ #include "iree/compiler/Dialect/Flow/IR/FlowTypes.h" +#include "llvm/ADT/StringExtras.h" +#include "llvm/ADT/TypeSwitch.h" +#include "mlir/IR/DialectImplementation.h" + // clang-format off: must be included after all LLVM/MLIR headers. +#define GET_ATTRDEF_CLASSES +#include "iree/compiler/Dialect/Flow/IR/FlowAttrs.cpp.inc" // IWYU pragma: keep #include "iree/compiler/Dialect/Flow/IR/FlowEnums.cpp.inc" // IWYU pragma: keep +#define GET_TYPEDEF_CLASSES +#include "iree/compiler/Dialect/Flow/IR/FlowTypes.cpp.inc" // IWYU pragma: keep // clang-format on namespace mlir { @@ -16,7 +24,7 @@ namespace Flow { //===----------------------------------------------------------------------===// -// Object types +// !flow.dispatch.tensor //===----------------------------------------------------------------------===// // static @@ -160,6 +168,74 @@ p << '>'; } +//===----------------------------------------------------------------------===// +// Dialect registration +//===----------------------------------------------------------------------===// + +#include "iree/compiler/Dialect/Flow/IR/FlowOpInterfaces.cpp.inc" // IWYU pragma: keep +#include "iree/compiler/Dialect/Flow/IR/FlowTypeInterfaces.cpp.inc" // IWYU pragma: keep + +void FlowDialect::registerAttributes() { + addAttributes< +#define GET_ATTRDEF_LIST +#include "iree/compiler/Dialect/Flow/IR/FlowAttrs.cpp.inc" // IWYU pragma: keep + >(); +} + +void FlowDialect::registerTypes() { + addTypes<DispatchTensorType>(); + addTypes< +#define GET_TYPEDEF_LIST +#include "iree/compiler/Dialect/Flow/IR/FlowTypes.cpp.inc" // IWYU pragma: keep + >(); +} + +//===----------------------------------------------------------------------===// +// Type printing and parsing +//===----------------------------------------------------------------------===// + +Attribute FlowDialect::parseAttribute(DialectAsmParser &parser, + Type type) const { + StringRef mnemonic; + if (failed(parser.parseKeyword(&mnemonic))) return {}; + Attribute 
attr; + auto parseResult = + generatedAttributeParser(getContext(), parser, mnemonic, type, attr); + if (parseResult.hasValue()) return attr; + parser.emitError(parser.getCurrentLocation()) + << "unknown Flow attribute: " << mnemonic; + return {}; +} + +void FlowDialect::printAttribute(Attribute attr, DialectAsmPrinter &p) const { + if (failed(generatedAttributePrinter(attr, p))) { + llvm_unreachable("unknown Flow attribute"); + } +} + +Type FlowDialect::parseType(DialectAsmParser &parser) const { + StringRef mnemonic; + if (failed(parser.parseKeyword(&mnemonic))) return {}; + Type type; + OptionalParseResult parseResult = + generatedTypeParser(getContext(), parser, mnemonic, type); + if (parseResult.hasValue()) return type; + if (mnemonic == "dispatch.tensor") { + return DispatchTensorType::parse(parser); + } + parser.emitError(parser.getCurrentLocation()) + << "unknown Flow type: " << mnemonic; + return {}; +} + +void FlowDialect::printType(Type type, DialectAsmPrinter &p) const { + if (auto inputType = type.dyn_cast<DispatchTensorType>()) { + IREE::Flow::printType(inputType, p); + } else if (failed(generatedTypePrinter(type, p))) { + llvm_unreachable("unknown Flow type"); + } +} + } // namespace Flow } // namespace IREE } // namespace iree_compiler
diff --git a/iree/compiler/Dialect/Flow/IR/FlowTypes.h b/iree/compiler/Dialect/Flow/IR/FlowTypes.h index 41176e3..11304f7 100644 --- a/iree/compiler/Dialect/Flow/IR/FlowTypes.h +++ b/iree/compiler/Dialect/Flow/IR/FlowTypes.h
@@ -7,14 +7,16 @@ #ifndef IREE_COMPILER_DIALECT_FLOW_IR_FLOWTYPES_H_ #define IREE_COMPILER_DIALECT_FLOW_IR_FLOWTYPES_H_ -#include "iree/compiler/Dialect/IREE/IR/IREETypes.h" +#include "iree/compiler/Dialect/Flow/IR/FlowDialect.h" #include "iree/compiler/Dialect/Shape/IR/ShapeTypes.h" +#include "iree/compiler/Dialect/Util/IR/UtilTypes.h" #include "llvm/ADT/DenseMapInfo.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringSwitch.h" #include "mlir/IR/BuiltinTypes.h" #include "mlir/IR/DialectImplementation.h" #include "mlir/IR/OpImplementation.h" +#include "mlir/IR/PatternMatch.h" #include "mlir/IR/TypeSupport.h" #include "mlir/IR/Types.h" #include "mlir/Support/LLVM.h" @@ -28,6 +30,9 @@ namespace IREE { namespace Flow { +#include "iree/compiler/Dialect/Flow/IR/FlowOpInterfaces.h.inc" // IWYU pragma: export +#include "iree/compiler/Dialect/Flow/IR/FlowTypeInterfaces.h.inc" // IWYU pragma: export + //===----------------------------------------------------------------------===// // Object types //===----------------------------------------------------------------------===// @@ -183,4 +188,11 @@ } // namespace iree_compiler } // namespace mlir +// clang-format off: must be included after all LLVM/MLIR headers. +#define GET_ATTRDEF_CLASSES +#include "iree/compiler/Dialect/Flow/IR/FlowAttrs.h.inc" // IWYU pragma: keep +#define GET_TYPEDEF_CLASSES +#include "iree/compiler/Dialect/Flow/IR/FlowTypes.h.inc" // IWYU pragma: keep +// clang-format on + #endif // IREE_COMPILER_DIALECT_FLOW_IR_FLOWTYPES_H_
diff --git a/iree/compiler/Dialect/Flow/IR/test/dispatch_ops.mlir b/iree/compiler/Dialect/Flow/IR/test/dispatch_ops.mlir index 7cb22d0..298bb75 100644 --- a/iree/compiler/Dialect/Flow/IR/test/dispatch_ops.mlir +++ b/iree/compiler/Dialect/Flow/IR/test/dispatch_ops.mlir
@@ -39,7 +39,21 @@ %dim0 = constant 100 : index // CHECK-DAG: %[[DIM1:.+]] = constant 200 %dim1 = constant 200 : index - // CHECK: %0:2 = flow.dispatch @ex0::@dispatch_fn[%[[CST]]](%[[CST]], %arg0, %arg1) : (index, tensor<4x?xf32>{%[[DIM0]]}, tensor<8x?xf32>{%[[DIM1]]}) -> (%arg0, %arg1) - %0, %1 = flow.dispatch @ex0::@dispatch_fn[%cst](%cst, %arg0, %arg1) : (index, tensor<4x?xf32>{%dim0}, tensor<8x?xf32>{%dim1}) -> (%arg0, %arg1) + // CHECK: %0:2 = flow.dispatch @ex0::@dispatch_fn[%[[CST]]](%[[CST]], %arg0, %arg1) : (index, tensor<4x?xf32>{%[[DIM0]]}, tensor<8x?xf32>{%[[DIM1]]}) -> (%arg0{%[[DIM1]]}, %arg1{%[[DIM0]]}) + %0, %1 = flow.dispatch @ex0::@dispatch_fn[%cst](%cst, %arg0, %arg1) : (index, tensor<4x?xf32>{%dim0}, tensor<8x?xf32>{%dim1}) -> (%arg0{%dim1}, %arg1{%dim0}) return %0, %1 : tensor<4x?xf32>, tensor<8x?xf32> } + +// ----- + +// CHECK-LABEL: @inplaceTypeChange +// CHECK-SAME: (%[[ARG0:.+]]: tensor<4x?xf32>) +func @inplaceTypeChange(%arg0: tensor<4x?xf32>) -> tensor<?x4xf32> { + // CHECK-DAG: %[[CST:.+]] = constant 4 + %cst = constant 4 : index + // CHECK-DAG: %[[DIM0:.+]] = constant 100 + %dim0 = constant 100 : index + // CHECK: %0 = flow.dispatch @ex0::@dispatch_fn[%[[CST]]](%[[ARG0]]) : (tensor<4x?xf32>{%[[DIM0]]}) -> %arg0 as tensor<?x4xf32>{%[[DIM0]]} + %0 = flow.dispatch @ex0::@dispatch_fn[%cst](%arg0) : (tensor<4x?xf32>{%dim0}) -> %arg0 as tensor<?x4xf32>{%dim0} + return %0 : tensor<?x4xf32> +}
diff --git a/iree/compiler/Dialect/Flow/IR/test/dispatch_workgroups.mlir b/iree/compiler/Dialect/Flow/IR/test/dispatch_workgroups.mlir index f5b997d..9231857 100644 --- a/iree/compiler/Dialect/Flow/IR/test/dispatch_workgroups.mlir +++ b/iree/compiler/Dialect/Flow/IR/test/dispatch_workgroups.mlir
@@ -99,8 +99,8 @@ // CHECK: %[[OUTER_RET0:.+]] = flow.dispatch.workgroups[ // CHECK-SAME: %[[WORKGROUP_COUNT_X]], %[[WORKGROUP_COUNT_Y]] // CHECK-SAME: ](%[[ARG0]], %[[ARG1]]) - // CHECK-SAME: : (tensor<?x4xf32>{%c128}, index) -> %arg0 = - %0 = flow.dispatch.workgroups[%x, %y](%arg0, %arg1) : (tensor<?x4xf32>{%c128}, index) -> %arg0 = + // CHECK-SAME: : (tensor<?x4xf32>{%c128}, index) -> %arg0{%c128} = + %0 = flow.dispatch.workgroups[%x, %y](%arg0, %arg1) : (tensor<?x4xf32>{%c128}, index) -> %arg0{%c128} = // CHECK-NEXT: (%[[INNER_ARG0:.+]]: !flow.dispatch.tensor<readwrite:?x4xf32> // CHECK-SAME: %[[INNER_ARG1:.+]]: index) { (%arg0_capture: !flow.dispatch.tensor<readwrite:?x4xf32>, %arg1_capture: index) {
diff --git a/iree/compiler/Dialect/Flow/IR/test/stream_folding.mlir b/iree/compiler/Dialect/Flow/IR/test/stream_folding.mlir index bac3cac..b357d60 100644 --- a/iree/compiler/Dialect/Flow/IR/test/stream_folding.mlir +++ b/iree/compiler/Dialect/Flow/IR/test/stream_folding.mlir
@@ -68,8 +68,8 @@ %arg1: tensor<8x?xf32>, %dim1: index) -> tensor<8x?xf32> { // CHECK: flow.ex.stream.fragment(%[[ARG1]]) : %0:2 = flow.ex.stream.fragment(%arg0, %arg1) : - // CHECK-SAME: (tensor<8x?xf32>{%[[DIM1]]}) -> %[[ARG1]] = - (tensor<4x?xf32>{%dim0}, tensor<8x?xf32>{%dim1}) -> (%arg0, %arg1) = + // CHECK-SAME: (tensor<8x?xf32>{%[[DIM1]]}) -> %[[ARG1]]{%[[DIM1]]} = + (tensor<4x?xf32>{%dim0}, tensor<8x?xf32>{%dim1}) -> (%arg0{%dim0}, %arg1{%dim1}) = // CHECK-NEXT: (%[[INNER_ARG:.+]]: tensor<8x?xf32>) -> tensor<8x?xf32> (%unused: tensor<4x?xf32>, %arg1: tensor<8x?xf32>) -> (tensor<4x?xf32>, tensor<8x?xf32>) { // CHECK-NEXT: flow.return %[[INNER_ARG]] : tensor<8x?xf32>
diff --git a/iree/compiler/Dialect/Flow/IR/test/stream_ops.mlir b/iree/compiler/Dialect/Flow/IR/test/stream_ops.mlir index 1dab1a9..f7323f5 100644 --- a/iree/compiler/Dialect/Flow/IR/test/stream_ops.mlir +++ b/iree/compiler/Dialect/Flow/IR/test/stream_ops.mlir
@@ -1,5 +1,3 @@ -// Tests printing and parsing of stream ops. - // RUN: iree-opt -split-input-file %s | iree-opt -split-input-file | IreeFileCheck %s flow.executable @dispatch_0 { @@ -29,3 +27,23 @@ // CHECK-NEXT: return return %0#0, %0#1 : tensor<4xf32>, tensor<4xf32> } + +// ----- + +// CHECK-LABEL: func @typeChange +// CHECK-SAME: (%[[ARG0:.+]]: tensor<?x?xf32>, %[[DIM0:.+]]: index, %[[DIM1:.+]]: index) +func @typeChange(%arg0: tensor<?x?xf32>, %dim0: index, %dim1: index) -> (tensor<4x?xf32>) { + // CHECK: %[[RET:.+]] = flow.ex.stream.fragment(%[[ARG0]], %[[DIM0]], %[[DIM1]]) : + // CHECK-SAME: (tensor<?x?xf32>{%[[DIM0]], %[[DIM1]]}, index, index) -> %[[ARG0]] as tensor<4x?xf32>{%[[DIM1]]} = + // CHECK-NEXT: (%[[STREAM_ARG0:.+]]: tensor<?x?xf32>, %[[STREAM_DIM0:.+]]: index, %[[STREAM_DIM1:.+]]: index) -> tensor<4x?xf32> { + %0 = flow.ex.stream.fragment(%arg0, %dim0, %dim1) : (tensor<?x?xf32>{%dim0, %dim1}, index, index) -> %arg0 as tensor<4x?xf32>{%dim1} = + (%stream_arg0: tensor<?x?xf32>, %stream_dim0: index, %stream_dim1: index) -> tensor<4x?xf32> { + // CHECK-NEXT: %[[STREAM_RET:.+]] = flow.tensor.reshape %[[STREAM_ARG0:.+]] : tensor<?x?xf32>{%[[STREAM_DIM0]], %[[STREAM_DIM1]]} -> tensor<4x?xf32>{%[[STREAM_DIM1]]} + %1 = flow.tensor.reshape %stream_arg0 : tensor<?x?xf32>{%stream_dim0, %stream_dim1} -> tensor<4x?xf32>{%stream_dim1} + // CHECK-NEXT: flow.return %[[STREAM_RET]] : tensor<4x?xf32> + flow.return %1 : tensor<4x?xf32> + // CHECK-NEXT: } + } + // CHECK-NEXT: return %[[RET]] : tensor<4x?xf32> + return %0 : tensor<4x?xf32> +}
diff --git a/iree/compiler/Dialect/Flow/IR/test/variable_folding.mlir b/iree/compiler/Dialect/Flow/IR/test/variable_folding.mlir index 09c62b3..3482898 100644 --- a/iree/compiler/Dialect/Flow/IR/test/variable_folding.mlir +++ b/iree/compiler/Dialect/Flow/IR/test/variable_folding.mlir
@@ -56,9 +56,9 @@ flow.variable @v : tensor<4xf32> // CHECK-LABEL: @fold_load_indirect func @fold_load_indirect() -> tensor<4xf32> { - %0 = flow.variable.address @v : !iree.ptr<tensor<4xf32>> + %0 = flow.variable.address @v : !util.ptr<tensor<4xf32>> // CHECK-NEXT: = flow.variable.load @v - %1 = flow.variable.load.indirect %0 : !iree.ptr<tensor<4xf32>> -> tensor<4xf32> + %1 = flow.variable.load.indirect %0 : !util.ptr<tensor<4xf32>> -> tensor<4xf32> return %1 : tensor<4xf32> } @@ -67,8 +67,8 @@ flow.variable @v mutable : tensor<4xf32> // CHECK-LABEL: @fold_store_indirect func @fold_store_indirect(%arg0 : tensor<4xf32>) { - %0 = flow.variable.address @v : !iree.ptr<tensor<4xf32>> + %0 = flow.variable.address @v : !util.ptr<tensor<4xf32>> // CHECK-NEXT: flow.variable.store %arg0, @v - flow.variable.store.indirect %arg0, %0 : tensor<4xf32> -> !iree.ptr<tensor<4xf32>> + flow.variable.store.indirect %arg0, %0 : tensor<4xf32> -> !util.ptr<tensor<4xf32>> return }
diff --git a/iree/compiler/Dialect/Flow/IR/test/variable_ops.mlir b/iree/compiler/Dialect/Flow/IR/test/variable_ops.mlir index 87a691e..7b9fe83 100644 --- a/iree/compiler/Dialect/Flow/IR/test/variable_ops.mlir +++ b/iree/compiler/Dialect/Flow/IR/test/variable_ops.mlir
@@ -46,9 +46,9 @@ // CHECK-LABEL: @loaded_indirect func @loaded_indirect() { // CHECK-NEXT: %[[ADDR:.+]] = flow.variable.address @v_loaded - %0 = flow.variable.address @v_loaded : !iree.ptr<tensor<4xf32>> + %0 = flow.variable.address @v_loaded : !util.ptr<tensor<4xf32>> // CHECK-NEXT: = flow.variable.load.indirect %[[ADDR]] - %1 = flow.variable.load.indirect %0 : !iree.ptr<tensor<4xf32>> -> tensor<4xf32> + %1 = flow.variable.load.indirect %0 : !util.ptr<tensor<4xf32>> -> tensor<4xf32> return } @@ -60,8 +60,8 @@ // CHECK-NEXT: %[[VALUE:.+]] = "test_flow.tensor" %0 = "test_flow.tensor"() : () -> tensor<4xf32> // CHECK-NEXT: %[[ADDR:.+]] = flow.variable.address @v_stored - %1 = flow.variable.address @v_stored : !iree.ptr<tensor<4xf32>> + %1 = flow.variable.address @v_stored : !util.ptr<tensor<4xf32>> // CHECK-NEXT: flow.variable.store.indirect %[[VALUE]], %[[ADDR]] - flow.variable.store.indirect %0, %1 : tensor<4xf32> -> !iree.ptr<tensor<4xf32>> + flow.variable.store.indirect %0, %1 : tensor<4xf32> -> !util.ptr<tensor<4xf32>> return }
diff --git a/iree/compiler/Dialect/Flow/Transforms/BUILD b/iree/compiler/Dialect/Flow/Transforms/BUILD index e7515c9..9b0d0a6 100644 --- a/iree/compiler/Dialect/Flow/Transforms/BUILD +++ b/iree/compiler/Dialect/Flow/Transforms/BUILD
@@ -33,7 +33,8 @@ srcs = [ "ConvertConv2D1x1ToMatmulPass.cpp", "ConvertConv2DToImg2ColPass.cpp", - "ConvertToFlowTensorOps.cpp", + "ConvertLinalgTensorOps.cpp", + "ConvertTensorOps.cpp", "DeduplicateExecutables.cpp", "DestructiveUpdateUtils.cpp", "DispatchLinalgOnTensors.cpp", @@ -68,12 +69,12 @@ "//iree/compiler/Dialect/Flow/Conversion/TensorToFlow", "//iree/compiler/Dialect/Flow/IR", "//iree/compiler/Dialect/HAL/IR", - "//iree/compiler/Dialect/IREE/IR", "//iree/compiler/Dialect/LinalgExt/IR", "//iree/compiler/Dialect/LinalgExt/Transforms", "//iree/compiler/Dialect/Shape/IR", "//iree/compiler/Dialect/Shape/Transforms", "//iree/compiler/Dialect/Shape/Utils:TypeConversion", + "//iree/compiler/Dialect/Util/IR", "//iree/compiler/Utils", "@llvm-project//llvm:Support", "@llvm-project//mlir:Affine",
diff --git a/iree/compiler/Dialect/Flow/Transforms/CMakeLists.txt b/iree/compiler/Dialect/Flow/Transforms/CMakeLists.txt index d9a29d3..17924a2 100644 --- a/iree/compiler/Dialect/Flow/Transforms/CMakeLists.txt +++ b/iree/compiler/Dialect/Flow/Transforms/CMakeLists.txt
@@ -30,7 +30,8 @@ SRCS "ConvertConv2D1x1ToMatmulPass.cpp" "ConvertConv2DToImg2ColPass.cpp" - "ConvertToFlowTensorOps.cpp" + "ConvertLinalgTensorOps.cpp" + "ConvertTensorOps.cpp" "DeduplicateExecutables.cpp" "DestructiveUpdateUtils.cpp" "DispatchLinalgOnTensors.cpp" @@ -72,12 +73,12 @@ iree::compiler::Dialect::Flow::Conversion::TensorToFlow iree::compiler::Dialect::Flow::IR iree::compiler::Dialect::HAL::IR - iree::compiler::Dialect::IREE::IR iree::compiler::Dialect::LinalgExt::IR iree::compiler::Dialect::LinalgExt::Transforms iree::compiler::Dialect::Shape::IR iree::compiler::Dialect::Shape::Transforms iree::compiler::Dialect::Shape::Utils::TypeConversion + iree::compiler::Dialect::Util::IR iree::compiler::Utils PUBLIC )
diff --git a/iree/compiler/Dialect/Flow/Transforms/ConvertToFlowTensorOps.cpp b/iree/compiler/Dialect/Flow/Transforms/ConvertLinalgTensorOps.cpp similarity index 83% rename from iree/compiler/Dialect/Flow/Transforms/ConvertToFlowTensorOps.cpp rename to iree/compiler/Dialect/Flow/Transforms/ConvertLinalgTensorOps.cpp index a4e302a..b89682c 100644 --- a/iree/compiler/Dialect/Flow/Transforms/ConvertToFlowTensorOps.cpp +++ b/iree/compiler/Dialect/Flow/Transforms/ConvertLinalgTensorOps.cpp
@@ -4,14 +4,12 @@ // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -#include "iree/compiler/Dialect/Flow/Conversion/TensorToFlow/ConvertTensorToFlow.h" #include "iree/compiler/Dialect/Flow/IR/FlowDialect.h" #include "iree/compiler/Dialect/Flow/IR/FlowOps.h" #include "iree/compiler/Dialect/Flow/IR/FlowTypes.h" #include "iree/compiler/Dialect/Flow/Transforms/PassDetail.h" #include "iree/compiler/Dialect/Flow/Transforms/Passes.h" #include "mlir/Dialect/Linalg/IR/LinalgOps.h" -#include "mlir/Dialect/MemRef/IR/MemRef.h" #include "mlir/Dialect/StandardOps/IR/Ops.h" #include "mlir/Dialect/Tensor/IR/Tensor.h" #include "mlir/IR/PatternMatch.h" @@ -20,7 +18,7 @@ #include "mlir/Support/LLVM.h" #include "mlir/Transforms/GreedyPatternRewriteDriver.h" -#define DEBUG_TYPE "iree-flow-convert-to-flow-tensor-ops" +#define DEBUG_TYPE "iree-flow-convert-linalg-tensor-ops" namespace mlir { namespace iree_compiler { @@ -29,7 +27,7 @@ namespace { -/// Generates `memref.dim` operations to get the dynamic sizes of a value `v`. +/// Generates `tensor.dim` operations to get the dynamic sizes of a value `v`. static SmallVector<Value, 4> getDynamicDimValues(OpBuilder &b, Location loc, Value v) { SmallVector<Value, 4> dynamicDims; @@ -92,19 +90,19 @@ } }; -/// Converts operations that can map to flow.tensor.* operations. -struct ConvertToFlowTensorOpsPass - : public ConvertToFlowTensorOpsBase<ConvertToFlowTensorOpsPass> { - ConvertToFlowTensorOpsPass(bool runBefore) { +/// Converts linalg operations that can map to flow.tensor.* operations. 
+struct ConvertLinalgTensorOpsPass + : public ConvertLinalgTensorOpsBase<ConvertLinalgTensorOpsPass> { + ConvertLinalgTensorOpsPass(bool runBefore) { runBeforeDispatchRegionFormation = runBefore; } - ConvertToFlowTensorOpsPass(const ConvertToFlowTensorOpsPass &that) { + ConvertLinalgTensorOpsPass(const ConvertLinalgTensorOpsPass &that) { runBeforeDispatchRegionFormation = that.runBeforeDispatchRegionFormation; } void getDependentDialects(DialectRegistry ®istry) const override { - registry.insert<IREE::Flow::FlowDialect, memref::MemRefDialect, - mlir::StandardOpsDialect>(); + registry.insert<IREE::Flow::FlowDialect, tensor::TensorDialect, + linalg::LinalgDialect, mlir::StandardOpsDialect>(); } void runOnOperation() override { FuncOp funcOp = getOperation(); @@ -112,9 +110,6 @@ context->allowUnregisteredDialects(true); RewritePatternSet patterns(&getContext()); if (runBeforeDispatchRegionFormation) { - // Rewrite tensor -> flow.tensor ops. - populateTensorToFlowPatterns(&getContext(), patterns); - // Rewrite linalg.tensor -> flow.tensor ops. patterns.insert< LinalgTensorReshapeToFlowTensorReshape<linalg::TensorCollapseShapeOp>, LinalgTensorReshapeToFlowTensorReshape<linalg::TensorExpandShapeOp>>( @@ -130,9 +125,9 @@ }; } // namespace -std::unique_ptr<OperationPass<FuncOp>> createConvertToFlowTensorOpsPass( +std::unique_ptr<OperationPass<FuncOp>> createConvertLinalgTensorOpsPass( bool runBeforeDispatchRegionFormation) { - return std::make_unique<ConvertToFlowTensorOpsPass>( + return std::make_unique<ConvertLinalgTensorOpsPass>( runBeforeDispatchRegionFormation); }
diff --git a/iree/compiler/Dialect/Flow/Transforms/ConvertTensorOps.cpp b/iree/compiler/Dialect/Flow/Transforms/ConvertTensorOps.cpp new file mode 100644 index 0000000..f0cf6de --- /dev/null +++ b/iree/compiler/Dialect/Flow/Transforms/ConvertTensorOps.cpp
@@ -0,0 +1,55 @@ +// Copyright 2021 The IREE Authors +// +// Licensed under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + +#include "iree/compiler/Dialect/Flow/Conversion/TensorToFlow/ConvertTensorToFlow.h" +#include "iree/compiler/Dialect/Flow/IR/FlowDialect.h" +#include "iree/compiler/Dialect/Flow/IR/FlowOps.h" +#include "iree/compiler/Dialect/Flow/Transforms/PassDetail.h" +#include "iree/compiler/Dialect/Flow/Transforms/Passes.h" +#include "mlir/Dialect/StandardOps/IR/Ops.h" +#include "mlir/Dialect/Tensor/IR/Tensor.h" +#include "mlir/IR/PatternMatch.h" +#include "mlir/Pass/Pass.h" +#include "mlir/Pass/PassManager.h" +#include "mlir/Transforms/GreedyPatternRewriteDriver.h" + +#define DEBUG_TYPE "iree-flow-convert-tensor-ops" + +namespace mlir { +namespace iree_compiler { +namespace IREE { +namespace Flow { + +namespace { + +/// Converts operations that can map to flow.tensor.* operations. +struct ConvertTensorOpsPass + : public ConvertTensorOpsBase<ConvertTensorOpsPass> { + void getDependentDialects(DialectRegistry ®istry) const override { + registry.insert<IREE::Flow::FlowDialect, mlir::StandardOpsDialect, + tensor::TensorDialect>(); + } + void runOnOperation() override { + FuncOp funcOp = getOperation(); + MLIRContext *context = funcOp->getContext(); + context->allowUnregisteredDialects(true); + RewritePatternSet patterns(&getContext()); + populateTensorToFlowPatterns(&getContext(), patterns); + if (failed(applyPatternsAndFoldGreedily(funcOp, std::move(patterns)))) { + return signalPassFailure(); + } + } +}; +} // namespace + +std::unique_ptr<OperationPass<FuncOp>> createConvertTensorOpsPass() { + return std::make_unique<ConvertTensorOpsPass>(); +} + +} // namespace Flow +} // namespace IREE +} // namespace iree_compiler +} // namespace mlir
diff --git a/iree/compiler/Dialect/Flow/Transforms/DestructiveUpdateUtils.cpp b/iree/compiler/Dialect/Flow/Transforms/DestructiveUpdateUtils.cpp index 81483a4..bef1726 100644 --- a/iree/compiler/Dialect/Flow/Transforms/DestructiveUpdateUtils.cpp +++ b/iree/compiler/Dialect/Flow/Transforms/DestructiveUpdateUtils.cpp
@@ -13,8 +13,8 @@ #include "iree/compiler/Dialect/Flow/IR/FlowOps.h" #include "iree/compiler/Dialect/HAL/IR/HALOps.h" -#include "iree/compiler/Dialect/IREE/IR/IREEDialect.h" #include "iree/compiler/Dialect/LinalgExt/IR/LinalgExtInterfaces.h" +#include "iree/compiler/Dialect/Util/IR/UtilDialect.h" #include "llvm/ADT/TypeSwitch.h" #include "llvm/Support/Debug.h" #include "mlir/Dialect/Linalg/IR/LinalgOps.h"
diff --git a/iree/compiler/Dialect/Flow/Transforms/DispatchLinalgOnTensors.cpp b/iree/compiler/Dialect/Flow/Transforms/DispatchLinalgOnTensors.cpp index 45588fa..63f57ff 100644 --- a/iree/compiler/Dialect/Flow/Transforms/DispatchLinalgOnTensors.cpp +++ b/iree/compiler/Dialect/Flow/Transforms/DispatchLinalgOnTensors.cpp
@@ -657,6 +657,9 @@ /// order, i.e. starting from the outer-most to innermost. static SmallVector<unsigned> getPartitionedLoops(Operation *op) { SmallVector<unsigned> partitionedLoops; + if (auto mmt4dOp = dyn_cast<linalg::Mmt4DOp>(op)) { + return {0, 1}; + } if (auto linalgOp = dyn_cast<linalg::LinalgOp>(op)) { size_t numOuterParallelLoops = getNumOuterParallelLoops(linalgOp); partitionedLoops =
diff --git a/iree/compiler/Dialect/Flow/Transforms/ExportBenchmarkFuncs.cpp b/iree/compiler/Dialect/Flow/Transforms/ExportBenchmarkFuncs.cpp index 6b08e9b..aff9e8a 100644 --- a/iree/compiler/Dialect/Flow/Transforms/ExportBenchmarkFuncs.cpp +++ b/iree/compiler/Dialect/Flow/Transforms/ExportBenchmarkFuncs.cpp
@@ -7,8 +7,8 @@ #include "iree/compiler/Dialect/Flow/IR/FlowOps.h" #include "iree/compiler/Dialect/Flow/Transforms/PassDetail.h" #include "iree/compiler/Dialect/Flow/Transforms/Passes.h" -#include "iree/compiler/Dialect/IREE/IR/IREEDialect.h" -#include "iree/compiler/Dialect/IREE/IR/IREEOps.h" +#include "iree/compiler/Dialect/Util/IR/UtilDialect.h" +#include "iree/compiler/Dialect/Util/IR/UtilOps.h" #include "mlir/Dialect/StandardOps/IR/Ops.h" #include "mlir/IR/BlockAndValueMapping.h" #include "mlir/IR/Builders.h" @@ -28,7 +28,7 @@ : public ExportBenchmarkFuncsBase<ExportBenchmarkFuncsPass> { public: void getDependentDialects(DialectRegistry& registry) const override { - registry.insert<IREEDialect>(); + registry.insert<IREE::Util::UtilDialect>(); } void runOnOperation() override { @@ -44,7 +44,10 @@ } } for (auto entryFuncOp : entryFuncOps) { - createEntryPointBenchmarkFunc(moduleOp, entryFuncOp); + if (failed(createEntryPointBenchmarkFunc(moduleOp, entryFuncOp))) { + signalPassFailure(); + return; + } } } @@ -55,7 +58,11 @@ std::string baseName = "_benchmark_input_"; std::string name = baseName + std::to_string(uniqueId++); auto initialValue = moduleBuilder.getZeroAttr(inputType); - assert(initialValue && "failed to get zero attr for type"); + if (!initialValue) { + mlir::emitError(loc) << "unsupported function argument type: " + << inputType; + return {}; + } auto variableOp = moduleBuilder.create<VariableOp>(loc, name, /*isMutable=*/false, inputType, initialValue); @@ -64,7 +71,8 @@ return variableOp; } - void createEntryPointBenchmarkFunc(ModuleOp moduleOp, FuncOp entryFuncOp) { + LogicalResult createEntryPointBenchmarkFunc(ModuleOp moduleOp, + FuncOp entryFuncOp) { OpBuilder moduleBuilder(&getContext()); moduleBuilder.setInsertionPointAfter(entryFuncOp); @@ -72,8 +80,9 @@ Location loc = entryFuncOp.getLoc(); SmallVector<IREE::Flow::VariableOp, 4> dummyInputVariableOps; for (auto inputType : entryFuncOp.getType().getInputs()) { - 
dummyInputVariableOps.push_back( - createDummyInputVariableOp(loc, inputType, moduleBuilder)); + auto dummyVar = createDummyInputVariableOp(loc, inputType, moduleBuilder); + if (!dummyVar) return failure(); + dummyInputVariableOps.push_back(dummyVar); } // Create a `() -> ()` entry point op the benchmark tool can run. @@ -102,7 +111,7 @@ // Sink all results with do_not_optimize to ensure that DCE does not // remove the call. for (auto result : callOp.getResults()) { - blockBuilder.create<IREE::DoNotOptimizeOp>(loc, result); + blockBuilder.create<IREE::Util::DoNotOptimizeOp>(loc, result); } blockBuilder.create<mlir::ReturnOp>(loc); @@ -110,6 +119,8 @@ entryFuncOp->setAttr("noinline", moduleBuilder.getUnitAttr()); entryFuncOp->removeAttr("iree.reflection"); entryFuncOp.setPrivate(); + + return success(); } int uniqueId = 0;
diff --git a/iree/compiler/Dialect/Flow/Transforms/InsertConstantClones.cpp b/iree/compiler/Dialect/Flow/Transforms/InsertConstantClones.cpp index 585bf50..2ee4512 100644 --- a/iree/compiler/Dialect/Flow/Transforms/InsertConstantClones.cpp +++ b/iree/compiler/Dialect/Flow/Transforms/InsertConstantClones.cpp
@@ -10,8 +10,8 @@ #include "iree/compiler/Dialect/Flow/IR/FlowOps.h" #include "iree/compiler/Dialect/Flow/Transforms/PassDetail.h" #include "iree/compiler/Dialect/Flow/Transforms/Passes.h" -#include "iree/compiler/Dialect/IREE/IR/IREEDialect.h" -#include "iree/compiler/Dialect/IREE/IR/IREEOps.h" +#include "iree/compiler/Dialect/Util/IR/UtilDialect.h" +#include "iree/compiler/Dialect/Util/IR/UtilOps.h" #include "mlir/Dialect/StandardOps/IR/Ops.h" #include "mlir/IR/Attributes.h" #include "mlir/IR/Builders.h" @@ -37,7 +37,7 @@ : public InsertConstantClonesBase<InsertConstantClonesPass> { public: void getDependentDialects(DialectRegistry ®istry) const override { - registry.insert<IREEDialect, IREE::Flow::FlowDialect>(); + registry.insert<IREE::Util::UtilDialect, IREE::Flow::FlowDialect>(); } void runOnOperation() override { @@ -60,7 +60,7 @@ auto op = value.getDefiningOp(); if (!op) return false; if (op->hasTrait<OpTrait::ConstantLike>() || - isa<IREE::UnfoldableConstantOp>(op)) { + isa<IREE::Util::UnfoldableConstantOp>(op)) { return true; } else if (auto loadOp = dyn_cast<IREE::Flow::VariableLoadOp>(op)) { return !loadOp.getLoadedVariable().is_mutable();
diff --git a/iree/compiler/Dialect/Flow/Transforms/Passes.cpp b/iree/compiler/Dialect/Flow/Transforms/Passes.cpp index 86696f6..18c51b9 100644 --- a/iree/compiler/Dialect/Flow/Transforms/Passes.cpp +++ b/iree/compiler/Dialect/Flow/Transforms/Passes.cpp
@@ -125,15 +125,16 @@ passManager.addNestedPass<FuncOp>(mlir::createLinalgDetensorizePass()); } passManager.addPass(memref::createResolveShapedTypeResultDimsPass()); + passManager.addNestedPass<FuncOp>(IREE::Flow::createConvertTensorOpsPass()); passManager.addNestedPass<FuncOp>( - IREE::Flow::createConvertToFlowTensorOpsPass( + IREE::Flow::createConvertLinalgTensorOpsPass( /*runBeforeDispatchRegionFormation=*/true)); passManager.addNestedPass<FuncOp>(mlir::createCanonicalizerPass()); passManager.addNestedPass<FuncOp>( IREE::Flow::createDispatchLinalgOnTensorsPass()); passManager.addPass(memref::createResolveShapedTypeResultDimsPass()); passManager.addNestedPass<FuncOp>( - IREE::Flow::createConvertToFlowTensorOpsPass( + IREE::Flow::createConvertLinalgTensorOpsPass( /*runBeforeDispatchRegionFormation=*/false)); // NOTE: required because the current dispatch-linalg-on-tensors pass // creates a lot of dead IR that needs to be cleaned up.
diff --git a/iree/compiler/Dialect/Flow/Transforms/Passes.h b/iree/compiler/Dialect/Flow/Transforms/Passes.h index 445190e..130c13f 100644 --- a/iree/compiler/Dialect/Flow/Transforms/Passes.h +++ b/iree/compiler/Dialect/Flow/Transforms/Passes.h
@@ -61,11 +61,17 @@ /// the most inner loops. std::unique_ptr<OperationPass<FuncOp>> createInterchangeGenericOpsPass(); -// Convert operations to equivalent flow.tensor.* ops. +// Convert tensor operations to equivalent flow.tensor.* operations // `runBeforeDispatchRegionFormation` controls whether to run before dispatch // region creation. If run after, it will catch operations that were left // outside of dispatch regions and could be represented as flow.tensor.* ops. -std::unique_ptr<OperationPass<FuncOp>> createConvertToFlowTensorOpsPass( +std::unique_ptr<OperationPass<FuncOp>> createConvertTensorOpsPass(); + +// Convert linalg.tensor operations to equivalent flow.tensor.* ops. +// `runBeforeDispatchRegionFormation` controls whether to run before dispatch +// region creation. If run after, it will catch operations that were left +// outside of dispatch regions and could be represented as flow.tensor.* ops. +std::unique_ptr<OperationPass<FuncOp>> createConvertLinalgTensorOpsPass( bool runBeforeDispatchRegionFormation = true); // Promote I1 tensor constants to I8 tensors to match later operations.
diff --git a/iree/compiler/Dialect/Flow/Transforms/Passes.td b/iree/compiler/Dialect/Flow/Transforms/Passes.td index 4dfc814..60f708a 100644 --- a/iree/compiler/Dialect/Flow/Transforms/Passes.td +++ b/iree/compiler/Dialect/Flow/Transforms/Passes.td
@@ -21,10 +21,16 @@ let constructor = "mlir::iree_compiler::IREE::Flow::createConvertConv2DToImg2ColPass()"; } -def ConvertToFlowTensorOps : - Pass<"iree-flow-convert-to-flow-tensor-ops-pass", "FuncOp"> { - let summary = "Convert operations to equivalent flow.tensor.* operations"; - let constructor = "mlir::iree_compiler::IREE::Flow::createConvertToFlowTensorOpsPass()"; +def ConvertTensorOps : + Pass<"iree-flow-convert-tensor-ops-pass", "FuncOp"> { + let summary = "Convert tensor operations to equivalent flow.tensor.* operations"; + let constructor = "mlir::iree_compiler::IREE::Flow::createConvertTensorOpsPass()"; +} + +def ConvertLinalgTensorOps : + Pass<"iree-flow-convert-linalg-tensor-ops-pass", "FuncOp"> { + let summary = "Convert linalg operations to equivalent flow.tensor.* operations"; + let constructor = "mlir::iree_compiler::IREE::Flow::createConvertLinalgTensorOpsPass()"; let options = [ Option<"runBeforeDispatchRegionFormation", "run-before-dispatch-region-formation", "bool", /*default=*/"true", "Run the pass before dispatch region formation">
diff --git a/iree/compiler/Dialect/Flow/Transforms/SimplifyVariableAccesses.cpp b/iree/compiler/Dialect/Flow/Transforms/SimplifyVariableAccesses.cpp index 1c4b048..99c2c64 100644 --- a/iree/compiler/Dialect/Flow/Transforms/SimplifyVariableAccesses.cpp +++ b/iree/compiler/Dialect/Flow/Transforms/SimplifyVariableAccesses.cpp
@@ -79,7 +79,7 @@ static bool doesOpBlockMotion(Operation *op) { return isa<mlir::CallOpInterface>(op) || - op->hasTrait<OpTrait::IREE::YieldPoint>(); + op->hasTrait<OpTrait::IREE::Util::YieldPoint>(); } static void moveOpUpInBlock(Block &block, Operation *op) {
diff --git a/iree/compiler/Dialect/Flow/Transforms/TypeConverter.cpp b/iree/compiler/Dialect/Flow/Transforms/TypeConverter.cpp index 2aeba24..24b090a 100644 --- a/iree/compiler/Dialect/Flow/Transforms/TypeConverter.cpp +++ b/iree/compiler/Dialect/Flow/Transforms/TypeConverter.cpp
@@ -6,7 +6,7 @@ #include "iree/compiler/Dialect/Flow/Transforms/TypeConverter.h" -#include "iree/compiler/Dialect/IREE/IR/IREETypes.h" +#include "iree/compiler/Dialect/Util/IR/UtilTypes.h" #include "mlir/IR/BuiltinTypes.h" namespace mlir {
diff --git a/iree/compiler/Dialect/Flow/Transforms/test/BUILD b/iree/compiler/Dialect/Flow/Transforms/test/BUILD index b16ee25..12c0f37 100644 --- a/iree/compiler/Dialect/Flow/Transforms/test/BUILD +++ b/iree/compiler/Dialect/Flow/Transforms/test/BUILD
@@ -19,8 +19,8 @@ [ "conv1x1_to_matmul.mlir", "conv2d_to_img2col.mlir", - "convert_to_flow_tensor_ops_after.mlir", - "convert_to_flow_tensor_ops_before.mlir", + "convert_linalg_tensor_ops_after.mlir", + "convert_linalg_tensor_ops_before.mlir", "deduplicate_executables.mlir", "dispatch_linalg_on_tensors.mlir", "dispatch_linalg_on_tensors_elementwise.mlir",
diff --git a/iree/compiler/Dialect/Flow/Transforms/test/CMakeLists.txt b/iree/compiler/Dialect/Flow/Transforms/test/CMakeLists.txt index 01a4294..83b449a 100644 --- a/iree/compiler/Dialect/Flow/Transforms/test/CMakeLists.txt +++ b/iree/compiler/Dialect/Flow/Transforms/test/CMakeLists.txt
@@ -16,8 +16,8 @@ SRCS "conv1x1_to_matmul.mlir" "conv2d_to_img2col.mlir" - "convert_to_flow_tensor_ops_after.mlir" - "convert_to_flow_tensor_ops_before.mlir" + "convert_linalg_tensor_ops_after.mlir" + "convert_linalg_tensor_ops_before.mlir" "deduplicate_executables.mlir" "dispatch_linalg_on_tensors.mlir" "dispatch_linalg_on_tensors_elementwise.mlir"
diff --git a/iree/compiler/Dialect/Flow/Transforms/test/convert_to_flow_tensor_ops_after.mlir b/iree/compiler/Dialect/Flow/Transforms/test/convert_linalg_tensor_ops_after.mlir similarity index 91% rename from iree/compiler/Dialect/Flow/Transforms/test/convert_to_flow_tensor_ops_after.mlir rename to iree/compiler/Dialect/Flow/Transforms/test/convert_linalg_tensor_ops_after.mlir index f126e54..e3e964e 100644 --- a/iree/compiler/Dialect/Flow/Transforms/test/convert_to_flow_tensor_ops_after.mlir +++ b/iree/compiler/Dialect/Flow/Transforms/test/convert_linalg_tensor_ops_after.mlir
@@ -1,4 +1,4 @@ -// RUN: iree-opt -iree-flow-convert-to-flow-tensor-ops-pass='run-before-dispatch-region-formation=false' -canonicalize -cse -split-input-file %s | IreeFileCheck %s +// RUN: iree-opt -iree-flow-convert-linalg-tensor-ops-pass='run-before-dispatch-region-formation=false' -canonicalize -cse -split-input-file %s | IreeFileCheck %s func @turn_fill_into_splat(%arg0: tensor<?x?xf32>, %arg1: tensor<f32>, %arg2: index, %arg3: index, %arg4: index, %arg5: index) -> tensor<?x?xf32> { %c0 = constant 0 : index
diff --git a/iree/compiler/Dialect/Flow/Transforms/test/convert_to_flow_tensor_ops_before.mlir b/iree/compiler/Dialect/Flow/Transforms/test/convert_linalg_tensor_ops_before.mlir similarity index 86% rename from iree/compiler/Dialect/Flow/Transforms/test/convert_to_flow_tensor_ops_before.mlir rename to iree/compiler/Dialect/Flow/Transforms/test/convert_linalg_tensor_ops_before.mlir index 863d717..621912c 100644 --- a/iree/compiler/Dialect/Flow/Transforms/test/convert_to_flow_tensor_ops_before.mlir +++ b/iree/compiler/Dialect/Flow/Transforms/test/convert_linalg_tensor_ops_before.mlir
@@ -1,4 +1,4 @@ -// RUN: iree-opt -iree-flow-convert-to-flow-tensor-ops-pass -canonicalize -cse -split-input-file %s | IreeFileCheck %s +// RUN: iree-opt -iree-flow-convert-linalg-tensor-ops-pass -canonicalize -cse -split-input-file %s | IreeFileCheck %s func @tensor_reshape(%arg0 : tensor<?x4x?x5x?x6xf32>, %arg1 : tensor<20x?x40xf32>) -> (tensor<?x5x?xf32>, tensor<5x4x?x4x2x4x5xf32>)
diff --git a/iree/compiler/Dialect/Flow/Transforms/test/dispatch_linalg_on_tensors.mlir b/iree/compiler/Dialect/Flow/Transforms/test/dispatch_linalg_on_tensors.mlir index 56ae3cd..afe63b1 100644 --- a/iree/compiler/Dialect/Flow/Transforms/test/dispatch_linalg_on_tensors.mlir +++ b/iree/compiler/Dialect/Flow/Transforms/test/dispatch_linalg_on_tensors.mlir
@@ -853,8 +853,8 @@ %c0 = constant 0 : index %c1 = constant 1 : index %cst = constant 0.000000e+00 : f32 - %0 = iree.dynamic_shape_constant dense<[[1.500000e+01, 1.400000e+01, 1.300000e+01], [1.200000e+01, 1.100000e+01, 1.000000e+01], [9.000000e+00, 8.000000e+00, 7.000000e+00], [6.000000e+00, 5.000000e+00, 4.000000e+00], [3.000000e+00, 2.000000e+00, 1.000000e+00]]> : tensor<5x3xf32> -> tensor<?x?xf32> - %1 = iree.dynamic_shape_constant dense<[[1.500000e+01, 1.400000e+01, 1.300000e+01, 1.200000e+01, 1.100000e+01], [1.000000e+01, 9.000000e+00, 8.000000e+00, 7.000000e+00, 6.000000e+00], [5.000000e+00, 4.000000e+00, 3.000000e+00, 2.000000e+00, 1.000000e+00]]> : tensor<3x5xf32> -> tensor<?x?xf32> + %0 = util.dynamic_shape_constant dense<[[1.500000e+01, 1.400000e+01, 1.300000e+01], [1.200000e+01, 1.100000e+01, 1.000000e+01], [9.000000e+00, 8.000000e+00, 7.000000e+00], [6.000000e+00, 5.000000e+00, 4.000000e+00], [3.000000e+00, 2.000000e+00, 1.000000e+00]]> : tensor<5x3xf32> -> tensor<?x?xf32> + %1 = util.dynamic_shape_constant dense<[[1.500000e+01, 1.400000e+01, 1.300000e+01, 1.200000e+01, 1.100000e+01], [1.000000e+01, 9.000000e+00, 8.000000e+00, 7.000000e+00, 6.000000e+00], [5.000000e+00, 4.000000e+00, 3.000000e+00, 2.000000e+00, 1.000000e+00]]> : tensor<3x5xf32> -> tensor<?x?xf32> %2 = tensor.dim %0, %c0 : tensor<?x?xf32> %3 = tensor.dim %1, %c1 : tensor<?x?xf32> %4 = linalg.init_tensor [%2, %3] : tensor<?x?xf32>
diff --git a/iree/compiler/Dialect/Flow/Transforms/test/export_benchmark_funcs.mlir b/iree/compiler/Dialect/Flow/Transforms/test/export_benchmark_funcs.mlir index eb59861..687c3ad 100644 --- a/iree/compiler/Dialect/Flow/Transforms/test/export_benchmark_funcs.mlir +++ b/iree/compiler/Dialect/Flow/Transforms/test/export_benchmark_funcs.mlir
@@ -14,8 +14,8 @@ // CHECK-DAG: %[[ARG0:.+]] = flow.variable.load @[[MAIN_IN_0]] : tensor<5x3xf32> // CHECK-DAG: %[[ARG1:.+]] = flow.variable.load @[[MAIN_IN_1]] : tensor<3x5xf32> // CHECK: %[[RET:.+]]:2 = call @two_dispatch(%[[ARG0]], %[[ARG1]]) -// CHECK-DAG: iree.do_not_optimize(%[[RET]]#0) : tensor<5x5xf32> -// CHECK-DAG: iree.do_not_optimize(%[[RET]]#1) : tensor<3x5xf32> +// CHECK-DAG: util.do_not_optimize(%[[RET]]#0) : tensor<5x5xf32> +// CHECK-DAG: util.do_not_optimize(%[[RET]]#1) : tensor<3x5xf32> // ----- @@ -38,6 +38,6 @@ // CHECK-DAG: %[[ARG0:.+]] = flow.variable.load @_benchmark_input_0 : tensor<i32> // CHECK-DAG: %[[ARG1:.+]] = flow.variable.load @_benchmark_input_1 : tensor<i32> // CHECK: %[[RET0:.+]] = call @while(%[[ARG0]], %[[ARG1]]) -// CHECK: iree.do_not_optimize(%[[RET0]]) : tensor<i32> +// CHECK: util.do_not_optimize(%[[RET0]]) : tensor<i32> // CHECK: return // CHECK: }
diff --git a/iree/compiler/Dialect/Flow/Transforms/test/form_streams.mlir b/iree/compiler/Dialect/Flow/Transforms/test/form_streams.mlir index af834cb..069bd70 100644 --- a/iree/compiler/Dialect/Flow/Transforms/test/form_streams.mlir +++ b/iree/compiler/Dialect/Flow/Transforms/test/form_streams.mlir
@@ -329,8 +329,8 @@ // CHECK-DAG: %[[D1:.+]] = flow.dispatch @dispatch_1::@dispatch_1 // CHECK-NEXT: flow.return %[[D1]] %d1 = flow.dispatch @dispatch_1::@dispatch_1[%w](%c1) : (i32) -> tensor<i32> - // CHECK: %[[SE_USER:.+]] = iree.do_not_optimize(%[[S1]]) - %side_effecting_user = iree.do_not_optimize(%d1) : tensor<i32> + // CHECK: %[[SE_USER:.+]] = util.do_not_optimize(%[[S1]]) + %side_effecting_user = util.do_not_optimize(%d1) : tensor<i32> %c2 = constant 2 : i32 // CHECK: %[[S2:.+]] = flow.ex.stream.fragment // CHECK: %[[C2:.+]] = constant 2
diff --git a/iree/compiler/Dialect/Flow/Transforms/test/hoist_unstreamable_ops.mlir b/iree/compiler/Dialect/Flow/Transforms/test/hoist_unstreamable_ops.mlir index 811e3e4..1566d74 100644 --- a/iree/compiler/Dialect/Flow/Transforms/test/hoist_unstreamable_ops.mlir +++ b/iree/compiler/Dialect/Flow/Transforms/test/hoist_unstreamable_ops.mlir
@@ -53,8 +53,8 @@ // CHECK-DAG: constant 1 // CHECK-DAG: constant 2 %c1 = constant 1 : index - // CHECK: iree.do_not_optimize() - iree.do_not_optimize() + // CHECK: util.do_not_optimize() + util.do_not_optimize() %c2 = constant 2 : index return }
diff --git a/iree/compiler/Dialect/HAL/Conversion/BUILD b/iree/compiler/Dialect/HAL/Conversion/BUILD index d35ee5b..1e51f42 100644 --- a/iree/compiler/Dialect/HAL/Conversion/BUILD +++ b/iree/compiler/Dialect/HAL/Conversion/BUILD
@@ -24,8 +24,8 @@ deps = [ "//iree/compiler/Dialect/HAL/IR", "//iree/compiler/Dialect/HAL/Utils", - "//iree/compiler/Dialect/IREE/IR", "//iree/compiler/Dialect/Shape/IR", + "//iree/compiler/Dialect/Util/IR", "@llvm-project//mlir:IR", "@llvm-project//mlir:MemRefDialect", "@llvm-project//mlir:StandardOps",
diff --git a/iree/compiler/Dialect/HAL/Conversion/CMakeLists.txt b/iree/compiler/Dialect/HAL/Conversion/CMakeLists.txt index deff9a7..36b659c 100644 --- a/iree/compiler/Dialect/HAL/Conversion/CMakeLists.txt +++ b/iree/compiler/Dialect/HAL/Conversion/CMakeLists.txt
@@ -27,8 +27,8 @@ MLIRTransforms iree::compiler::Dialect::HAL::IR iree::compiler::Dialect::HAL::Utils - iree::compiler::Dialect::IREE::IR iree::compiler::Dialect::Shape::IR + iree::compiler::Dialect::Util::IR PUBLIC )
diff --git a/iree/compiler/Dialect/HAL/Conversion/ConversionTarget.cpp b/iree/compiler/Dialect/HAL/Conversion/ConversionTarget.cpp index ea6832c..68c60b6 100644 --- a/iree/compiler/Dialect/HAL/Conversion/ConversionTarget.cpp +++ b/iree/compiler/Dialect/HAL/Conversion/ConversionTarget.cpp
@@ -9,8 +9,8 @@ #include "iree/compiler/Dialect/HAL/Conversion/TypeConverter.h" #include "iree/compiler/Dialect/HAL/IR/HALOps.h" #include "iree/compiler/Dialect/HAL/Utils/TypeUtils.h" -#include "iree/compiler/Dialect/IREE/IR/IREETypes.h" #include "iree/compiler/Dialect/Shape/IR/ShapeOps.h" +#include "iree/compiler/Dialect/Util/IR/UtilTypes.h" #include "mlir/Dialect/MemRef/IR/MemRef.h" #include "mlir/Dialect/StandardOps/IR/Ops.h" #include "mlir/IR/BuiltinOps.h"
diff --git a/iree/compiler/Dialect/HAL/Conversion/FlowToHAL/BUILD b/iree/compiler/Dialect/HAL/Conversion/FlowToHAL/BUILD index 2960cbd..be111b6 100644 --- a/iree/compiler/Dialect/HAL/Conversion/FlowToHAL/BUILD +++ b/iree/compiler/Dialect/HAL/Conversion/FlowToHAL/BUILD
@@ -28,8 +28,8 @@ "//iree/compiler/Dialect/HAL/IR:HALDialect", "//iree/compiler/Dialect/HAL/Target", "//iree/compiler/Dialect/HAL/Utils", - "//iree/compiler/Dialect/IREE/IR", "//iree/compiler/Dialect/Shape/IR", + "//iree/compiler/Dialect/Util/IR", "@llvm-project//llvm:Support", "@llvm-project//mlir:Analysis", "@llvm-project//mlir:IR",
diff --git a/iree/compiler/Dialect/HAL/Conversion/FlowToHAL/CMakeLists.txt b/iree/compiler/Dialect/HAL/Conversion/FlowToHAL/CMakeLists.txt index fb86f65..18ad2ca 100644 --- a/iree/compiler/Dialect/HAL/Conversion/FlowToHAL/CMakeLists.txt +++ b/iree/compiler/Dialect/HAL/Conversion/FlowToHAL/CMakeLists.txt
@@ -34,8 +34,8 @@ iree::compiler::Dialect::HAL::IR::HALDialect iree::compiler::Dialect::HAL::Target iree::compiler::Dialect::HAL::Utils - iree::compiler::Dialect::IREE::IR iree::compiler::Dialect::Shape::IR + iree::compiler::Dialect::Util::IR PUBLIC )
diff --git a/iree/compiler/Dialect/HAL/Conversion/FlowToHAL/ConvertStreamOps.cpp b/iree/compiler/Dialect/HAL/Conversion/FlowToHAL/ConvertStreamOps.cpp index 20838ac..331df07 100644 --- a/iree/compiler/Dialect/HAL/Conversion/FlowToHAL/ConvertStreamOps.cpp +++ b/iree/compiler/Dialect/HAL/Conversion/FlowToHAL/ConvertStreamOps.cpp
@@ -12,9 +12,9 @@ #include "iree/compiler/Dialect/HAL/Target/TargetRegistry.h" #include "iree/compiler/Dialect/HAL/Utils/DeviceSwitchBuilder.h" #include "iree/compiler/Dialect/HAL/Utils/TypeUtils.h" -#include "iree/compiler/Dialect/IREE/IR/IREETypes.h" #include "iree/compiler/Dialect/Shape/IR/Builders.h" #include "iree/compiler/Dialect/Shape/IR/ShapeOps.h" +#include "iree/compiler/Dialect/Util/IR/UtilTypes.h" #include "llvm/ADT/DenseMap.h" #include "llvm/Support/Debug.h" #include "mlir/Analysis/Liveness.h" @@ -57,7 +57,8 @@ // Start with outputs so that we handle tied values that may lead all the way // back up the chain to the stream inputs. - auto tiedStreamOp = cast<IREE::TiedOpInterface>(streamOp.getOperation()); + auto tiedStreamOp = + cast<IREE::Util::TiedOpInterface>(streamOp.getOperation()); auto returnOp = cast<IREE::Flow::ReturnOp>(streamBlock->back()); for (auto result : llvm::enumerate(streamOp.getResults())) { auto streamValue = returnOp.getOperand(result.index()); @@ -72,7 +73,7 @@ } for (auto &op : *streamBlock) { - auto tiedOp = dyn_cast<IREE::TiedOpInterface>(op); + auto tiedOp = dyn_cast<IREE::Util::TiedOpInterface>(op); for (auto it : llvm::enumerate(op.getResults())) { auto result = it.value(); if (!result.getType().isa<ShapedType>()) continue; @@ -490,7 +491,8 @@ IREE::Flow::ExStreamFragmentOp streamOp, StreamSchedulingState &schedulingState, ConversionPatternRewriter &rewriter, SmallVectorImpl<Value> &output) { - auto tiedStreamOp = cast<IREE::TiedOpInterface>(streamOp.getOperation()); + auto tiedStreamOp = + cast<IREE::Util::TiedOpInterface>(streamOp.getOperation()); auto &entryBlock = streamOp.body().front(); SmallVector<Value> outputBuffers;
diff --git a/iree/compiler/Dialect/HAL/Conversion/FlowToHAL/ConvertTensorOps.cpp b/iree/compiler/Dialect/HAL/Conversion/FlowToHAL/ConvertTensorOps.cpp index b526cf1..3a75f1f 100644 --- a/iree/compiler/Dialect/HAL/Conversion/FlowToHAL/ConvertTensorOps.cpp +++ b/iree/compiler/Dialect/HAL/Conversion/FlowToHAL/ConvertTensorOps.cpp
@@ -9,7 +9,7 @@ #include "iree/compiler/Dialect/HAL/IR/HALOps.h" #include "iree/compiler/Dialect/HAL/IR/HALTypes.h" #include "iree/compiler/Dialect/HAL/Utils/TypeUtils.h" -#include "iree/compiler/Dialect/IREE/IR/IREETypes.h" +#include "iree/compiler/Dialect/Util/IR/UtilTypes.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/STLExtras.h" #include "mlir/Dialect/StandardOps/IR/Ops.h"
diff --git a/iree/compiler/Dialect/HAL/Conversion/FlowToHAL/ConvertVariableOps.cpp b/iree/compiler/Dialect/HAL/Conversion/FlowToHAL/ConvertVariableOps.cpp index 664c107..169f86a 100644 --- a/iree/compiler/Dialect/HAL/Conversion/FlowToHAL/ConvertVariableOps.cpp +++ b/iree/compiler/Dialect/HAL/Conversion/FlowToHAL/ConvertVariableOps.cpp
@@ -9,7 +9,7 @@ #include "iree/compiler/Dialect/HAL/IR/HALOps.h" #include "iree/compiler/Dialect/HAL/IR/HALTypes.h" #include "iree/compiler/Dialect/HAL/Utils/TypeUtils.h" -#include "iree/compiler/Dialect/IREE/IR/IREETypes.h" +#include "iree/compiler/Dialect/Util/IR/UtilTypes.h" #include "llvm/ADT/DenseMap.h" #include "mlir/Dialect/StandardOps/IR/Ops.h" #include "mlir/IR/Attributes.h" @@ -225,8 +225,10 @@ ConversionPatternRewriter &rewriter) const override { IREE::Flow::VariableStoreIndirectOp::Adaptor operands(newOperands); - Type variableType = - operands.variable().getType().cast<IREE::PtrType>().getTargetType(); + Type variableType = operands.variable() + .getType() + .cast<IREE::Util::PtrType>() + .getTargetType(); Value storeValue = implicitCastVariableStore( storeOp.getLoc(), operands.value(), variableType, rewriter); if (!storeValue) {
diff --git a/iree/compiler/Dialect/HAL/Conversion/FlowToHAL/test/constant_ops.mlir b/iree/compiler/Dialect/HAL/Conversion/FlowToHAL/test/constant_ops.mlir index 96cc5bf..34f51cc 100644 --- a/iree/compiler/Dialect/HAL/Conversion/FlowToHAL/test/constant_ops.mlir +++ b/iree/compiler/Dialect/HAL/Conversion/FlowToHAL/test/constant_ops.mlir
@@ -28,11 +28,11 @@ flow.variable @var_indirect mutable : tensor<i32> func @fn() { // CHECK: %[[ADDR:.+]] = hal.variable.address @var_indirect - %0 = flow.variable.address @var_indirect : !iree.ptr<tensor<i32>> + %0 = flow.variable.address @var_indirect : !util.ptr<tensor<i32>> // CHECK-NEXT: %[[VALUE:.+]] = hal.variable.load.indirect %[[ADDR]] - %1 = flow.variable.load.indirect %0 : !iree.ptr<tensor<i32>> -> tensor<i32> + %1 = flow.variable.load.indirect %0 : !util.ptr<tensor<i32>> -> tensor<i32> // CHECK-NEXT: hal.variable.store.indirect %[[VALUE]], %[[ADDR]] - flow.variable.store.indirect %1, %0 : tensor<i32> -> !iree.ptr<tensor<i32>> + flow.variable.store.indirect %1, %0 : tensor<i32> -> !util.ptr<tensor<i32>> return }
diff --git a/iree/compiler/Dialect/HAL/Conversion/FlowToHAL/test/variable_ops.mlir b/iree/compiler/Dialect/HAL/Conversion/FlowToHAL/test/variable_ops.mlir index 30c0953..c0ec959 100644 --- a/iree/compiler/Dialect/HAL/Conversion/FlowToHAL/test/variable_ops.mlir +++ b/iree/compiler/Dialect/HAL/Conversion/FlowToHAL/test/variable_ops.mlir
@@ -28,11 +28,11 @@ flow.variable @var_indirect mutable : tensor<i32> func @fn() { // CHECK: %[[ADDR:.+]] = hal.variable.address @var_indirect - %0 = flow.variable.address @var_indirect : !iree.ptr<tensor<i32>> + %0 = flow.variable.address @var_indirect : !util.ptr<tensor<i32>> // CHECK-NEXT: %[[VALUE:.+]] = hal.variable.load.indirect %[[ADDR]] - %1 = flow.variable.load.indirect %0 : !iree.ptr<tensor<i32>> -> tensor<i32> + %1 = flow.variable.load.indirect %0 : !util.ptr<tensor<i32>> -> tensor<i32> // CHECK-NEXT: hal.variable.store.indirect %[[VALUE]], %[[ADDR]] - flow.variable.store.indirect %1, %0 : tensor<i32> -> !iree.ptr<tensor<i32>> + flow.variable.store.indirect %1, %0 : tensor<i32> -> !util.ptr<tensor<i32>> return } @@ -92,13 +92,13 @@ // Checks that the implicit cast allowing a buffer_view to indirect store into // a variable that maps to a buffer is permitted. // CHECK-LABEL: hal.variable @var_indirect_with_buffer_view_store -// CHECK: %[[ptr:.*]] = hal.variable.address @var_indirect_with_buffer_view_store : !iree.ptr<!hal.buffer> +// CHECK: %[[ptr:.*]] = hal.variable.address @var_indirect_with_buffer_view_store : !util.ptr<!hal.buffer> // CHECK: %[[buffer:.*]] = hal.buffer_view.buffer %arg0 : !hal.buffer -// CHECK: hal.variable.store.indirect %[[buffer]], %[[ptr]] : !hal.buffer -> !iree.ptr<!hal.buffer> +// CHECK: hal.variable.store.indirect %[[buffer]], %[[ptr]] : !hal.buffer -> !util.ptr<!hal.buffer> flow.variable @var_indirect_with_buffer_view_store mutable : tensor<i32> func @fn(%arg0: !hal.buffer_view) { - %0 = flow.variable.address @var_indirect_with_buffer_view_store : !iree.ptr<tensor<i32>> + %0 = flow.variable.address @var_indirect_with_buffer_view_store : !util.ptr<tensor<i32>> %1 = hal.tensor.cast %arg0 : !hal.buffer_view -> tensor<i32> - flow.variable.store.indirect %1, %0 : tensor<i32> -> !iree.ptr<tensor<i32>> + flow.variable.store.indirect %1, %0 : tensor<i32> -> !util.ptr<tensor<i32>> return }
diff --git a/iree/compiler/Dialect/HAL/Conversion/HALToHAL/BUILD b/iree/compiler/Dialect/HAL/Conversion/HALToHAL/BUILD index 412dae6..51bc106 100644 --- a/iree/compiler/Dialect/HAL/Conversion/HALToHAL/BUILD +++ b/iree/compiler/Dialect/HAL/Conversion/HALToHAL/BUILD
@@ -23,7 +23,7 @@ "//iree/compiler/Dialect/HAL/Conversion", "//iree/compiler/Dialect/HAL/IR", "//iree/compiler/Dialect/HAL/Utils", - "//iree/compiler/Dialect/IREE/IR", + "//iree/compiler/Dialect/Util/IR", "@llvm-project//llvm:Support", "@llvm-project//mlir:IR", "@llvm-project//mlir:Pass",
diff --git a/iree/compiler/Dialect/HAL/Conversion/HALToHAL/CMakeLists.txt b/iree/compiler/Dialect/HAL/Conversion/HALToHAL/CMakeLists.txt index 43cb8c8..a67dd16 100644 --- a/iree/compiler/Dialect/HAL/Conversion/HALToHAL/CMakeLists.txt +++ b/iree/compiler/Dialect/HAL/Conversion/HALToHAL/CMakeLists.txt
@@ -27,7 +27,7 @@ iree::compiler::Dialect::HAL::Conversion iree::compiler::Dialect::HAL::IR iree::compiler::Dialect::HAL::Utils - iree::compiler::Dialect::IREE::IR + iree::compiler::Dialect::Util::IR PUBLIC )
diff --git a/iree/compiler/Dialect/HAL/Conversion/HALToHAL/ConvertConstantOps.cpp b/iree/compiler/Dialect/HAL/Conversion/HALToHAL/ConvertConstantOps.cpp index 383c0c4..b5125b3 100644 --- a/iree/compiler/Dialect/HAL/Conversion/HALToHAL/ConvertConstantOps.cpp +++ b/iree/compiler/Dialect/HAL/Conversion/HALToHAL/ConvertConstantOps.cpp
@@ -6,7 +6,7 @@ #include "iree/compiler/Dialect/HAL/IR/HALOps.h" #include "iree/compiler/Dialect/HAL/IR/HALTypes.h" -#include "iree/compiler/Dialect/IREE/IR/IREETypes.h" +#include "iree/compiler/Dialect/Util/IR/UtilTypes.h" #include "mlir/Dialect/StandardOps/IR/Ops.h" #include "mlir/Transforms/DialectConversion.h"
diff --git a/iree/compiler/Dialect/HAL/Conversion/HALToVM/BUILD b/iree/compiler/Dialect/HAL/Conversion/HALToVM/BUILD index bf01d62..59142ca 100644 --- a/iree/compiler/Dialect/HAL/Conversion/HALToVM/BUILD +++ b/iree/compiler/Dialect/HAL/Conversion/HALToVM/BUILD
@@ -33,7 +33,7 @@ "//iree/compiler/Dialect/HAL:hal_imports", "//iree/compiler/Dialect/HAL/IR", "//iree/compiler/Dialect/HAL/Utils", - "//iree/compiler/Dialect/IREE/IR", + "//iree/compiler/Dialect/Util/IR", "//iree/compiler/Dialect/VM/Conversion", "//iree/compiler/Dialect/VM/Conversion/IREEToVM", "//iree/compiler/Dialect/VM/Conversion/StandardToVM",
diff --git a/iree/compiler/Dialect/HAL/Conversion/HALToVM/CMakeLists.txt b/iree/compiler/Dialect/HAL/Conversion/HALToVM/CMakeLists.txt index 548ab77..0ca0990 100644 --- a/iree/compiler/Dialect/HAL/Conversion/HALToVM/CMakeLists.txt +++ b/iree/compiler/Dialect/HAL/Conversion/HALToVM/CMakeLists.txt
@@ -37,7 +37,7 @@ iree::compiler::Dialect::HAL::IR iree::compiler::Dialect::HAL::Utils iree::compiler::Dialect::HAL::hal_imports - iree::compiler::Dialect::IREE::IR + iree::compiler::Dialect::Util::IR iree::compiler::Dialect::VM::Conversion iree::compiler::Dialect::VM::Conversion::IREEToVM iree::compiler::Dialect::VM::Conversion::StandardToVM
diff --git a/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertConstantOps.cpp b/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertConstantOps.cpp index ce8f687..1746f4b 100644 --- a/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertConstantOps.cpp +++ b/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertConstantOps.cpp
@@ -6,7 +6,7 @@ #include "iree/compiler/Dialect/HAL/IR/HALOps.h" #include "iree/compiler/Dialect/HAL/IR/HALTypes.h" -#include "iree/compiler/Dialect/IREE/IR/IREETypes.h" +#include "iree/compiler/Dialect/Util/IR/UtilTypes.h" #include "iree/compiler/Dialect/VM/IR/VMOps.h" #include "mlir/Transforms/DialectConversion.h"
diff --git a/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertExecutableOps.cpp b/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertExecutableOps.cpp index 30bc24b..c9fd73a 100644 --- a/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertExecutableOps.cpp +++ b/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertExecutableOps.cpp
@@ -9,7 +9,7 @@ #include "iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertHALToVM.h" #include "iree/compiler/Dialect/HAL/IR/HALOps.h" #include "iree/compiler/Dialect/HAL/IR/HALTypes.h" -#include "iree/compiler/Dialect/IREE/IR/IREETypes.h" +#include "iree/compiler/Dialect/Util/IR/UtilTypes.h" #include "iree/compiler/Dialect/VM/Conversion/ImportUtils.h" #include "iree/compiler/Dialect/VM/IR/VMOps.h" #include "llvm/ADT/DenseMap.h"
diff --git a/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertHALToVM.cpp b/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertHALToVM.cpp index c344370..b128fcc 100644 --- a/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertHALToVM.cpp +++ b/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertHALToVM.cpp
@@ -9,8 +9,8 @@ #include "iree/compiler/Dialect/HAL/IR/HALOps.h" #include "iree/compiler/Dialect/HAL/IR/HALTypes.h" #include "iree/compiler/Dialect/HAL/hal.imports.h" -#include "iree/compiler/Dialect/IREE/IR/IREEDialect.h" -#include "iree/compiler/Dialect/IREE/IR/IREETypes.h" +#include "iree/compiler/Dialect/Util/IR/UtilDialect.h" +#include "iree/compiler/Dialect/Util/IR/UtilTypes.h" #include "iree/compiler/Dialect/VM/Conversion/ConversionTarget.h" #include "iree/compiler/Dialect/VM/Conversion/IREEToVM/ConvertIREEToVM.h" #include "iree/compiler/Dialect/VM/Conversion/ImportUtils.h" @@ -103,7 +103,7 @@ : targetOptions_(targetOptions) {} void getDependentDialects(DialectRegistry ®istry) const override { - registry.insert<IREEDialect, IREE::VM::VMDialect>(); + registry.insert<IREE::Util::UtilDialect, IREE::VM::VMDialect>(); } StringRef getArgument() const override { return "iree-convert-hal-to-vm"; }
diff --git a/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertVariableOps.cpp b/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertVariableOps.cpp index e5f4e85..4164f82 100644 --- a/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertVariableOps.cpp +++ b/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertVariableOps.cpp
@@ -6,7 +6,7 @@ #include "iree/compiler/Dialect/HAL/IR/HALOps.h" #include "iree/compiler/Dialect/HAL/IR/HALTypes.h" -#include "iree/compiler/Dialect/IREE/IR/IREETypes.h" +#include "iree/compiler/Dialect/Util/IR/UtilTypes.h" #include "iree/compiler/Dialect/VM/IR/VMOps.h" #include "mlir/Transforms/DialectConversion.h" @@ -25,9 +25,10 @@ auto convertedType = typeConverter.convertType(op.type()); if (convertedType.isa<IREE::VM::RefType>() || IREE::VM::RefType::isCompatible(convertedType)) { - rewriter.replaceOpWithNewOp<IREE::VM::GlobalRefOp>( + auto newOp = rewriter.replaceOpWithNewOp<IREE::VM::GlobalRefOp>( op, op.sym_name(), op.is_mutable(), convertedType, op.initializer(), op.initial_value(), llvm::to_vector<4>(op->getDialectAttrs())); + newOp.setVisibility(op.getVisibility()); return success(); } else if (convertedType.isInteger(32)) { auto convertedValue = @@ -35,9 +36,10 @@ ? rewriter.getI32IntegerAttr(static_cast<int32_t>( op.initial_value().getValue().cast<IntegerAttr>().getInt())) : Attribute{}; - rewriter.replaceOpWithNewOp<IREE::VM::GlobalI32Op>( + auto newOp = rewriter.replaceOpWithNewOp<IREE::VM::GlobalI32Op>( op, op.sym_name(), op.is_mutable(), convertedType, op.initializer(), convertedValue, llvm::to_vector<4>(op->getDialectAttrs())); + newOp.setVisibility(op.getVisibility()); return success(); } else if (convertedType.isInteger(64)) { auto convertedValue = @@ -45,9 +47,10 @@ ? 
rewriter.getI64IntegerAttr( op.initial_value().getValue().cast<IntegerAttr>().getInt()) : Attribute{}; - rewriter.replaceOpWithNewOp<IREE::VM::GlobalI64Op>( + auto newOp = rewriter.replaceOpWithNewOp<IREE::VM::GlobalI64Op>( op, op.sym_name(), op.is_mutable(), convertedType, op.initializer(), convertedValue, llvm::to_vector<4>(op->getDialectAttrs())); + newOp.setVisibility(op.getVisibility()); return success(); } else if (convertedType.isF32()) { auto convertedValue = op.initial_value().hasValue() @@ -57,9 +60,10 @@ .cast<FloatAttr>() .getValueAsDouble())) : Attribute{}; - rewriter.replaceOpWithNewOp<IREE::VM::GlobalF32Op>( + auto newOp = rewriter.replaceOpWithNewOp<IREE::VM::GlobalF32Op>( op, op.sym_name(), op.is_mutable(), convertedType, op.initializer(), convertedValue, llvm::to_vector<4>(op->getDialectAttrs())); + newOp.setVisibility(op.getVisibility()); return success(); } else if (convertedType.isF64()) { auto convertedValue = @@ -69,9 +73,10 @@ .cast<FloatAttr>() .getValueAsDouble()) : Attribute{}; - rewriter.replaceOpWithNewOp<IREE::VM::GlobalF64Op>( + auto newOp = rewriter.replaceOpWithNewOp<IREE::VM::GlobalF64Op>( op, op.sym_name(), op.is_mutable(), convertedType, op.initializer(), convertedValue, llvm::to_vector<4>(op->getDialectAttrs())); + newOp.setVisibility(op.getVisibility()); return success(); } return op.emitOpError("unsupported variable type");
diff --git a/iree/compiler/Dialect/HAL/Conversion/HALToVM/test/allocator_ops.mlir b/iree/compiler/Dialect/HAL/Conversion/HALToVM/test/allocator_ops.mlir index 745effd..5fc4a46 100644 --- a/iree/compiler/Dialect/HAL/Conversion/HALToVM/test/allocator_ops.mlir +++ b/iree/compiler/Dialect/HAL/Conversion/HALToVM/test/allocator_ops.mlir
@@ -1,6 +1,6 @@ // RUN: iree-opt -split-input-file -canonicalize -iree-convert-hal-to-vm %s | IreeFileCheck %s -// CHECK-LABEL: @allocatorComputeSizeFoldsAway +// CHECK-LABEL: vm.func private @allocatorComputeSizeFoldsAway func @allocatorComputeSizeFoldsAway(%arg0 : !hal.allocator) -> index { // CHECK: %c4194304 = vm.const.i32 4194304 : i32 // CHECK-NOT: hal.allocator.compute_size @@ -12,7 +12,7 @@ // ----- -// CHECK-LABEL: @allocatorAllocate +// CHECK-LABEL: vm.func private @allocatorAllocate func @allocatorAllocate(%arg0 : !hal.allocator) -> !hal.buffer { %c1024 = constant 1024 : index // CHECK: %ref = vm.call @hal.allocator.allocate(%arg0, %c6, %c14, %c1024) : (!vm.ref<!hal.allocator>, i32, i32, i32) -> !vm.ref<!hal.buffer> @@ -22,11 +22,11 @@ // ----- -// CHECK-LABEL: func @allocatorMapByteBuffer -func @allocatorMapByteBuffer(%arg0 : !hal.allocator, %arg1 : !iree.byte_buffer) -> !hal.buffer { +// CHECK-LABEL: vm.func private @allocatorMapByteBuffer +func @allocatorMapByteBuffer(%arg0 : !hal.allocator, %arg1 : !util.byte_buffer) -> !hal.buffer { %offset = constant 128 : index %length = constant 256 : index // CHECK: = vm.call @hal.allocator.wrap.byte_buffer(%arg0, %c6, %c2, %arg1, %c128, %c256) : (!vm.ref<!hal.allocator>, i32, i32, !vm.buffer, i32, i32) -> !vm.ref<!hal.buffer> - %buffer = hal.allocator.map<%arg0 : !hal.allocator> source(%arg1 : !iree.byte_buffer)[%offset, %length] type("HostVisible|HostCoherent") usage(Transfer) : !hal.buffer + %buffer = hal.allocator.map<%arg0 : !hal.allocator> source(%arg1 : !util.byte_buffer)[%offset, %length] type("HostVisible|HostCoherent") usage(Transfer) : !hal.buffer return %buffer : !hal.buffer }
diff --git a/iree/compiler/Dialect/HAL/Conversion/HALToVM/test/buffer_view_ops.mlir b/iree/compiler/Dialect/HAL/Conversion/HALToVM/test/buffer_view_ops.mlir index 50b8d1a..e32cc72 100644 --- a/iree/compiler/Dialect/HAL/Conversion/HALToVM/test/buffer_view_ops.mlir +++ b/iree/compiler/Dialect/HAL/Conversion/HALToVM/test/buffer_view_ops.mlir
@@ -1,6 +1,6 @@ // RUN: iree-opt -split-input-file -iree-convert-hal-to-vm %s | IreeFileCheck %s -// CHECK-LABEL: func @buffer_view_dims +// CHECK-LABEL: vm.func private @buffer_view_dims // CHECK-SAME: %[[VIEW:.+]]: !vm.ref<!hal.buffer_view> func @buffer_view_dims(%arg0 : !hal.buffer_view) -> (index, index, index) { // CHECK-DAG: %[[D0:.+]] = vm.call @hal.buffer_view.dim(%[[VIEW]], %zero)
diff --git a/iree/compiler/Dialect/HAL/Conversion/HALToVM/test/constant_ops.mlir b/iree/compiler/Dialect/HAL/Conversion/HALToVM/test/constant_ops.mlir index c9225bf..2639235 100644 --- a/iree/compiler/Dialect/HAL/Conversion/HALToVM/test/constant_ops.mlir +++ b/iree/compiler/Dialect/HAL/Conversion/HALToVM/test/constant_ops.mlir
@@ -11,7 +11,7 @@ hal.constant_storage @_storage1 = dense<[6, 7, 8, 0]> : vector<4xi8> } -// CHECK: vm.global.ref @pool_storage0_buffer initializer(@pool_storage0_buffer_initializer) : !vm.ref<!hal.buffer> +// CHECK: vm.global.ref private @pool_storage0_buffer initializer(@pool_storage0_buffer_initializer) : !vm.ref<!hal.buffer> hal.variable @pool_storage0_buffer init(@pool_storage0_buffer_initializer) : !hal.buffer attributes {sym_visibility = "private"} // CHECK: vm.func private @pool_storage0_buffer_initializer() -> !vm.ref<!hal.buffer> func private @pool_storage0_buffer_initializer() -> !hal.buffer { @@ -20,20 +20,20 @@ %dev = hal.ex.shared_device : !hal.device %allocator = hal.device.allocator<%dev : !hal.device> : !hal.allocator // CHECK: [[STORAGE_REF:%.+]] = vm.const.ref.rodata @pool_storage0 : !vm.buffer - %storage = hal.constant_storage.lookup @pool::@_storage0 : !iree.byte_buffer + %storage = hal.constant_storage.lookup @pool::@_storage0 : !util.byte_buffer // CHECK: = vm.call @hal.allocator.wrap.byte_buffer({{.+}}, %c22, %c15, [[STORAGE_REF]], %zero, %c16) %mapped = hal.allocator.map<%allocator : !hal.allocator> - source(%storage : !iree.byte_buffer)[%c0, %c16] + source(%storage : !util.byte_buffer)[%c0, %c16] type("HostVisible|HostCoherent|DeviceVisible") usage("Constant|Transfer|Mapping|Dispatch") : !hal.buffer return %mapped : !hal.buffer } -// CHECK: vm.global.ref @pool_storage1_buffer initializer(@pool_storage1_buffer_initializer) : !vm.ref<!hal.buffer> +// CHECK: vm.global.ref private @pool_storage1_buffer initializer(@pool_storage1_buffer_initializer) : !vm.ref<!hal.buffer> hal.variable @pool_storage1_buffer init(@pool_storage1_buffer_initializer) : !hal.buffer attributes {sym_visibility = "private"} func private @pool_storage1_buffer_initializer() -> !hal.buffer -// CHECK: vm.global.ref @pool_splats initializer(@pool_splats_initializer) : !vm.ref<!hal.buffer> +// CHECK: vm.global.ref private @pool_splats initializer(@pool_splats_initializer) : 
!vm.ref<!hal.buffer> hal.variable @pool_splats init(@pool_splats_initializer) : !hal.buffer attributes {sym_visibility = "private"} // CHECK: vm.func private @pool_splats_initializer() -> !vm.ref<!hal.buffer> func private @pool_splats_initializer() -> !hal.buffer {
diff --git a/iree/compiler/Dialect/HAL/Conversion/HALToVM/test/control_flow_ops.mlir b/iree/compiler/Dialect/HAL/Conversion/HALToVM/test/control_flow_ops.mlir index ac39137..e427275 100644 --- a/iree/compiler/Dialect/HAL/Conversion/HALToVM/test/control_flow_ops.mlir +++ b/iree/compiler/Dialect/HAL/Conversion/HALToVM/test/control_flow_ops.mlir
@@ -1,6 +1,6 @@ // RUN: iree-opt -split-input-file -iree-convert-hal-to-vm %s | IreeFileCheck %s -// CHECK-LABEL: func @check_success +// CHECK-LABEL: vm.func private @check_success func @check_success() { // CHECK: %[[CODE:.+]] = %statusCode = constant 1 : i32 @@ -11,7 +11,7 @@ // ----- -// CHECK-LABEL: func @check_success_with_message +// CHECK-LABEL: vm.func private @check_success_with_message func @check_success_with_message() { // CHECK: %[[CODE:.+]] = %statusCode = constant 1 : i32
diff --git a/iree/compiler/Dialect/HAL/Conversion/HALToVM/test/variable_ops.mlir b/iree/compiler/Dialect/HAL/Conversion/HALToVM/test/variable_ops.mlir index 2e47c61..b757804 100644 --- a/iree/compiler/Dialect/HAL/Conversion/HALToVM/test/variable_ops.mlir +++ b/iree/compiler/Dialect/HAL/Conversion/HALToVM/test/variable_ops.mlir
@@ -1,19 +1,22 @@ // RUN: iree-opt -split-input-file -iree-convert-hal-to-vm %s | IreeFileCheck %s -// CHECK: vm.global.i32 @v_initialized_const = 4 : i32 +// CHECK: vm.global.i32 public @v_initialized_const = 4 : i32 hal.variable @v_initialized_const = 4 : i32 +// CHECK: vm.global.i32 private @v_private_const = 5 : i32 +hal.variable @v_private_const attributes {sym_visibility = "private"} = 5 : i32 + // ----- -// CHECK: vm.global.ref @v_initialized initializer(@initializer) : !vm.ref<!hal.buffer> +// CHECK: vm.global.ref public @v_initialized initializer(@initializer) : !vm.ref<!hal.buffer> hal.variable @v_initialized init(@initializer) : !hal.buffer func private @initializer() -> !hal.buffer // ----- -// CHECK: vm.global.ref @v_loaded : !vm.ref<!hal.buffer> +// CHECK: vm.global.ref public @v_loaded : !vm.ref<!hal.buffer> hal.variable @v_loaded : !hal.buffer -// CHECK-LABEL: func @loaded +// CHECK-LABEL: vm.func private @loaded func @loaded() { // CHECK: %v_loaded = vm.global.load.ref @v_loaded : !vm.ref<!hal.buffer> %0 = hal.variable.load @v_loaded : !hal.buffer @@ -22,9 +25,9 @@ // ----- -// CHECK: vm.global.ref mutable @v_stored : !vm.ref<!hal.buffer> +// CHECK: vm.global.ref public mutable @v_stored : !vm.ref<!hal.buffer> hal.variable @v_stored mutable : !hal.buffer -// CHECK-LABEL: func @stored +// CHECK-LABEL: vm.func private @stored func @stored(%arg0 : !hal.buffer) { // CHECK: vm.global.store.ref %arg0, @v_stored : !vm.ref<!hal.buffer> hal.variable.store %arg0, @v_stored : !hal.buffer @@ -37,9 +40,9 @@ // CHECK-LABEL: @loaded_indirect func @loaded_indirect() -> !hal.buffer { // CHECK-NEXT: %[[ADDR:.+]] = vm.global.address @v_loaded - %0 = hal.variable.address @v_loaded : !iree.ptr<!hal.buffer> + %0 = hal.variable.address @v_loaded : !util.ptr<!hal.buffer> // CHECK-NEXT: = vm.global.load.indirect.ref %[[ADDR]] - %1 = hal.variable.load.indirect %0 : !iree.ptr<!hal.buffer> -> !hal.buffer + %1 = hal.variable.load.indirect %0 : !util.ptr<!hal.buffer> -> 
!hal.buffer return %1 : !hal.buffer } @@ -49,8 +52,8 @@ // CHECK-LABEL: @stored_indirect func @stored_indirect(%arg0 : !hal.buffer) { // CHECK-NEXT: %[[ADDR:.+]] = vm.global.address @v_stored - %0 = hal.variable.address @v_stored : !iree.ptr<!hal.buffer> + %0 = hal.variable.address @v_stored : !util.ptr<!hal.buffer> // CHECK-NEXT: vm.global.store.indirect.ref %arg0, %[[ADDR]] - hal.variable.store.indirect %arg0, %0 : !hal.buffer -> !iree.ptr<!hal.buffer> + hal.variable.store.indirect %arg0, %0 : !hal.buffer -> !util.ptr<!hal.buffer> return }
diff --git a/iree/compiler/Dialect/HAL/Conversion/IREEToHAL/BUILD b/iree/compiler/Dialect/HAL/Conversion/IREEToHAL/BUILD index d03e4d9..1587a93 100644 --- a/iree/compiler/Dialect/HAL/Conversion/IREEToHAL/BUILD +++ b/iree/compiler/Dialect/HAL/Conversion/IREEToHAL/BUILD
@@ -20,7 +20,7 @@ ], deps = [ "//iree/compiler/Dialect/HAL/IR", - "//iree/compiler/Dialect/IREE/IR", + "//iree/compiler/Dialect/Util/IR", "@llvm-project//mlir:IR", "@llvm-project//mlir:StandardOps", "@llvm-project//mlir:Transforms",
diff --git a/iree/compiler/Dialect/HAL/Conversion/IREEToHAL/CMakeLists.txt b/iree/compiler/Dialect/HAL/Conversion/IREEToHAL/CMakeLists.txt index c863164..eaa63ea 100644 --- a/iree/compiler/Dialect/HAL/Conversion/IREEToHAL/CMakeLists.txt +++ b/iree/compiler/Dialect/HAL/Conversion/IREEToHAL/CMakeLists.txt
@@ -22,7 +22,7 @@ MLIRStandard MLIRTransforms iree::compiler::Dialect::HAL::IR - iree::compiler::Dialect::IREE::IR + iree::compiler::Dialect::Util::IR PUBLIC )
diff --git a/iree/compiler/Dialect/HAL/Conversion/IREEToHAL/ConvertIREEToHAL.cpp b/iree/compiler/Dialect/HAL/Conversion/IREEToHAL/ConvertIREEToHAL.cpp index 0320dfa..da50553 100644 --- a/iree/compiler/Dialect/HAL/Conversion/IREEToHAL/ConvertIREEToHAL.cpp +++ b/iree/compiler/Dialect/HAL/Conversion/IREEToHAL/ConvertIREEToHAL.cpp
@@ -7,7 +7,7 @@ #include "iree/compiler/Dialect/HAL/Conversion/IREEToHAL/ConvertIREEToHAL.h" #include "iree/compiler/Dialect/HAL/IR/HALOps.h" -#include "iree/compiler/Dialect/IREE/IR/IREEOps.h" +#include "iree/compiler/Dialect/Util/IR/UtilOps.h" #include "mlir/Dialect/StandardOps/IR/Ops.h" #include "mlir/IR/MLIRContext.h" #include "mlir/IR/PatternMatch.h" @@ -19,12 +19,13 @@ namespace { class DynamicShapeConstantOpConversion - : public OpConversionPattern<IREE::DynamicShapeConstantOp> { + : public OpConversionPattern<IREE::Util::DynamicShapeConstantOp> { public: - using OpConversionPattern<IREE::DynamicShapeConstantOp>::OpConversionPattern; + using OpConversionPattern< + IREE::Util::DynamicShapeConstantOp>::OpConversionPattern; LogicalResult matchAndRewrite( - IREE::DynamicShapeConstantOp constantOp, + IREE::Util::DynamicShapeConstantOp constantOp, llvm::ArrayRef<Value> newOperands, ConversionPatternRewriter &rewriter) const override { assert(newOperands.empty() && "dynamic_shape_constant takes no operands"); @@ -63,7 +64,7 @@ auto view = rewriter.createOrFold<IREE::HAL::BufferViewCreateOp>( constantOp.getLoc(), buffer, elementType.getValue(), shape); - rewriter.replaceOpWithNewOp<IREE::DoNotOptimizeOp>(constantOp, view); + rewriter.replaceOpWithNewOp<IREE::Util::DoNotOptimizeOp>(constantOp, view); return success(); } }; @@ -102,26 +103,30 @@ void populateIREEToHALPatterns(MLIRContext *context, ConversionTarget &target, TypeConverter &typeConverter, OwningRewritePatternList &patterns) { - target.addIllegalOp<IREE::DynamicShapeConstantOp>(); + target.addIllegalOp<IREE::Util::DynamicShapeConstantOp>(); patterns.insert<DynamicShapeConstantOpConversion>(context); - typeConverter.addConversion([&](IREE::ListType type) { + typeConverter.addConversion([&](IREE::Util::ListType type) { auto elementType = typeConverter.convertType(type.getElementType()); - return IREE::ListType::get(elementType); + return IREE::Util::ListType::get(elementType); }); - 
target.addDynamicallyLegalOp<IREE::ListCreateOp>([&](IREE::ListCreateOp op) { - return typeConverter.isLegal(op.getType()); - }); - target.addDynamicallyLegalOp<IREE::ListGetOp>( - [&](IREE::ListGetOp op) { return typeConverter.isLegal(op.getType()); }); - target.addDynamicallyLegalOp<IREE::ListSetOp>([&](IREE::ListSetOp op) { - return typeConverter.isLegal(op.value().getType()); - }); - patterns.insert<GenericConvertTypesConversion<IREE::ListCreateOp>, - GenericConvertTypesConversion<IREE::ListGetOp>, - GenericConvertTypesConversion<IREE::ListSetOp>>(typeConverter, - context); + target.addDynamicallyLegalOp<IREE::Util::ListCreateOp>( + [&](IREE::Util::ListCreateOp op) { + return typeConverter.isLegal(op.getType()); + }); + target.addDynamicallyLegalOp<IREE::Util::ListGetOp>( + [&](IREE::Util::ListGetOp op) { + return typeConverter.isLegal(op.getType()); + }); + target.addDynamicallyLegalOp<IREE::Util::ListSetOp>( + [&](IREE::Util::ListSetOp op) { + return typeConverter.isLegal(op.value().getType()); + }); + patterns.insert<GenericConvertTypesConversion<IREE::Util::ListCreateOp>, + GenericConvertTypesConversion<IREE::Util::ListGetOp>, + GenericConvertTypesConversion<IREE::Util::ListSetOp>>( + typeConverter, context); } } // namespace iree_compiler
diff --git a/iree/compiler/Dialect/HAL/Conversion/IREEToHAL/test/shape_constants.mlir b/iree/compiler/Dialect/HAL/Conversion/IREEToHAL/test/shape_constants.mlir index 96102a5..a043e26 100644 --- a/iree/compiler/Dialect/HAL/Conversion/IREEToHAL/test/shape_constants.mlir +++ b/iree/compiler/Dialect/HAL/Conversion/IREEToHAL/test/shape_constants.mlir
@@ -8,7 +8,7 @@ // CHECK-SAME: usage("Constant|Transfer|Mapping|Dispatch") // CHECK-SAME: : !hal.buffer = dense<2> : tensor<2xi32> // CHECK: %[[VIEW:.+]] = hal.buffer_view.create %[[BUFFER]], element_type = %c16777248_i32, shape = [%c2] : !hal.buffer -> !hal.buffer_view - // CHECK-NEXT: %[[RET:.+]] = iree.do_not_optimize(%[[VIEW]]) : !hal.buffer_view - %c = iree.dynamic_shape_constant dense<2> : tensor<2xi32> -> tensor<?xi32> + // CHECK-NEXT: %[[RET:.+]] = util.do_not_optimize(%[[VIEW]]) : !hal.buffer_view + %c = util.dynamic_shape_constant dense<2> : tensor<2xi32> -> tensor<?xi32> return }
diff --git a/iree/compiler/Dialect/HAL/Conversion/TypeConverter.cpp b/iree/compiler/Dialect/HAL/Conversion/TypeConverter.cpp index ce5d4f2..5a54553 100644 --- a/iree/compiler/Dialect/HAL/Conversion/TypeConverter.cpp +++ b/iree/compiler/Dialect/HAL/Conversion/TypeConverter.cpp
@@ -9,7 +9,7 @@ #include "iree/compiler/Dialect/HAL/IR/HALOps.h" #include "iree/compiler/Dialect/HAL/IR/HALTypes.h" #include "iree/compiler/Dialect/HAL/Utils/TypeUtils.h" -#include "iree/compiler/Dialect/IREE/IR/IREETypes.h" +#include "iree/compiler/Dialect/Util/IR/UtilTypes.h" namespace mlir { namespace iree_compiler { @@ -77,12 +77,12 @@ // Recursively handle pointer target types (we want to convert // ptr<tensor<...>> to ptr<!hal.buffer<...>>, for example). - addConversion([this](IREE::PtrType type) -> Type { + addConversion([this](IREE::Util::PtrType type) -> Type { auto targetType = convertType(type.getTargetType()); if (!targetType) { return Type(); } - return IREE::PtrType::get(targetType); + return IREE::Util::PtrType::get(targetType); }); }
diff --git a/iree/compiler/Dialect/HAL/IR/BUILD b/iree/compiler/Dialect/HAL/IR/BUILD index fa2fe23..9586bfc 100644 --- a/iree/compiler/Dialect/HAL/IR/BUILD +++ b/iree/compiler/Dialect/HAL/IR/BUILD
@@ -32,8 +32,8 @@ include = ["*.td"], ), deps = [ - "//iree/compiler/Dialect/IREE/IR:td_files", "//iree/compiler/Dialect/Shape/IR:td_files", + "//iree/compiler/Dialect/Util/IR:td_files", "@llvm-project//mlir:OpBaseTdFiles", "@llvm-project//mlir:StdOpsTdFiles", "@llvm-project//mlir:ViewLikeInterfaceTdFiles", @@ -82,8 +82,8 @@ ":HALTypesGen", ":LoweringConfigEnumGen", ":LoweringConfigGen", - "//iree/compiler/Dialect/IREE/IR", "//iree/compiler/Dialect/Shape/IR", + "//iree/compiler/Dialect/Util/IR", "@llvm-project//llvm:Support", "@llvm-project//mlir:IR", "@llvm-project//mlir:MemRefDialect", @@ -105,8 +105,8 @@ "//iree/compiler/Dialect/HAL:hal_imports", "//iree/compiler/Dialect/HAL/Conversion/HALToHAL", "//iree/compiler/Dialect/HAL/Conversion/HALToVM", - "//iree/compiler/Dialect/IREE/IR", "//iree/compiler/Dialect/Shape/IR", + "//iree/compiler/Dialect/Util/IR", "//iree/compiler/Dialect/VM/Conversion", "@llvm-project//llvm:Support", "@llvm-project//mlir:IR",
diff --git a/iree/compiler/Dialect/HAL/IR/CMakeLists.txt b/iree/compiler/Dialect/HAL/IR/CMakeLists.txt index bcfaec9..341d7ea 100644 --- a/iree/compiler/Dialect/HAL/IR/CMakeLists.txt +++ b/iree/compiler/Dialect/HAL/IR/CMakeLists.txt
@@ -59,8 +59,8 @@ MLIRSupport MLIRTransformUtils MLIRViewLikeInterface - iree::compiler::Dialect::IREE::IR iree::compiler::Dialect::Shape::IR + iree::compiler::Dialect::Util::IR PUBLIC ) @@ -82,8 +82,8 @@ iree::compiler::Dialect::HAL::Conversion::HALToHAL iree::compiler::Dialect::HAL::Conversion::HALToVM iree::compiler::Dialect::HAL::hal_imports - iree::compiler::Dialect::IREE::IR iree::compiler::Dialect::Shape::IR + iree::compiler::Dialect::Util::IR iree::compiler::Dialect::VM::Conversion PUBLIC )
diff --git a/iree/compiler/Dialect/HAL/IR/HALBase.td b/iree/compiler/Dialect/HAL/IR/HALBase.td index c4a4524..d61387d 100644 --- a/iree/compiler/Dialect/HAL/IR/HALBase.td +++ b/iree/compiler/Dialect/HAL/IR/HALBase.td
@@ -347,7 +347,7 @@ HAL_Buffer, ]>; -def HAL_OrdinalAttr : IREE_IndexAttrBase<"size_t">; +def HAL_OrdinalAttr : Util_IndexAttrBase<"size_t">; def HAL_ExecutableDataAttr : SignlessIntElementsAttr<8>; @@ -356,11 +356,11 @@ I32, "element type attribute">; def HAL_DeviceSize : TypeAlias<Index>; -def HAL_DeviceSizeAttr : IREE_IndexAttrBase<"iree_device_size_t">; +def HAL_DeviceSizeAttr : Util_IndexAttrBase<"iree_device_size_t">; def HAL_DeviceSizes : Variadic<HAL_DeviceSize>; def HAL_HostSize : TypeAlias<Index>; -def HAL_HostSizeAttr : IREE_IndexAttrBase<"size_t">; +def HAL_HostSizeAttr : Util_IndexAttrBase<"size_t">; def HAL_TimelineValue : TypeAlias<Index>; @@ -370,7 +370,7 @@ def HAL_VariableType : AnyTypeOf<[HAL_PrimitiveType, AnyVector, HAL_ObjectType]>; def HAL_VariablePtr : PtrOf<HAL_VariableType>; -def HAL_IndexAttr : IREE_IndexAttrBase<"index">; +def HAL_IndexAttr : Util_IndexAttrBase<"index">; def HAL_IndexArrayAttr : TypedArrayAttrBase<HAL_IndexAttr, "index array attribute"> { let constBuilderCall = "$_builder.getIndexArrayAttr($0)"; @@ -383,7 +383,7 @@ // TODO(benvanik): assert rank 3 def HAL_WorkgroupSizeAttr : TypedArrayAttrBase< - IREE_IndexAttrBase<"size_t">, + Util_IndexAttrBase<"size_t">, "index array attribute"> { let constBuilderCall = "$_builder.getIndexArrayAttr($0)"; } @@ -412,40 +412,40 @@ //===----------------------------------------------------------------------===// def HAL_BufferConstraintsAttr : - IREE_StructAttr<"buffer_constraints", "BufferConstraintsAttr", HAL_Dialect, [ + Util_StructAttr<"buffer_constraints", "BufferConstraintsAttr", HAL_Dialect, [ // The maximum size of a memory allocation that can be created, even if // there is more space available in the heap. - IREE_StructFieldAttr<"max_allocation_size", HAL_DeviceSizeAttr>, + Util_StructFieldAttr<"max_allocation_size", HAL_DeviceSizeAttr>, // The minimum required alignment, in bytes, for offsets used in runtime // buffer bindings for target backends. 
Offset values (both dynamic and // static) must be an integer multiple of this limit. - IREE_StructFieldAttr<"min_buffer_offset_alignment", HAL_DeviceSizeAttr>, + Util_StructFieldAttr<"min_buffer_offset_alignment", HAL_DeviceSizeAttr>, // The maximum value that can be specified for size ranges of buffer // bindings. The underlying allocation may be larger than this but only // up to this amount will be visible to kernels. - IREE_StructFieldAttr<"max_buffer_range", HAL_DeviceSizeAttr>, + Util_StructFieldAttr<"max_buffer_range", HAL_DeviceSizeAttr>, // The minimum required alignment, in bytes, for size ranges of buffer // bindings. - IREE_StructFieldAttr<"min_buffer_range_alignment", HAL_DeviceSizeAttr>, + Util_StructFieldAttr<"min_buffer_range_alignment", HAL_DeviceSizeAttr>, ]> { let cppNamespace = "mlir::iree_compiler::IREE::HAL"; } def HAL_ByteRangeAttr : - IREE_StructAttr<"byte_range", "ByteRangeAttr", HAL_Dialect, [ - IREE_StructFieldAttr<"offset", HAL_DeviceSizeAttr>, - IREE_StructFieldAttr<"length", HAL_DeviceSizeAttr>, + Util_StructAttr<"byte_range", "ByteRangeAttr", HAL_Dialect, [ + Util_StructFieldAttr<"offset", HAL_DeviceSizeAttr>, + Util_StructFieldAttr<"length", HAL_DeviceSizeAttr>, ]> { let cppNamespace = "mlir::iree_compiler::IREE::HAL"; } def HAL_DescriptorSetLayoutBindingAttr : - IREE_StructAttr<"descriptor_set_layout_binding", + Util_StructAttr<"descriptor_set_layout_binding", "DescriptorSetLayoutBindingAttr", HAL_Dialect, [ - IREE_StructFieldAttr<"binding", I32Attr>, - IREE_StructFieldAttr<"type", HAL_DescriptorTypeAttr>, - IREE_StructFieldAttr<"access", HAL_MemoryAccessBitfieldAttr>, + Util_StructFieldAttr<"binding", I32Attr>, + Util_StructFieldAttr<"type", HAL_DescriptorTypeAttr>, + Util_StructFieldAttr<"access", HAL_MemoryAccessBitfieldAttr>, ]> { let cppNamespace = "mlir::iree_compiler::IREE::HAL"; } @@ -586,40 +586,40 @@ // TODO(benvanik): drop this when we have a HAL dispatch abstraction. 
def HAL_ExConstantStorageAttr : - IREE_StructAttr<"ex.constant_storage", + Util_StructAttr<"ex.constant_storage", "ExConstantStorageAttr", HAL_Dialect, [ - IREE_StructFieldAttr<"binding", StrAttr>, - IREE_StructFieldAttr<"storage", StrAttr>, - IREE_StructFieldAttr<"offset", IndexAttr>, - IREE_StructFieldAttr<"length", IndexAttr>, + Util_StructFieldAttr<"binding", StrAttr>, + Util_StructFieldAttr<"storage", StrAttr>, + Util_StructFieldAttr<"offset", IndexAttr>, + Util_StructFieldAttr<"length", IndexAttr>, ]> { let cppNamespace = "mlir::iree_compiler::IREE::HAL"; } def HAL_ExPushConstantAttr : - IREE_StructAttr<"ex.push_constant", + Util_StructAttr<"ex.push_constant", "ExPushConstantAttr", HAL_Dialect, [ - IREE_StructFieldAttr<"ordinal", IndexAttr>, - IREE_StructFieldAttr<"operand", IndexAttr>, + Util_StructFieldAttr<"ordinal", IndexAttr>, + Util_StructFieldAttr<"operand", IndexAttr>, ]> { let cppNamespace = "mlir::iree_compiler::IREE::HAL"; } def HAL_ExOperandBufferAttr : - IREE_StructAttr<"ex.operand_buffer", + Util_StructAttr<"ex.operand_buffer", "ExOperandBufferAttr", HAL_Dialect, [ - IREE_StructFieldAttr<"binding", StrAttr>, - IREE_StructFieldAttr<"operand", IndexAttr>, + Util_StructFieldAttr<"binding", StrAttr>, + Util_StructFieldAttr<"operand", IndexAttr>, ]> { let cppNamespace = "mlir::iree_compiler::IREE::HAL"; } def HAL_ExResultBufferAttr : - IREE_StructAttr<"ex.result_buffer", + Util_StructAttr<"ex.result_buffer", "ExResultBufferAttr", HAL_Dialect, [ - IREE_StructFieldAttr<"binding", StrAttr>, - IREE_StructFieldAttr<"result", IndexAttr>, + Util_StructFieldAttr<"binding", StrAttr>, + Util_StructFieldAttr<"result", IndexAttr>, ]> { let cppNamespace = "mlir::iree_compiler::IREE::HAL"; }
diff --git a/iree/compiler/Dialect/HAL/IR/HALDialect.cpp b/iree/compiler/Dialect/HAL/IR/HALDialect.cpp index cbe5318..b4ba817 100644 --- a/iree/compiler/Dialect/HAL/IR/HALDialect.cpp +++ b/iree/compiler/Dialect/HAL/IR/HALDialect.cpp
@@ -12,7 +12,7 @@ #include "iree/compiler/Dialect/HAL/IR/HALTypes.h" #include "iree/compiler/Dialect/HAL/IR/LoweringConfig.h" #include "iree/compiler/Dialect/HAL/hal.imports.h" -#include "iree/compiler/Dialect/IREE/IR/IREEDialect.h" +#include "iree/compiler/Dialect/Util/IR/UtilDialect.h" #include "iree/compiler/Dialect/VM/Conversion/ConversionDialectInterface.h" #include "llvm/ADT/TypeSwitch.h" #include "llvm/Support/SourceMgr.h" @@ -107,7 +107,7 @@ HALDialect::HALDialect(MLIRContext *context) : Dialect(getDialectNamespace(), context, TypeID::get<HALDialect>()) { - context->loadDialect<IREEDialect>(); + context->loadDialect<IREE::Util::UtilDialect>(); registerAttributes(); registerTypes();
diff --git a/iree/compiler/Dialect/HAL/IR/HALDialect.h b/iree/compiler/Dialect/HAL/IR/HALDialect.h index 5e3f4d2..2a9e824 100644 --- a/iree/compiler/Dialect/HAL/IR/HALDialect.h +++ b/iree/compiler/Dialect/HAL/IR/HALDialect.h
@@ -30,9 +30,7 @@ Location loc) override; private: - /// Register the attributes of this dialect. void registerAttributes(); - /// Register the types of this dialect. void registerTypes(); };
diff --git a/iree/compiler/Dialect/HAL/IR/HALDialect.td b/iree/compiler/Dialect/HAL/IR/HALDialect.td index 5939e66..c535e42 100644 --- a/iree/compiler/Dialect/HAL/IR/HALDialect.td +++ b/iree/compiler/Dialect/HAL/IR/HALDialect.td
@@ -7,7 +7,7 @@ #ifndef IREE_DIALECT_HAL_DIALECT #define IREE_DIALECT_HAL_DIALECT -include "iree/compiler/Dialect/IREE/IR/IREEBase.td" +include "iree/compiler/Dialect/Util/IR/UtilBase.td" //===----------------------------------------------------------------------===// // IREE HAL (Hardware Abstraction Layer) dialect
diff --git a/iree/compiler/Dialect/HAL/IR/HALInterfaces.td b/iree/compiler/Dialect/HAL/IR/HALInterfaces.td index 42b3f7d..ac0d32c 100644 --- a/iree/compiler/Dialect/HAL/IR/HALInterfaces.td +++ b/iree/compiler/Dialect/HAL/IR/HALInterfaces.td
@@ -7,7 +7,7 @@ #ifndef IREE_DIALECT_HAL_INTERFACES #define IREE_DIALECT_HAL_INTERFACES -include "iree/compiler/Dialect/IREE/IR/IREEBase.td" +include "iree/compiler/Dialect/Util/IR/UtilBase.td" //===----------------------------------------------------------------------===// // IREE::HAL::SizeAwareOpInterface
diff --git a/iree/compiler/Dialect/HAL/IR/HALOpFolders.cpp b/iree/compiler/Dialect/HAL/IR/HALOpFolders.cpp index 3b13402..c89aa40 100644 --- a/iree/compiler/Dialect/HAL/IR/HALOpFolders.cpp +++ b/iree/compiler/Dialect/HAL/IR/HALOpFolders.cpp
@@ -5,7 +5,7 @@ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception #include "iree/compiler/Dialect/HAL/IR/HALOps.h" -#include "iree/compiler/Dialect/IREE/IR/IREEOps.h" +#include "iree/compiler/Dialect/Util/IR/UtilOps.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/StringExtras.h" #include "mlir/Dialect/StandardOps/IR/Ops.h" @@ -320,8 +320,8 @@ IREE::HAL::BufferUsageBitfield::Constant; Type bufferType = IREE::HAL::BufferType::get(rewriter.getContext()); - auto hostBuffer = rewriter.createOrFold<IREE::ByteBufferConstantOp>( - op.getLoc(), IREE::ByteBufferType::get(rewriter.getContext()), + auto hostBuffer = rewriter.createOrFold<IREE::Util::ByteBufferConstantOp>( + op.getLoc(), IREE::Util::ByteBufferType::get(rewriter.getContext()), op.value()); auto zero = rewriter.createOrFold<mlir::ConstantIndexOp>(op.getLoc(), 0); auto neg1 = rewriter.createOrFold<mlir::ConstantIndexOp>(op.getLoc(), -1);
diff --git a/iree/compiler/Dialect/HAL/IR/HALOps.cpp b/iree/compiler/Dialect/HAL/IR/HALOps.cpp index e172be6..3dec9ce 100644 --- a/iree/compiler/Dialect/HAL/IR/HALOps.cpp +++ b/iree/compiler/Dialect/HAL/IR/HALOps.cpp
@@ -7,8 +7,9 @@ #include "iree/compiler/Dialect/HAL/IR/HALOps.h" #include "iree/compiler/Dialect/HAL/IR/HALTypes.h" -#include "iree/compiler/Dialect/IREE/IR/IREETypes.h" #include "iree/compiler/Dialect/Shape/IR/Builders.h" +#include "iree/compiler/Dialect/Util/IR/UtilTypes.h" +#include "llvm/ADT/Hashing.h" #include "llvm/ADT/STLExtras.h" #include "llvm/Support/SMLoc.h" #include "mlir/Dialect/StandardOps/IR/Ops.h" @@ -294,7 +295,7 @@ } Value TensorCastOp::getTiedResult(unsigned resultIndex) { - return IREE::TiedOpInterface::findTiedBaseValue(source()); + return IREE::Util::TiedOpInterface::findTiedBaseValue(source()); } ::llvm::Optional<unsigned> TensorCastOp::getTiedResultOperandIndex( @@ -513,7 +514,7 @@ static LogicalResult verifyVariableLoadIndirectOp(VariableLoadIndirectOp &op) { auto variableType = - op.variable().getType().cast<IREE::PtrType>().getTargetType(); + op.variable().getType().cast<IREE::Util::PtrType>().getTargetType(); auto loadType = op.result().getType(); if (!isVariableTypeCompatible(variableType, loadType)) { return op.emitOpError() << "variable type mismatch; variable pointer is " @@ -552,7 +553,7 @@ static LogicalResult verifyVariableStoreIndirectOp( VariableStoreIndirectOp &op) { auto variableType = - op.variable().getType().cast<IREE::PtrType>().getTargetType(); + op.variable().getType().cast<IREE::Util::PtrType>().getTargetType(); auto storeType = op.value().getType(); if (!isVariableTypeCompatible(variableType, storeType)) { return op.emitOpError() << "variable type mismatch; variable pointer is " @@ -1485,6 +1486,15 @@ }); } +llvm::hash_code InterfaceOp::getInterfaceHash() { + auto range = llvm::map_range(getBlock().getOps<InterfaceBindingOp>(), + [](InterfaceBindingOp bindingOp) { + return bindingOp.getDescriptorHash(); + }); + return llvm::hash_combine( + push_constants(), llvm::hash_combine_range(range.begin(), range.end())); +} + //===----------------------------------------------------------------------===// // 
hal.interface.binding //===----------------------------------------------------------------------===// @@ -1537,6 +1547,13 @@ }); } +llvm::hash_code InterfaceBindingOp::getDescriptorHash() { + // Use the unwrapped attribute accessors so that we can have deterministic + // hashes. Hashing against the wrapped attributes is hashing against pointer + // values, which change per run. + return llvm::hash_combine(set(), binding(), type(), access()); +} + //===----------------------------------------------------------------------===// // hal.interface.binding.subspan //===----------------------------------------------------------------------===//
diff --git a/iree/compiler/Dialect/HAL/IR/HALOps.h b/iree/compiler/Dialect/HAL/IR/HALOps.h index d739200..d25ea13 100644 --- a/iree/compiler/Dialect/HAL/IR/HALOps.h +++ b/iree/compiler/Dialect/HAL/IR/HALOps.h
@@ -11,9 +11,9 @@ #include "iree/compiler/Dialect/HAL/IR/HALTraits.h" #include "iree/compiler/Dialect/HAL/IR/HALTypes.h" -#include "iree/compiler/Dialect/IREE/IR/IREETraits.h" #include "iree/compiler/Dialect/Shape/IR/ShapeDialect.h" #include "iree/compiler/Dialect/Shape/IR/ShapeTypes.h" +#include "iree/compiler/Dialect/Util/IR/UtilTraits.h" #include "mlir/IR/Attributes.h" #include "mlir/IR/BuiltinOps.h" #include "mlir/IR/BuiltinTypes.h"
diff --git a/iree/compiler/Dialect/HAL/IR/HALOps.td b/iree/compiler/Dialect/HAL/IR/HALOps.td index ac62c81..199355c 100644 --- a/iree/compiler/Dialect/HAL/IR/HALOps.td +++ b/iree/compiler/Dialect/HAL/IR/HALOps.td
@@ -9,7 +9,7 @@ include "iree/compiler/Dialect/HAL/IR/HALBase.td" include "iree/compiler/Dialect/HAL/IR/HALInterfaces.td" -include "iree/compiler/Dialect/IREE/IR/IREEInterfaces.td" +include "iree/compiler/Dialect/Util/IR/UtilInterfaces.td" include "iree/compiler/Dialect/Shape/IR/ShapeInterfaces.td" include "mlir/IR/OpAsmInterface.td" include "mlir/IR/SymbolInterfaces.td" @@ -60,7 +60,7 @@ def HAL_TensorCastOp : HAL_PureOp<"tensor.cast", [ AttrSizedOperandSegments, - DeclareOpInterfaceMethods<IREE_TiedOpInterface, [ + DeclareOpInterfaceMethods<Util_TiedOpInterface, [ "getTiedResult", "getTiedResultOperandIndex", "getTiedResultOperandIndices", @@ -291,7 +291,7 @@ }]; let arguments = (ins - IREE_Status:$status, + Util_Status:$status, OptionalAttr<StrAttr>:$message ); @@ -2272,6 +2272,10 @@ // Returns true if the all bindings in the interface match exactly those // in |other| (including order). bool isEquivalentTo(IREE::HAL::InterfaceOp other); + + // Returns a hash for the interface, considering the push constant and + // all bindings. + llvm::hash_code getInterfaceHash(); }]; } @@ -2311,6 +2315,12 @@ HAL_DescriptorTypeAttr:$type, HAL_MemoryAccessBitfieldAttr:$access ); + + let extraClassDeclaration = [{ + /// Returns a hash for the descriptor, considering the set, binding, + /// type, and access. + llvm::hash_code getDescriptorHash(); + }]; } def HAL_InterfaceWorkgroupIDOp : HAL_PureOp<"interface.workgroup.id", [ @@ -2549,7 +2559,7 @@ HAL_Semaphore:$semaphore ); let results = (outs - IREE_Status:$status, + Util_Status:$status, HAL_TimelineValue:$value ); @@ -2589,7 +2599,7 @@ let arguments = (ins HAL_Semaphore:$semaphore, - IREE_Status:$status + Util_Status:$status ); let assemblyFormat = [{ @@ -2614,7 +2624,7 @@ HAL_TimelineValue:$min_value ); let results = (outs - IREE_Status:$status + Util_Status:$status ); let assemblyFormat = [{
diff --git a/iree/compiler/Dialect/HAL/IR/HALTypes.cpp b/iree/compiler/Dialect/HAL/IR/HALTypes.cpp index d3c7d2a..a4952ec 100644 --- a/iree/compiler/Dialect/HAL/IR/HALTypes.cpp +++ b/iree/compiler/Dialect/HAL/IR/HALTypes.cpp
@@ -8,7 +8,7 @@ #include "iree/compiler/Dialect/HAL/IR/HALDialect.h" #include "iree/compiler/Dialect/HAL/IR/HALOps.h" -#include "iree/compiler/Dialect/IREE/IR/IREEOps.h" +#include "iree/compiler/Dialect/Util/IR/UtilOps.h" #include "llvm/ADT/StringExtras.h" #include "mlir/Dialect/StandardOps/IR/Ops.h" #include "mlir/IR/Builders.h" @@ -374,7 +374,7 @@ } // Skip do-not-optimize ops. - if (auto dnoOp = dyn_cast<IREE::DoNotOptimizeOp>(definingOp)) { + if (auto dnoOp = dyn_cast<IREE::Util::DoNotOptimizeOp>(definingOp)) { return lookupValueSize(dnoOp.getOperand(0)); }
diff --git a/iree/compiler/Dialect/HAL/IR/HALTypes.h b/iree/compiler/Dialect/HAL/IR/HALTypes.h index 07b6095..d8d4608 100644 --- a/iree/compiler/Dialect/HAL/IR/HALTypes.h +++ b/iree/compiler/Dialect/HAL/IR/HALTypes.h
@@ -9,7 +9,7 @@ #include <cstdint> -#include "iree/compiler/Dialect/IREE/IR/IREETypes.h" +#include "iree/compiler/Dialect/Util/IR/UtilTypes.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/DenseMapInfo.h" #include "llvm/ADT/Optional.h"
diff --git a/iree/compiler/Dialect/HAL/IR/LoweringConfig.td b/iree/compiler/Dialect/HAL/IR/LoweringConfig.td index 1bf8744..d0f4111 100644 --- a/iree/compiler/Dialect/HAL/IR/LoweringConfig.td +++ b/iree/compiler/Dialect/HAL/IR/LoweringConfig.td
@@ -18,6 +18,8 @@ : I32EnumAttrCase<"LLVMGPUDistribute", 2>; def LLVMGPU_Vectorize : I32EnumAttrCase<"LLVMGPUVectorize", 3>; +def LLVMGPU_MatmulSimt + : I32EnumAttrCase<"LLVMGPUMatmulSimt", 4>; // EnumAttrCase for all known lowerings for ops within dispatch region // to scalar/native-vector code. @@ -25,7 +27,7 @@ "DispatchLoweringPassPipeline", "identifier for pass pipeline use to lower dispatch region", [CPU_Default, CPU_Vectorization, LLVMGPU_SimpleDistribute, - LLVMGPU_Vectorize]> { + LLVMGPU_Vectorize, LLVMGPU_MatmulSimt]> { let cppNamespace = "::mlir::iree_compiler::IREE::HAL"; }
diff --git a/iree/compiler/Dialect/HAL/IR/test/allocator_op_folding.mlir b/iree/compiler/Dialect/HAL/IR/test/allocator_op_folding.mlir index 02301a3..2df6fb6 100644 --- a/iree/compiler/Dialect/HAL/IR/test/allocator_op_folding.mlir +++ b/iree/compiler/Dialect/HAL/IR/test/allocator_op_folding.mlir
@@ -3,9 +3,9 @@ // CHECK-LABEL: @allocator_constant_buffer // CHECK-SAME: %[[ALLOCATOR:.+]]: !hal.allocator func @allocator_constant_buffer(%allocator: !hal.allocator) -> !hal.buffer { - // CHECK: %[[RODATA:.+]] = iree.byte_buffer.constant : !iree.byte_buffer = dense<123> : tensor<4x4xi32> + // CHECK: %[[RODATA:.+]] = util.byte_buffer.constant : !util.byte_buffer = dense<123> : tensor<4x4xi32> // CHECK-NEXT: %[[BUFFER:.+]] = hal.allocator.map<%[[ALLOCATOR]] : !hal.allocator> - // CHECK-SAME: source(%[[RODATA]] : !iree.byte_buffer)[%c0, %c-1] + // CHECK-SAME: source(%[[RODATA]] : !util.byte_buffer)[%c0, %c-1] // CHECK-SAME: type("HostVisible|DeviceVisible|DeviceLocal") // CHECK-SAME: usage("Constant|Transfer|Mapping|Dispatch") // CHECK-SAME: : !hal.buffer @@ -21,9 +21,9 @@ // CHECK-LABEL: @allocator_constant_buffer_view // CHECK-SAME: %[[ALLOCATOR:.+]]: !hal.allocator func @allocator_constant_buffer_view(%allocator: !hal.allocator) -> !hal.buffer_view { - // CHECK: %[[RODATA:.+]] = iree.byte_buffer.constant : !iree.byte_buffer = dense<123> : tensor<4x4xi32> + // CHECK: %[[RODATA:.+]] = util.byte_buffer.constant : !util.byte_buffer = dense<123> : tensor<4x4xi32> // CHECK-NEXT: %[[BUFFER:.+]] = hal.allocator.map<%[[ALLOCATOR]] : !hal.allocator> - // CHECK-SAME: source(%[[RODATA]] : !iree.byte_buffer)[%c0, %c-1] + // CHECK-SAME: source(%[[RODATA]] : !util.byte_buffer)[%c0, %c-1] // CHECK-SAME: type("HostVisible|DeviceVisible|DeviceLocal") // CHECK-SAME: usage("Constant|Transfer|Mapping|Dispatch") // CHECK-SAME: : !hal.buffer
diff --git a/iree/compiler/Dialect/HAL/IR/test/allocator_ops.mlir b/iree/compiler/Dialect/HAL/IR/test/allocator_ops.mlir index 66abe14..69d454f 100644 --- a/iree/compiler/Dialect/HAL/IR/test/allocator_ops.mlir +++ b/iree/compiler/Dialect/HAL/IR/test/allocator_ops.mlir
@@ -110,18 +110,18 @@ // CHECK-LABEL: @allocator_map_byte_buffer // CHECK-SAME: %[[ALLOCATOR:.+]]: !hal.allocator -func @allocator_map_byte_buffer(%allocator: !hal.allocator, %arg1: !iree.byte_buffer) { +func @allocator_map_byte_buffer(%allocator: !hal.allocator, %arg1: !util.byte_buffer) { // CHECK-DAG: %[[OFFSET:.+]] = constant 100 %offset = constant 100 : index // CHECK-DAG: %[[LENGTH:.+]] = constant 200 %length = constant 200 : index // CHECK: = hal.allocator.map<%[[ALLOCATOR]] : !hal.allocator> - // CHECK-SAME: source(%arg1 : !iree.byte_buffer)[%[[OFFSET]], %[[LENGTH]]] + // CHECK-SAME: source(%arg1 : !util.byte_buffer)[%[[OFFSET]], %[[LENGTH]]] // CHECK-SAME: type("DeviceVisible|DeviceLocal") // CHECK-SAME: usage(Transfer) // CHECK-SAME: : !hal.buffer %ref = hal.allocator.map<%allocator : !hal.allocator> - source(%arg1 : !iree.byte_buffer)[%offset, %length] + source(%arg1 : !util.byte_buffer)[%offset, %length] type(DeviceLocal) usage(Transfer) : !hal.buffer return }
diff --git a/iree/compiler/Dialect/HAL/IR/test/constant_ops.mlir b/iree/compiler/Dialect/HAL/IR/test/constant_ops.mlir index 6c26f70..125027e 100644 --- a/iree/compiler/Dialect/HAL/IR/test/constant_ops.mlir +++ b/iree/compiler/Dialect/HAL/IR/test/constant_ops.mlir
@@ -67,8 +67,8 @@ // CHECK: func @storage_lookup func @storage_lookup() { - // CHECK-NEXT: = hal.constant_storage.lookup @pool::@_storage1 : !iree.byte_buffer - %storage = hal.constant_storage.lookup @pool::@_storage1 : !iree.byte_buffer + // CHECK-NEXT: = hal.constant_storage.lookup @pool::@_storage1 : !util.byte_buffer + %storage = hal.constant_storage.lookup @pool::@_storage1 : !util.byte_buffer return }
diff --git a/iree/compiler/Dialect/HAL/IR/test/invalid.mlir b/iree/compiler/Dialect/HAL/IR/test/invalid.mlir index 2341e0e..e1b918f 100644 --- a/iree/compiler/Dialect/HAL/IR/test/invalid.mlir +++ b/iree/compiler/Dialect/HAL/IR/test/invalid.mlir
@@ -10,8 +10,8 @@ // ----- hal.variable @var mutable : !hal.buffer func @fn(%arg0: !hal.buffer_view) { - %0 = hal.variable.address @var_indirect_with_buffer_view_store : !iree.ptr<!hal.buffer> + %0 = hal.variable.address @var_indirect_with_buffer_view_store : !util.ptr<!hal.buffer> // expected-error @+1 {{variable pointer is '!hal.buffer' but store is '!hal.buffer_view'}} - hal.variable.store.indirect %arg0, %0 : !hal.buffer_view -> !iree.ptr<!hal.buffer> + hal.variable.store.indirect %arg0, %0 : !hal.buffer_view -> !util.ptr<!hal.buffer> return }
diff --git a/iree/compiler/Dialect/HAL/IR/test/variable_folding.mlir b/iree/compiler/Dialect/HAL/IR/test/variable_folding.mlir index 887006f..2d7a1f9 100644 --- a/iree/compiler/Dialect/HAL/IR/test/variable_folding.mlir +++ b/iree/compiler/Dialect/HAL/IR/test/variable_folding.mlir
@@ -35,9 +35,9 @@ hal.variable @v : !hal.buffer // CHECK-LABEL: @fold_load_indirect func @fold_load_indirect() -> !hal.buffer { - %0 = hal.variable.address @v : !iree.ptr<!hal.buffer> + %0 = hal.variable.address @v : !util.ptr<!hal.buffer> // CHECK-NEXT: = hal.variable.load @v - %1 = hal.variable.load.indirect %0 : !iree.ptr<!hal.buffer> -> !hal.buffer + %1 = hal.variable.load.indirect %0 : !util.ptr<!hal.buffer> -> !hal.buffer return %1 : !hal.buffer } @@ -46,8 +46,8 @@ hal.variable @v mutable : !hal.buffer // CHECK-LABEL: @fold_store_indirect func @fold_store_indirect(%arg0 : !hal.buffer) { - %0 = hal.variable.address @v : !iree.ptr<!hal.buffer> + %0 = hal.variable.address @v : !util.ptr<!hal.buffer> // CHECK-NEXT: hal.variable.store %arg0, @v - hal.variable.store.indirect %arg0, %0 : !hal.buffer -> !iree.ptr<!hal.buffer> + hal.variable.store.indirect %arg0, %0 : !hal.buffer -> !util.ptr<!hal.buffer> return }
diff --git a/iree/compiler/Dialect/HAL/IR/test/variable_ops.mlir b/iree/compiler/Dialect/HAL/IR/test/variable_ops.mlir index 4850682..198ed0e 100644 --- a/iree/compiler/Dialect/HAL/IR/test/variable_ops.mlir +++ b/iree/compiler/Dialect/HAL/IR/test/variable_ops.mlir
@@ -52,9 +52,9 @@ // CHECK-LABEL: @loaded_indirect func @loaded_indirect() { // CHECK-NEXT: %[[ADDR:.+]] = hal.variable.address @v_loaded - %0 = hal.variable.address @v_loaded : !iree.ptr<!hal.buffer> + %0 = hal.variable.address @v_loaded : !util.ptr<!hal.buffer> // CHECK-NEXT: = hal.variable.load.indirect %[[ADDR]] - %1 = hal.variable.load.indirect %0 : !iree.ptr<!hal.buffer> -> !hal.buffer + %1 = hal.variable.load.indirect %0 : !util.ptr<!hal.buffer> -> !hal.buffer return } @@ -66,8 +66,8 @@ // CHECK-NEXT: %[[BUF:.+]] = "test_hal.buffer" %0 = "test_hal.buffer"() : () -> !hal.buffer // CHECK-NEXT: %[[ADDR:.+]] = hal.variable.address @v_stored - %1 = hal.variable.address @v_stored : !iree.ptr<!hal.buffer> + %1 = hal.variable.address @v_stored : !util.ptr<!hal.buffer> // CHECK-NEXT: hal.variable.store.indirect %[[BUF]], %[[ADDR]] - hal.variable.store.indirect %0, %1 : !hal.buffer -> !iree.ptr<!hal.buffer> + hal.variable.store.indirect %0, %1 : !hal.buffer -> !util.ptr<!hal.buffer> return }
diff --git a/iree/compiler/Dialect/HAL/Target/BUILD b/iree/compiler/Dialect/HAL/Target/BUILD index 7b47cc7..f804469 100644 --- a/iree/compiler/Dialect/HAL/Target/BUILD +++ b/iree/compiler/Dialect/HAL/Target/BUILD
@@ -24,7 +24,7 @@ "//iree/compiler/Dialect/Flow/IR", "//iree/compiler/Dialect/HAL/IR", "//iree/compiler/Dialect/HAL/Utils", - "//iree/compiler/Dialect/IREE/IR", + "//iree/compiler/Dialect/Util/IR", "@llvm-project//llvm:Support", "@llvm-project//mlir:IR", "@llvm-project//mlir:Pass",
diff --git a/iree/compiler/Dialect/HAL/Target/CMakeLists.txt b/iree/compiler/Dialect/HAL/Target/CMakeLists.txt index ef76291..2227647 100644 --- a/iree/compiler/Dialect/HAL/Target/CMakeLists.txt +++ b/iree/compiler/Dialect/HAL/Target/CMakeLists.txt
@@ -28,7 +28,7 @@ iree::compiler::Dialect::Flow::IR iree::compiler::Dialect::HAL::IR iree::compiler::Dialect::HAL::Utils - iree::compiler::Dialect::IREE::IR + iree::compiler::Dialect::Util::IR PUBLIC )
diff --git a/iree/compiler/Dialect/HAL/Target/VMVX/test/linking.mlir b/iree/compiler/Dialect/HAL/Target/VMVX/test/linking.mlir index 101cacb..dcc219c 100644 --- a/iree/compiler/Dialect/HAL/Target/VMVX/test/linking.mlir +++ b/iree/compiler/Dialect/HAL/Target/VMVX/test/linking.mlir
@@ -89,7 +89,7 @@ // CHECK-NEXT: hal.executable.entry_point @dispatch_1 attributes {interface = @io_0, ordinal = 1 : index} // CHECK-NEXT: hal.executable.entry_point @dispatch_2 attributes {interface = @io_1, ordinal = 2 : index} // CHECK-NEXT: module { -// CHECK-NEXT: vm.module @linked_module { +// CHECK-NEXT: vm.module public @linked_module { // CHECK-NEXT: vm.func @dispatch_0() { // CHECK-NEXT: vm.return // CHECK-NEXT: } @@ -197,7 +197,7 @@ // CHECK-NEXT: hal.executable.entry_point @dispatch_0 attributes {interface = @io_0, ordinal = 0 : index} // CHECK-NEXT: hal.executable.entry_point @dispatch_1 attributes {interface = @io_1, ordinal = 1 : index} // CHECK-NEXT: module { -// CHECK-NEXT: vm.module @linked_module { +// CHECK-NEXT: vm.module public @linked_module { // CHECK-NEXT: vm.func @dispatch_0() { // CHECK-NEXT: vm.return // CHECK-NEXT: } @@ -361,7 +361,7 @@ // CHECK: hal.executable @vmvx_linked attributes {sym_visibility = "private"} { // CHECK: hal.executable.variant @vmvx_bytecode_fb, target = #executable_target_vmvx_bytecode_fb { // CHECK: module { -// CHECK-NEXT: vm.module @linked_module { +// CHECK-NEXT: vm.module public @linked_module { // CHECK-NEXT: vm.rodata public @rodata_a dense<0> : tensor<1xi32> // CHECK-NEXT: vm.rodata public @rodata_b dense<0> : tensor<1xi32> // CHECK-NEXT: vm.rodata public @rodata_b_0 dense<0> : tensor<1xi32>
diff --git a/iree/compiler/Dialect/HAL/Target/VMVX/test/smoketest.mlir b/iree/compiler/Dialect/HAL/Target/VMVX/test/smoketest.mlir index 1fba155..f2e74bd 100644 --- a/iree/compiler/Dialect/HAL/Target/VMVX/test/smoketest.mlir +++ b/iree/compiler/Dialect/HAL/Target/VMVX/test/smoketest.mlir
@@ -46,8 +46,8 @@ // CHECK-SAME: ordinal = 0 : index // CHECK-SAME: } // CHECK: module { -// CHECK-NEXT: vm.module @module { -// CHECK-NEXT: vm.func @entry( +// CHECK-NEXT: vm.module public @module { +// CHECK-NEXT: vm.func private @entry( // CHECK-SAME: %[[SCRATCHPAD:.+]]: !vm.buffer, %[[CONSTANTS:.+]]: !vm.buffer, // CHECK-SAME: %[[BINDINGS:.+]]: !vm.list<!vm.buffer> // CHECK-DAG: %c16 = vm.const.i32 16 : i32
diff --git a/iree/compiler/Dialect/HAL/Target/VulkanSPIRV/BUILD b/iree/compiler/Dialect/HAL/Target/VulkanSPIRV/BUILD index f2dbc29..d896b64 100644 --- a/iree/compiler/Dialect/HAL/Target/VulkanSPIRV/BUILD +++ b/iree/compiler/Dialect/HAL/Target/VulkanSPIRV/BUILD
@@ -47,6 +47,7 @@ "@llvm-project//mlir:LinalgOps", "@llvm-project//mlir:Parser", "@llvm-project//mlir:SPIRVDialect", + "@llvm-project//mlir:SPIRVModuleCombiner", "@llvm-project//mlir:SPIRVSerialization", "@llvm-project//mlir:Support", "@llvm-project//mlir:VectorOps",
diff --git a/iree/compiler/Dialect/HAL/Target/VulkanSPIRV/CMakeLists.txt b/iree/compiler/Dialect/HAL/Target/VulkanSPIRV/CMakeLists.txt index e74b7ee..13c0474 100644 --- a/iree/compiler/Dialect/HAL/Target/VulkanSPIRV/CMakeLists.txt +++ b/iree/compiler/Dialect/HAL/Target/VulkanSPIRV/CMakeLists.txt
@@ -29,6 +29,7 @@ MLIRLinalg MLIRParser MLIRSPIRV + MLIRSPIRVModuleCombiner MLIRSPIRVSerialization MLIRSupport MLIRVector
diff --git a/iree/compiler/Dialect/HAL/Target/VulkanSPIRV/VulkanSPIRVTarget.cpp b/iree/compiler/Dialect/HAL/Target/VulkanSPIRV/VulkanSPIRVTarget.cpp index c46d4c2..1f4b8ea 100644 --- a/iree/compiler/Dialect/HAL/Target/VulkanSPIRV/VulkanSPIRVTarget.cpp +++ b/iree/compiler/Dialect/HAL/Target/VulkanSPIRV/VulkanSPIRVTarget.cpp
@@ -14,17 +14,23 @@ #include "iree/compiler/Dialect/Vulkan/Utils/TargetEnvironment.h" #include "iree/compiler/Utils/FlatbufferUtils.h" #include "iree/schemas/spirv_executable_def_builder.h" +#include "llvm/ADT/DenseMap.h" #include "llvm/ADT/STLExtras.h" #include "llvm/Support/CommandLine.h" +#include "llvm/Support/FormatVariadic.h" +#include "llvm/Support/Path.h" #include "mlir/Dialect/Affine/IR/AffineOps.h" #include "mlir/Dialect/GPU/GPUDialect.h" #include "mlir/Dialect/Linalg/IR/LinalgTypes.h" #include "mlir/Dialect/SPIRV/IR/SPIRVDialect.h" #include "mlir/Dialect/SPIRV/IR/SPIRVOps.h" #include "mlir/Dialect/SPIRV/IR/TargetAndABI.h" +#include "mlir/Dialect/SPIRV/Linking/ModuleCombiner.h" #include "mlir/Dialect/Vector/VectorOps.h" #include "mlir/IR/BlockAndValueMapping.h" #include "mlir/IR/Builders.h" +#include "mlir/IR/BuiltinOps.h" +#include "mlir/IR/SymbolTable.h" #include "mlir/Parser.h" #include "mlir/Target/SPIRV/Serialization.h" @@ -33,6 +39,32 @@ namespace IREE { namespace HAL { +namespace { +llvm::Optional<FileLineColLoc> findFirstFileLoc(Location baseLoc) { + if (auto loc = baseLoc.dyn_cast<FusedLoc>()) { + for (auto &childLoc : loc.getLocations()) { + auto childResult = findFirstFileLoc(childLoc); + if (childResult) return childResult; + } + } else if (auto loc = baseLoc.dyn_cast<FileLineColLoc>()) { + return loc; + } + return llvm::None; +} + +std::string guessModuleName(mlir::ModuleOp moduleOp) { + std::string moduleName = + moduleOp.getName().hasValue() ? 
moduleOp.getName().getValue().str() : ""; + if (!moduleName.empty()) return moduleName; + auto loc = findFirstFileLoc(moduleOp.getLoc()); + if (loc.hasValue()) { + return llvm::sys::path::stem(loc.getValue().getFilename()).str(); + } else { + return "spirv_module"; + } +} +} // namespace + VulkanSPIRVTargetOptions getVulkanSPIRVTargetOptionsFromFlags() { // TODO(antiagainst): Enable option categories once the following bug is // fixed: https://bugs.llvm.org/show_bug.cgi?id=44223 static @@ -123,10 +155,131 @@ buildSPIRVCodegenPassPipeline(passManager, options_.codegenOptions); } + // TODO(antiagainst): Re-enable SPIR-V linking once the tensorflow integration + // crash is fixed. +/* + LogicalResult linkExecutables(mlir::ModuleOp moduleOp) override { + // Note: Vulkan flavored SPIR-V does not have linking in the conventional + // sense. For example, there is no cross-module symbol reference and symbol + // resolution and such. It's more just combining all SPIR-V modules into the + // one, with multiple entry points. + + // 1. Create source executable groups according to their executable + // interface. We only combine executables in the same group. + + // Map from an executable interface's hash to all source executables having + // that interface. 
+ llvm::DenseMap<llvm::hash_code, SmallVector<IREE::HAL::ExecutableOp, 4>> + sourceExecutableOpGroups; + + int numExecutables = 0; + for (auto op : moduleOp.getOps<IREE::HAL::ExecutableOp>()) { + auto interfaceOps = + llvm::to_vector<1>(op.getBlock().getOps<IREE::HAL::InterfaceOp>()); + if (!llvm::hasSingleElement(interfaceOps)) { + return op->emitError("only one hal.interface is supported now"); + } + + llvm::hash_code hash = interfaceOps.front().getInterfaceHash(); + sourceExecutableOpGroups[hash].push_back(op); + + ++numExecutables; + } + if (numExecutables <= 1) return success(); + + SymbolTable symbolTable(moduleOp); + + auto sharedTargetsAttr = getExecutableTargets(moduleOp.getContext()); + if (llvm::size(sharedTargetsAttr) != 1) { + return moduleOp.emitError("only one executable target is supported now"); + } + + auto sharedTargetAttr = sharedTargetsAttr.getValue() + .front() + .cast<IREE::HAL::ExecutableTargetAttr>(); + + // Guess a module name, if needed, to make the output files readable. + auto moduleName = guessModuleName(moduleOp); + + // 2. Create "linked" executables for each source executable group. + // This just pulls in spv.module ops that should be combined into the same + // hal.executable.variant inner module. + + SmallVector<mlir::ModuleOp, 8> innerModuleOps; + innerModuleOps.reserve(sourceExecutableOpGroups.size()); + for (auto hashExecutablePair : sourceExecutableOpGroups) { + llvm::hash_code hash = hashExecutablePair.first; + const auto &sourceExecutableOps = hashExecutablePair.second; + + // Just one executable for this group. No need to link. + if (sourceExecutableOps.size() == 1) continue; + + OpBuilder builder(moduleOp.getContext()); + + // Create a new "linked" hal.executable for collecting all source + // executables in this group. 
+ std::string linkedExecutableName = + llvm::formatv("{0}_linked_{1}", moduleName, name()); + auto linkedExecutableOp = builder.create<IREE::HAL::ExecutableOp>( + moduleOp.getLoc(), linkedExecutableName); + symbolTable.insert(linkedExecutableOp, moduleOp.getBody()->begin()); + + // Add our hal.executable.variant with an empty module. + builder.setInsertionPointToStart(linkedExecutableOp.getBody()); + auto linkedTargetOp = builder.create<IREE::HAL::ExecutableVariantOp>( + moduleOp.getLoc(), sharedTargetAttr.getSymbolNameFragment(), + sharedTargetAttr); + builder.setInsertionPoint(&linkedTargetOp.getBlock().back()); + innerModuleOps.push_back( + builder.create<mlir::ModuleOp>(moduleOp.getLoc())); + + // Try linking together all executables in moduleOp. + if (failed(linkExecutablesInto( + moduleOp, sourceExecutableOps, linkedExecutableOp, linkedTargetOp, + [](mlir::ModuleOp moduleOp) { return moduleOp; }, builder))) + return failure(); + } + + // 3. Now we can have multiple spv.module ops in the same + // hal.executable.variant inner module. Combining them into one. + + auto symbolRenameListener = [](spirv::ModuleOp symbolTable, + StringRef oldSymbol, StringRef newSymbol) { + // We don't care about global variable renaming. There should not exist + // duplicated functions. But double check that. + if (Operation *op = SymbolTable::lookupSymbolIn(symbolTable, oldSymbol)) { + assert(!isa<spirv::FuncOp>(op) && + "found duplicated spv.func names when linking!"); + } + }; + + for (mlir::ModuleOp innerModule : innerModuleOps) { + auto spvModules = + llvm::to_vector<4>(innerModule.getBody()->getOps<spirv::ModuleOp>()); + if (spvModules.size() <= 1) continue; + + OpBuilder builder(innerModule); + auto newModule = builder.create<mlir::ModuleOp>(innerModule.getLoc()); + + // Create the combined spv.module op and erase the old inner module. 
+ builder.setInsertionPointToStart(newModule.getBody()); + spirv::combine(spvModules, builder, symbolRenameListener).release(); + innerModule.erase(); + } + + return success(); + } +*/ + LogicalResult serializeExecutable(IREE::HAL::ExecutableVariantOp variantOp, OpBuilder &executableBuilder) override { ModuleOp innerModuleOp = variantOp.getInnerModule(); - auto spvModuleOp = *innerModuleOp.getOps<spirv::ModuleOp>().begin(); + auto spirvModuleOps = innerModuleOp.getOps<spirv::ModuleOp>(); + if (!llvm::hasSingleElement(spirvModuleOps)) { + return variantOp.emitError() + << "should only contain exactly one spv.module op"; + } + auto spvModuleOp = *spirvModuleOps.begin(); FlatbufferBuilder builder; iree_SpirVExecutableDef_start_as_root(builder);
diff --git a/iree/compiler/Dialect/HAL/Target/VulkanSPIRV/test/BUILD b/iree/compiler/Dialect/HAL/Target/VulkanSPIRV/test/BUILD index 5a93026..524af16 100644 --- a/iree/compiler/Dialect/HAL/Target/VulkanSPIRV/test/BUILD +++ b/iree/compiler/Dialect/HAL/Target/VulkanSPIRV/test/BUILD
@@ -17,6 +17,7 @@ name = "lit", srcs = enforce_glob( [ + "linking.mlir", "smoketest.mlir", ], include = ["*.mlir"],
diff --git a/iree/compiler/Dialect/HAL/Target/VulkanSPIRV/test/CMakeLists.txt b/iree/compiler/Dialect/HAL/Target/VulkanSPIRV/test/CMakeLists.txt index c2c0cf1..7c55f8b 100644 --- a/iree/compiler/Dialect/HAL/Target/VulkanSPIRV/test/CMakeLists.txt +++ b/iree/compiler/Dialect/HAL/Target/VulkanSPIRV/test/CMakeLists.txt
@@ -14,6 +14,7 @@ NAME lit SRCS + "linking.mlir" "smoketest.mlir" DATA iree::tools::IreeFileCheck
diff --git a/iree/compiler/Dialect/HAL/Target/VulkanSPIRV/test/linking.mlir b/iree/compiler/Dialect/HAL/Target/VulkanSPIRV/test/linking.mlir new file mode 100644 index 0000000..d333768 --- /dev/null +++ b/iree/compiler/Dialect/HAL/Target/VulkanSPIRV/test/linking.mlir
@@ -0,0 +1,188 @@ +// TODO(antiagainst): Re-enable SPIR-V linking once the tensorflow integration +// crash is fixed. +// RUN-disabled: iree-opt -split-input-file -iree-hal-link-target-executables='target=vulkan-spirv' %s | IreeFileCheck %s +// RUN: iree-opt -split-input-file %s + +#executable_target_vulkan_spirv_fb = #hal.executable.target<"vulkan", "vulkan-spirv-fb"> + +hal.executable @call_dispatch_0 attributes {sym_visibility = "private"} { + hal.interface @io { + hal.interface.binding @s0b0_ro_external, set=0, binding=0, type="StorageBuffer", access="Read" + hal.interface.binding @s0b1_rw_external, set=0, binding=1, type="StorageBuffer", access="Read|Write" + } + hal.executable.variant @vulkan_spirv_fb, target = #executable_target_vulkan_spirv_fb { + hal.executable.entry_point @call_dispatch_0 attributes {interface = @io, ordinal = 0 : index} + module { + spv.module Logical GLSL450 requires #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]> { + spv.func @call_dispatch_0() "None" { + spv.Return + } + spv.EntryPoint "GLCompute" @call_dispatch_0 + spv.ExecutionMode @call_dispatch_0 "LocalSize", 32, 1, 1 + } + hal.interface @io attributes {sym_visibility = "private"} { + hal.interface.binding @s0b0_ro_external, set=0, binding=0, type="StorageBuffer", access="Read" + hal.interface.binding @s0b1_rw_external, set=0, binding=1, type="StorageBuffer", access="Read|Write" + } + } + } +} +hal.executable @call_dispatch_1 attributes {sym_visibility = "private"} { + hal.interface @io { + hal.interface.binding @s0b0_ro_constant, set=0, binding=0, type="StorageBuffer", access="Read" + hal.interface.binding @s0b1_ro_external, set=0, binding=1, type="StorageBuffer", access="Read" + hal.interface.binding @s0b2_xw_external, set=0, binding=2, type="StorageBuffer", access="Write|Discard" + } + hal.executable.variant @vulkan_spirv_fb, target = #executable_target_vulkan_spirv_fb { + hal.executable.entry_point @call_dispatch_1 attributes {interface = @io, ordinal = 0 : 
index} + module { + spv.module Logical GLSL450 requires #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]> { + spv.func @call_dispatch_1() "None" { + spv.Return + } + spv.EntryPoint "GLCompute" @call_dispatch_1 + spv.ExecutionMode @call_dispatch_1 "LocalSize", 4, 4, 1 + } + hal.interface @io attributes {sym_visibility = "private"} { + hal.interface.binding @s0b0_ro_constant, set=0, binding=0, type="StorageBuffer", access="Read" + hal.interface.binding @s0b1_ro_external, set=0, binding=1, type="StorageBuffer", access="Read" + hal.interface.binding @s0b2_xw_external, set=0, binding=2, type="StorageBuffer", access="Write|Discard" + } + } + } +} +hal.executable @call_dispatch_2 attributes {sym_visibility = "private"} { + hal.interface @io { + hal.interface.binding @s0b0_ro_external, set=0, binding=0, type="StorageBuffer", access="Read" + hal.interface.binding @s0b1_rw_external, set=0, binding=1, type="StorageBuffer", access="Read|Write" + } + hal.executable.variant @vulkan_spirv_fb, target = #executable_target_vulkan_spirv_fb { + hal.executable.entry_point @call_dispatch_2 attributes {interface = @io, ordinal = 0 : index} + module { + spv.module Logical GLSL450 requires #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]> { + spv.func @call_dispatch_2() "None" { + spv.Return + } + spv.EntryPoint "GLCompute" @call_dispatch_2 + spv.ExecutionMode @call_dispatch_2 "LocalSize", 32, 1, 1 + } + hal.interface @io attributes {sym_visibility = "private"} { + hal.interface.binding @s0b0_ro_external, set=0, binding=0, type="StorageBuffer", access="Read" + hal.interface.binding @s0b1_rw_external, set=0, binding=1, type="StorageBuffer", access="Read|Write" + } + } + } +} +hal.executable @call_dispatch_3 attributes {sym_visibility = "private"} { + hal.interface @io { + hal.interface.binding @s0b0_ro_constant, set=0, binding=0, type="StorageBuffer", access="Read" + hal.interface.binding @s0b1_ro_external, set=0, binding=1, type="StorageBuffer", access="Read" 
+ hal.interface.binding @s0b2_xw_external, set=0, binding=2, type="StorageBuffer", access="Write|Discard" + } + hal.executable.variant @vulkan_spirv_fb, target = #executable_target_vulkan_spirv_fb { + hal.executable.entry_point @call_dispatch_3 attributes {interface = @io, ordinal = 0 : index} { + ^bb0(%arg0: index, %arg1: index, %arg2: index): // no predecessors + %c1 = constant 1 : index + %c56 = constant 56 : index + %c56_0 = constant 56 : index + hal.return %c1, %c56, %c56_0 : index, index, index + } + module { + spv.module Logical GLSL450 requires #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]> { + spv.func @call_dispatch_3() "None" { + spv.Return + } + spv.EntryPoint "GLCompute" @call_dispatch_3 + spv.ExecutionMode @call_dispatch_3 "LocalSize", 8, 2, 2 + } + hal.interface @io attributes {sym_visibility = "private"} { + hal.interface.binding @s0b0_ro_constant, set=0, binding=0, type="StorageBuffer", access="Read" + hal.interface.binding @s0b1_ro_external, set=0, binding=1, type="StorageBuffer", access="Read" + hal.interface.binding @s0b2_xw_external, set=0, binding=2, type="StorageBuffer", access="Write|Discard" + } + } + } +} +hal.executable @call_dispatch_4 attributes {sym_visibility = "private"} { + hal.interface @io { + hal.interface.binding @s0b0_ro_constant, set=0, binding=0, type="StorageBuffer", access="Read" + hal.interface.binding @s0b1_ro_external, set=0, binding=1, type="StorageBuffer", access="Read" + hal.interface.binding @s0b2_xw_external, set=0, binding=2, type="StorageBuffer", access="Write|Discard" + } + hal.executable.variant @vulkan_spirv_fb, target = #executable_target_vulkan_spirv_fb { + hal.executable.entry_point @call_dispatch_4 attributes {interface = @io, ordinal = 0 : index} + module { + spv.module Logical GLSL450 requires #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]> { + spv.func @call_dispatch_4() "None" { + spv.Return + } + spv.EntryPoint "GLCompute" @call_dispatch_4 + spv.ExecutionMode 
@call_dispatch_4 "LocalSize", 2, 8, 1 + } + hal.interface @io attributes {sym_visibility = "private"} { + hal.interface.binding @s0b0_ro_constant, set=0, binding=0, type="StorageBuffer", access="Read" + hal.interface.binding @s0b1_ro_external, set=0, binding=1, type="StorageBuffer", access="Read" + hal.interface.binding @s0b2_xw_external, set=0, binding=2, type="StorageBuffer", access="Write|Discard" + } + } + } +} + +// Two groups should be created, according to their interfaces. + +// CHECK: hal.executable @linking_linked_vulkan_0 { +// CHECK-NEXT: hal.interface @io_0 { +// CHECK-NEXT: hal.interface.binding @s0b0_ro_constant, set=0, binding=0, type="StorageBuffer", access="Read" +// CHECK-NEXT: hal.interface.binding @s0b1_ro_external, set=0, binding=1, type="StorageBuffer", access="Read" +// CHECK-NEXT: hal.interface.binding @s0b2_xw_external, set=0, binding=2, type="StorageBuffer", access="Write|Discard" +// CHECK-NEXT: } +// CHECK-NEXT: hal.executable.variant @vulkan_spirv_fb, target = #executable_target_vulkan_spirv_fb { +// CHECK-NEXT: hal.executable.entry_point @call_dispatch_1 attributes {interface = @io_0, ordinal = 0 : index} +// CHECK-NEXT: hal.executable.entry_point @call_dispatch_3 attributes {interface = @io_0, ordinal = 1 : index} +// CHECK-NEXT: hal.executable.entry_point @call_dispatch_4 attributes {interface = @io_0, ordinal = 2 : index} +// CHECK-NEXT: module { +// CHECK-NEXT: spv.module Logical GLSL450 requires #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]> { +// CHECK-NEXT: spv.func @call_dispatch_1() "None" { +// CHECK-NEXT: spv.Return +// CHECK-NEXT: } +// CHECK-NEXT: spv.EntryPoint "GLCompute" @call_dispatch_1 +// CHECK-NEXT: spv.ExecutionMode @call_dispatch_1 "LocalSize", 4, 4, 1 +// CHECK-NEXT: spv.func @call_dispatch_3() "None" { +// CHECK-NEXT: spv.Return +// CHECK-NEXT: } +// CHECK-NEXT: spv.EntryPoint "GLCompute" @call_dispatch_3 +// CHECK-NEXT: spv.ExecutionMode @call_dispatch_3 "LocalSize", 8, 2, 2 +// CHECK-NEXT: 
spv.func @call_dispatch_4() "None" { +// CHECK-NEXT: spv.Return +// CHECK-NEXT: } +// CHECK-NEXT: spv.EntryPoint "GLCompute" @call_dispatch_4 +// CHECK-NEXT: spv.ExecutionMode @call_dispatch_4 "LocalSize", 2, 8, 1 +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: } + +// CHECK: hal.executable @linking_linked_vulkan { +// CHECK-NEXT: hal.interface @io_0 { +// CHECK-NEXT: hal.interface.binding @s0b0_ro_external, set=0, binding=0, type="StorageBuffer", access="Read" +// CHECK-NEXT: hal.interface.binding @s0b1_rw_external, set=0, binding=1, type="StorageBuffer", access="Read|Write" +// CHECK-NEXT: } +// CHECK-NEXT: hal.executable.variant @vulkan_spirv_fb, target = #executable_target_vulkan_spirv_fb { +// CHECK-NEXT: hal.executable.entry_point @call_dispatch_0 attributes {interface = @io_0, ordinal = 0 : index} +// CHECK-NEXT: hal.executable.entry_point @call_dispatch_2 attributes {interface = @io_0, ordinal = 1 : index} +// CHECK-NEXT: module { +// CHECK-NEXT: spv.module Logical GLSL450 requires #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]> { +// CHECK-NEXT: spv.func @call_dispatch_0() "None" { +// CHECK-NEXT: spv.Return +// CHECK-NEXT: } +// CHECK-NEXT: spv.EntryPoint "GLCompute" @call_dispatch_0 +// CHECK-NEXT: spv.ExecutionMode @call_dispatch_0 "LocalSize", 32, 1, 1 +// CHECK-NEXT: spv.func @call_dispatch_2() "None" { +// CHECK-NEXT: spv.Return +// CHECK-NEXT: } +// CHECK-NEXT: spv.EntryPoint "GLCompute" @call_dispatch_2 +// CHECK-NEXT: spv.ExecutionMode @call_dispatch_2 "LocalSize", 32, 1, 1 +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: }
diff --git a/iree/compiler/Dialect/HAL/Transforms/BUILD b/iree/compiler/Dialect/HAL/Transforms/BUILD index 536e917..7797917 100644 --- a/iree/compiler/Dialect/HAL/Transforms/BUILD +++ b/iree/compiler/Dialect/HAL/Transforms/BUILD
@@ -47,11 +47,11 @@ "//iree/compiler/Dialect/HAL/IR:HALDialect", "//iree/compiler/Dialect/HAL/Target", "//iree/compiler/Dialect/HAL/Utils", - "//iree/compiler/Dialect/IREE/Conversion:PreserveCompilerHints", - "//iree/compiler/Dialect/IREE/IR", - "//iree/compiler/Dialect/IREE/Transforms", "//iree/compiler/Dialect/Shape/IR", "//iree/compiler/Dialect/Shape/Transforms", + "//iree/compiler/Dialect/Util/Conversion:PreserveCompilerHints", + "//iree/compiler/Dialect/Util/IR", + "//iree/compiler/Dialect/Util/Transforms", "@llvm-project//llvm:Support", "@llvm-project//mlir:AffineToStandard", "@llvm-project//mlir:IR",
diff --git a/iree/compiler/Dialect/HAL/Transforms/CMakeLists.txt b/iree/compiler/Dialect/HAL/Transforms/CMakeLists.txt index 2073e36..2f5bd2d 100644 --- a/iree/compiler/Dialect/HAL/Transforms/CMakeLists.txt +++ b/iree/compiler/Dialect/HAL/Transforms/CMakeLists.txt
@@ -53,11 +53,11 @@ iree::compiler::Dialect::HAL::IR::HALDialect iree::compiler::Dialect::HAL::Target iree::compiler::Dialect::HAL::Utils - iree::compiler::Dialect::IREE::Conversion::PreserveCompilerHints - iree::compiler::Dialect::IREE::IR - iree::compiler::Dialect::IREE::Transforms iree::compiler::Dialect::Shape::IR iree::compiler::Dialect::Shape::Transforms + iree::compiler::Dialect::Util::Conversion::PreserveCompilerHints + iree::compiler::Dialect::Util::IR + iree::compiler::Dialect::Util::Transforms PUBLIC )
diff --git a/iree/compiler/Dialect/HAL/Transforms/ConvertToHAL.cpp b/iree/compiler/Dialect/HAL/Transforms/ConvertToHAL.cpp index c1adca8..73ad8e7 100644 --- a/iree/compiler/Dialect/HAL/Transforms/ConvertToHAL.cpp +++ b/iree/compiler/Dialect/HAL/Transforms/ConvertToHAL.cpp
@@ -15,12 +15,12 @@ #include "iree/compiler/Dialect/HAL/IR/HALDialect.h" #include "iree/compiler/Dialect/HAL/IR/HALOps.h" #include "iree/compiler/Dialect/HAL/IR/HALTypes.h" -#include "iree/compiler/Dialect/IREE/Conversion/PreserveCompilerHints.h" -#include "iree/compiler/Dialect/IREE/IR/IREEDialect.h" -#include "iree/compiler/Dialect/IREE/IR/IREEOps.h" -#include "iree/compiler/Dialect/IREE/IR/IREETypes.h" -#include "iree/compiler/Dialect/IREE/Transforms/Passes.h" #include "iree/compiler/Dialect/Shape/IR/ShapeOps.h" +#include "iree/compiler/Dialect/Util/Conversion/PreserveCompilerHints.h" +#include "iree/compiler/Dialect/Util/IR/UtilDialect.h" +#include "iree/compiler/Dialect/Util/IR/UtilOps.h" +#include "iree/compiler/Dialect/Util/IR/UtilTypes.h" +#include "iree/compiler/Dialect/Util/Transforms/Passes.h" #include "mlir/Dialect/StandardOps/IR/Ops.h" #include "mlir/IR/Attributes.h" #include "mlir/IR/Builders.h" @@ -41,7 +41,7 @@ : public PassWrapper<ConvertToHALPass, OperationPass<ModuleOp>> { public: void getDependentDialects(DialectRegistry ®istry) const override { - registry.insert<IREEDialect>(); + registry.insert<IREE::Util::UtilDialect>(); registry.insert<HALDialect>(); registry.insert<StandardOpsDialect>(); } @@ -74,8 +74,9 @@ populateIREEToHALPatterns(context, conversionTarget, typeConverter, patterns); - setupCompilerHintsLegality(context, conversionTarget, typeConverter); - populatePreserveCompilerHintsPatterns(context, patterns); + IREE::Util::setupCompilerHintsLegality(context, conversionTarget, + typeConverter); + IREE::Util::populatePreserveCompilerHintsPatterns(context, patterns); setupStandardToHALLegality(context, conversionTarget, typeConverter); populateStandardToHALPatterns(context, patterns, typeConverter);
diff --git a/iree/compiler/Dialect/HAL/Transforms/InlineDeviceSwitches.cpp b/iree/compiler/Dialect/HAL/Transforms/InlineDeviceSwitches.cpp index c3158e5..50ca737 100644 --- a/iree/compiler/Dialect/HAL/Transforms/InlineDeviceSwitches.cpp +++ b/iree/compiler/Dialect/HAL/Transforms/InlineDeviceSwitches.cpp
@@ -8,8 +8,8 @@ #include "iree/compiler/Dialect/HAL/IR/HALOps.h" #include "iree/compiler/Dialect/HAL/Transforms/Passes.h" -#include "iree/compiler/Dialect/IREE/IR/IREEDialect.h" -#include "iree/compiler/Dialect/IREE/IR/IREEOps.h" +#include "iree/compiler/Dialect/Util/IR/UtilDialect.h" +#include "iree/compiler/Dialect/Util/IR/UtilOps.h" #include "llvm/ADT/StringSet.h" #include "mlir/Dialect/StandardOps/IR/Ops.h" #include "mlir/IR/Attributes.h" @@ -121,7 +121,7 @@ // condition will add its IR to the block. } else { // Fallthrough of all expressions; die if we expected return values. - funcBuilder.create<IREE::UnreachableOp>( + funcBuilder.create<IREE::Util::UnreachableOp>( switchOp.getLoc(), "device not supported in the compiled configuration"); } @@ -136,7 +136,7 @@ : public PassWrapper<InlineDeviceSwitchesPass, OperationPass<FuncOp>> { public: void getDependentDialects(DialectRegistry ®istry) const override { - registry.insert<IREEDialect>(); + registry.insert<IREE::Util::UtilDialect>(); } StringRef getArgument() const override {
diff --git a/iree/compiler/Dialect/HAL/Transforms/MaterializeConstantPoolBuffers.cpp b/iree/compiler/Dialect/HAL/Transforms/MaterializeConstantPoolBuffers.cpp index 8f022a5..e779dfb 100644 --- a/iree/compiler/Dialect/HAL/Transforms/MaterializeConstantPoolBuffers.cpp +++ b/iree/compiler/Dialect/HAL/Transforms/MaterializeConstantPoolBuffers.cpp
@@ -10,7 +10,7 @@ #include "iree/compiler/Dialect/HAL/IR/HALOps.h" #include "iree/compiler/Dialect/HAL/Transforms/Passes.h" #include "iree/compiler/Dialect/HAL/Utils/TypeUtils.h" -#include "iree/compiler/Dialect/IREE/IR/IREEDialect.h" +#include "iree/compiler/Dialect/Util/IR/UtilDialect.h" #include "mlir/Dialect/StandardOps/IR/Ops.h" #include "mlir/IR/Attributes.h" #include "mlir/IR/Builders.h" @@ -29,7 +29,7 @@ public: void getDependentDialects(DialectRegistry ®istry) const override { registry.insert<mlir::StandardOpsDialect>(); - registry.insert<IREEDialect>(); + registry.insert<IREE::Util::UtilDialect>(); registry.insert<IREE::HAL::HALDialect>(); } @@ -138,7 +138,7 @@ // TODO(benvanik): allocate based on usage tracking. auto sourceValue = funcBuilder.createOrFold<IREE::HAL::ConstantStorageLookupOp>( - storageOp.getLoc(), IREE::ByteBufferType::get(context), + storageOp.getLoc(), IREE::Util::ByteBufferType::get(context), funcBuilder.getSymbolRefAttr( storageOp->getParentOfType<ConstantPoolOp>().getName(), {funcBuilder.getSymbolRefAttr(storageOp)}));
diff --git a/iree/compiler/Dialect/HAL/Transforms/MaterializeResourceCaches.cpp b/iree/compiler/Dialect/HAL/Transforms/MaterializeResourceCaches.cpp index 4d4bca1..183a290 100644 --- a/iree/compiler/Dialect/HAL/Transforms/MaterializeResourceCaches.cpp +++ b/iree/compiler/Dialect/HAL/Transforms/MaterializeResourceCaches.cpp
@@ -10,7 +10,7 @@ #include "iree/compiler/Dialect/HAL/IR/HALOps.h" #include "iree/compiler/Dialect/HAL/Transforms/Passes.h" #include "iree/compiler/Dialect/HAL/Utils/DeviceSwitchBuilder.h" -#include "iree/compiler/Dialect/IREE/IR/IREEOps.h" +#include "iree/compiler/Dialect/Util/IR/UtilOps.h" #include "mlir/Dialect/StandardOps/IR/Ops.h" #include "mlir/IR/Attributes.h" #include "mlir/IR/Builders.h" @@ -245,7 +245,7 @@ IREE::HAL::MatchAlwaysAttr::get(loc.getContext())); auto defaultBuilder = OpBuilder::atBlockBegin(&defaultRegion->front()); auto nullValue = - defaultBuilder.createOrFold<IREE::NullOp>(loc, executableType); + defaultBuilder.createOrFold<IREE::Util::NullOp>(loc, executableType); defaultBuilder.create<IREE::HAL::ReturnOp>(loc, nullValue); auto switchOp = switchBuilder.build();
diff --git a/iree/compiler/Dialect/HAL/Transforms/test/identify_constant_pools.mlir b/iree/compiler/Dialect/HAL/Transforms/test/identify_constant_pools.mlir index afcfeaf..b8fe060 100644 --- a/iree/compiler/Dialect/HAL/Transforms/test/identify_constant_pools.mlir +++ b/iree/compiler/Dialect/HAL/Transforms/test/identify_constant_pools.mlir
@@ -58,8 +58,8 @@ flow.variable @_large_const_0 dense<3.0> : tensor<128xf32> func @skip_indirect_variables() -> (tensor<128xf32>) { // CHECK: flow.variable.address - %0 = flow.variable.address @_large_const_0 : !iree.ptr<tensor<128xf32>> + %0 = flow.variable.address @_large_const_0 : !util.ptr<tensor<128xf32>> // CHECK: flow.variable.load.indirect - %1 = flow.variable.load.indirect %0 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> + %1 = flow.variable.load.indirect %0 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> return %1 : tensor<128xf32> }
diff --git a/iree/compiler/Dialect/HAL/Transforms/test/materialize_constant_pool_buffers.mlir b/iree/compiler/Dialect/HAL/Transforms/test/materialize_constant_pool_buffers.mlir index 66052c1..90c59dd 100644 --- a/iree/compiler/Dialect/HAL/Transforms/test/materialize_constant_pool_buffers.mlir +++ b/iree/compiler/Dialect/HAL/Transforms/test/materialize_constant_pool_buffers.mlir
@@ -11,9 +11,9 @@ // CHECK: hal.variable @dense_variable_init_storage_buffer init(@dense_variable_init_storage_buffer_initializer) : !hal.buffer // CHECK-NEXT: func private @dense_variable_init_storage_buffer_initializer() -> !hal.buffer -// CHECK: %[[STORAGE:.+]] = hal.constant_storage.lookup @dense_variable_init::@_storage : !iree.byte_buffer +// CHECK: %[[STORAGE:.+]] = hal.constant_storage.lookup @dense_variable_init::@_storage : !util.byte_buffer // CHECK: = hal.allocator.map<%allocator : !hal.allocator> -// CHECK-SAME: source(%[[STORAGE]] : !iree.byte_buffer)[%c0, %c768] +// CHECK-SAME: source(%[[STORAGE]] : !util.byte_buffer)[%c0, %c768] // CHECK-SAME: : !hal.buffer // ----- @@ -56,9 +56,9 @@ // CHECK: hal.variable @pool_storage0_buffer init(@pool_storage0_buffer_initializer) : !hal.buffer // CHECK-NEXT: func private @pool_storage0_buffer_initializer() -> !hal.buffer -// CHECK: %[[STORAGE:.+]] = hal.constant_storage.lookup @pool::@_storage0 : !iree.byte_buffer +// CHECK: %[[STORAGE:.+]] = hal.constant_storage.lookup @pool::@_storage0 : !util.byte_buffer // CHECK: = hal.allocator.map<%allocator : !hal.allocator> -// CHECK-SAME: source(%[[STORAGE]] : !iree.byte_buffer)[%c0, %c16] +// CHECK-SAME: source(%[[STORAGE]] : !util.byte_buffer)[%c0, %c16] // CHECK-SAME: : !hal.buffer // CHECK: hal.variable @pool_storage1_buffer init(@pool_storage1_buffer_initializer) : !hal.buffer
diff --git a/iree/compiler/Dialect/HAL/Transforms/test/materialize_resource_caches.mlir b/iree/compiler/Dialect/HAL/Transforms/test/materialize_resource_caches.mlir index 29652e4..ac269cb 100644 --- a/iree/compiler/Dialect/HAL/Transforms/test/materialize_resource_caches.mlir +++ b/iree/compiler/Dialect/HAL/Transforms/test/materialize_resource_caches.mlir
@@ -159,7 +159,7 @@ // CHECK: hal.return %[[EXE]] : !hal.executable // CHECK: }, // CHECK: #hal.match.always { -// CHECK: %[[NULL:.+]] = iree.null : !hal.executable +// CHECK: %[[NULL:.+]] = util.null : !hal.executable // CHECK: hal.return %[[NULL]] : !hal.executable // CHECK: } // CHECK: return %[[RET]] : !hal.executable
diff --git a/iree/compiler/Dialect/HAL/Utils/BUILD b/iree/compiler/Dialect/HAL/Utils/BUILD index 5286d9b..947fd6e 100644 --- a/iree/compiler/Dialect/HAL/Utils/BUILD +++ b/iree/compiler/Dialect/HAL/Utils/BUILD
@@ -21,8 +21,8 @@ ], deps = [ "//iree/compiler/Dialect/HAL/IR", - "//iree/compiler/Dialect/IREE/IR", "//iree/compiler/Dialect/Shape/IR", + "//iree/compiler/Dialect/Util/IR", "@llvm-project//llvm:Support", "@llvm-project//mlir:IR", "@llvm-project//mlir:StandardOps",
diff --git a/iree/compiler/Dialect/HAL/Utils/CMakeLists.txt b/iree/compiler/Dialect/HAL/Utils/CMakeLists.txt index cea1afc..fafaac6 100644 --- a/iree/compiler/Dialect/HAL/Utils/CMakeLists.txt +++ b/iree/compiler/Dialect/HAL/Utils/CMakeLists.txt
@@ -25,8 +25,8 @@ MLIRSupport MLIRTransforms iree::compiler::Dialect::HAL::IR - iree::compiler::Dialect::IREE::IR iree::compiler::Dialect::Shape::IR + iree::compiler::Dialect::Util::IR PUBLIC )
diff --git a/iree/compiler/Dialect/HAL/Utils/TypeUtils.cpp b/iree/compiler/Dialect/HAL/Utils/TypeUtils.cpp index 2b3ceb2..1eb2477 100644 --- a/iree/compiler/Dialect/HAL/Utils/TypeUtils.cpp +++ b/iree/compiler/Dialect/HAL/Utils/TypeUtils.cpp
@@ -8,9 +8,9 @@ #include "iree/compiler/Dialect/HAL/IR/HALOps.h" #include "iree/compiler/Dialect/HAL/IR/HALTypes.h" -#include "iree/compiler/Dialect/IREE/IR/IREETypes.h" #include "iree/compiler/Dialect/Shape/IR/Builders.h" #include "iree/compiler/Dialect/Shape/IR/ShapeOps.h" +#include "iree/compiler/Dialect/Util/IR/UtilTypes.h" #include "llvm/ADT/Optional.h" #include "llvm/Support/Debug.h" #include "mlir/Dialect/StandardOps/IR/Ops.h"
diff --git a/iree/compiler/Dialect/IREE/Conversion/PreserveCompilerHints.cpp b/iree/compiler/Dialect/IREE/Conversion/PreserveCompilerHints.cpp deleted file mode 100644 index 9e18f56..0000000 --- a/iree/compiler/Dialect/IREE/Conversion/PreserveCompilerHints.cpp +++ /dev/null
@@ -1,48 +0,0 @@ -// Copyright 2019 The IREE Authors -// -// Licensed under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception - -#include "iree/compiler/Dialect/IREE/Conversion/PreserveCompilerHints.h" - -#include "iree/compiler/Dialect/IREE/IR/IREEOps.h" -#include "mlir/IR/MLIRContext.h" -#include "mlir/IR/PatternMatch.h" -#include "mlir/Transforms/DialectConversion.h" - -namespace mlir { -namespace iree_compiler { - -namespace { -class PreserveDoNotOptimize - : public OpConversionPattern<IREE::DoNotOptimizeOp> { - public: - using OpConversionPattern<IREE::DoNotOptimizeOp>::OpConversionPattern; - LogicalResult matchAndRewrite( - IREE::DoNotOptimizeOp op, llvm::ArrayRef<Value> operands, - ConversionPatternRewriter &rewriter) const override { - rewriter.replaceOpWithNewOp<IREE::DoNotOptimizeOp>(op, operands, - op->getAttrs()); - return success(); - } -}; -} // namespace - -void setupCompilerHintsLegality(MLIRContext *context, ConversionTarget &target, - TypeConverter &typeConverter) { - target.addDynamicallyLegalOp<IREE::DoNotOptimizeOp>( - [&](IREE::DoNotOptimizeOp op) { - return llvm::all_of(op.getResultTypes(), [&typeConverter](Type t) { - return typeConverter.isLegal(t); - }); - }); -} - -void populatePreserveCompilerHintsPatterns(MLIRContext *context, - OwningRewritePatternList &patterns) { - patterns.insert<PreserveDoNotOptimize>(context); -} - -} // namespace iree_compiler -} // namespace mlir
diff --git a/iree/compiler/Dialect/IREE/IR/test/byte_buffer_ops.mlir b/iree/compiler/Dialect/IREE/IR/test/byte_buffer_ops.mlir deleted file mode 100644 index 27a58de..0000000 --- a/iree/compiler/Dialect/IREE/IR/test/byte_buffer_ops.mlir +++ /dev/null
@@ -1,8 +0,0 @@ -// RUN: iree-opt -split-input-file %s | iree-opt -split-input-file | IreeFileCheck %s - -// CHECK-LABEL: @byte_buffer_constant -func @byte_buffer_constant() -> !iree.byte_buffer { - // CHECK: = iree.byte_buffer.constant : !iree.byte_buffer = dense<[1, 2, 3]> : tensor<3xi32> - %0 = iree.byte_buffer.constant : !iree.byte_buffer = dense<[1, 2, 3]> : tensor<3xi32> - return %0 : !iree.byte_buffer -}
diff --git a/iree/compiler/Dialect/IREE/IR/test/list_ops.mlir b/iree/compiler/Dialect/IREE/IR/test/list_ops.mlir deleted file mode 100644 index b3f046b..0000000 --- a/iree/compiler/Dialect/IREE/IR/test/list_ops.mlir +++ /dev/null
@@ -1,85 +0,0 @@ -// RUN: iree-opt -split-input-file %s | iree-opt -split-input-file | IreeFileCheck %s - -// CHECK-LABEL: @list_init_ops -func @list_init_ops() { - // CHECK: %[[CAPACITY:.+]] = constant 5 - %capacity = constant 5 : index - // CHECK: = iree.list.create %[[CAPACITY]] : !iree.list<?> - %list_initial_capacity = iree.list.create %capacity : !iree.list<?> - - // CHECK: %[[LIST:.+]] = iree.list.create : !iree.list<?> - %list = iree.list.create : !iree.list<?> - - // CHECK: %[[NEW_SIZE:.+]] = constant 100 - %new_size = constant 100 : index - // CHECK: iree.list.resize %[[LIST]], %[[NEW_SIZE]] : !iree.list<?> - iree.list.resize %list, %new_size : !iree.list<?> - - return -} - -// ----- - -// CHECK-LABEL: @list_access -// CHECK-SAME: (%[[LIST:.+]]: !iree.list<i32>) -func @list_access(%list: !iree.list<i32>) { - %c10 = constant 10 : index - - // CHECK: = iree.list.get %[[LIST]][%c10] : !iree.list<i32> - %0 = iree.list.get %list[%c10] : !iree.list<i32> - // CHECK: = iree.list.get %[[LIST]][%c10] : !iree.list<i32> - %1 = iree.list.get %list[%c10] : !iree.list<i32> -> i32 - - // CHECK: %[[NEW_VALUE:.+]] = constant 100 : i32 - %new_value = constant 100 : i32 - // CHECK: iree.list.set %[[LIST]][%c10], %[[NEW_VALUE]] : !iree.list<i32> - iree.list.set %list[%c10], %new_value : !iree.list<i32> - - return -} - -// ----- - -// CHECK-LABEL: @list_access_tensor -// CHECK-SAME: (%[[LIST:.+]]: !iree.list<tensor<*xf32>>) -func @list_access_tensor(%list: !iree.list<tensor<*xf32>>) { - %c10 = constant 10 : index - - // CHECK: = iree.list.get %[[LIST]][%c10] : !iree.list<tensor<*xf32>> -> tensor<?xf32> - %0 = iree.list.get %list[%c10] : !iree.list<tensor<*xf32>> -> tensor<?xf32> - - // CHECK: %[[NEW_VALUE:.+]] = constant dense<1> : tensor<5xi32> - %new_value = constant dense<1> : tensor<5xi32> - // CHECK: iree.list.set %[[LIST]][%c10], %[[NEW_VALUE]] : tensor<5xi32> -> !iree.list<tensor<*xf32>> - iree.list.set %list[%c10], %new_value : tensor<5xi32> -> 
!iree.list<tensor<*xf32>> - - return -} - -// ----- - -// CHECK-LABEL: @list_access_variant -// CHECK-SAME: (%[[LIST:.+]]: !iree.list<?>) -func @list_access_variant(%list: !iree.list<?>) { - %c10 = constant 10 : index - %c11 = constant 11 : index - - // CHECK: = iree.list.get %[[LIST]][%c10] : !iree.list<?> -> i32 - %0 = iree.list.get %list[%c10] : !iree.list<?> -> i32 - - // CHECK: %[[NEW_I32_VALUE:.+]] = constant 100 : i32 - %new_i32_value = constant 100 : i32 - // CHECK: iree.list.set %[[LIST]][%c10], %[[NEW_I32_VALUE]] : i32 -> !iree.list<?> - iree.list.set %list[%c10], %new_i32_value : i32 -> !iree.list<?> - - // CHECK: = iree.list.get %[[LIST]][%c11] : !iree.list<?> -> tensor<5xf32> - %1 = iree.list.get %list[%c11] : !iree.list<?> -> tensor<5xf32> - - // CHECK: %[[NEW_TENSOR_VALUE:.+]] = constant dense<1> : tensor<5xi32> - %new_tensor_value = constant dense<1> : tensor<5xi32> - // CHECK: iree.list.set %[[LIST]][%c11], %[[NEW_TENSOR_VALUE]] : tensor<5xi32> -> !iree.list<?> - iree.list.set %list[%c11], %new_tensor_value : tensor<5xi32> -> !iree.list<?> - - return -} -
diff --git a/iree/compiler/Dialect/IREE/IR/test/parse_print.mlir b/iree/compiler/Dialect/IREE/IR/test/parse_print.mlir deleted file mode 100644 index 06d2c0b..0000000 --- a/iree/compiler/Dialect/IREE/IR/test/parse_print.mlir +++ /dev/null
@@ -1,48 +0,0 @@ -// RUN: iree-opt -split-input-file %s | iree-opt -split-input-file | IreeFileCheck %s - -// CHECK-LABEL: @parse_print_do_not_optimize -// CHECK-SAME: %[[ARG0:[a-zA-Z0-9$._-]+]] -// CHECK-SAME: %[[ARG1:[a-zA-Z0-9$._-]+]] -func @parse_print_do_not_optimize(%arg0 : tensor<i32>, %arg1 : tensor<i32>) { - // CHECK: iree.do_not_optimize() - iree.do_not_optimize() - - // CHECK-NEXT: iree.do_not_optimize(%[[ARG0]]) : tensor<i32> - %1 = iree.do_not_optimize(%arg0) : tensor<i32> - - // CHECK-NEXT: iree.do_not_optimize(%[[ARG0]], %[[ARG1]]) : tensor<i32>, tensor<i32> - %2:2 = iree.do_not_optimize(%arg0, %arg1) : tensor<i32>, tensor<i32> - - // CHECK-NEXT: iree.do_not_optimize(%[[ARG0]]) {some_unit} : tensor<i32> - %has_attr = iree.do_not_optimize(%arg0) {some_unit} : tensor<i32> - - return -} - -// ----- - -// CHECK-LABEL: @parse_print_unfoldable_constant -func @parse_print_unfoldable_constant(%arg0 : tensor<i32>, %arg1 : tensor<i32>) { - // CHECK-NEXT: iree.unfoldable_constant 42 - %c42 = iree.unfoldable_constant 42 : i32 - - // CHECK: iree.unfoldable_constant {attr = "foo"} 43 : i32 - %cattr = iree.unfoldable_constant {attr = "foo"} 43 : i32 - - // CHECK: iree.unfoldable_constant @func_with_args : (f32) -> () - %csymref = iree.unfoldable_constant @func_with_args : (f32) -> () - - return -} - -// ----- - -// CHECK-LABEL: @parse_print_dynamic_shape_constant -func @parse_print_dynamic_shape_constant() { - // CHECK-NEXT: iree.dynamic_shape_constant dense<2> : tensor<2xi32> -> tensor<?xi32> - %c = iree.dynamic_shape_constant dense<2> : tensor<2xi32> -> tensor<?xi32> - - // CHECK-NEXT: iree.dynamic_shape_constant dense<2> : tensor<2xi32> {attr = "foo"} -> tensor<?xi32> - %has_attr = iree.dynamic_shape_constant dense<2> : tensor<2xi32> {attr = "foo"} -> tensor<?xi32> - return -}
diff --git a/iree/compiler/Dialect/Modules/Check/IR/BUILD b/iree/compiler/Dialect/Modules/Check/IR/BUILD index b856787..f594b91 100644 --- a/iree/compiler/Dialect/Modules/Check/IR/BUILD +++ b/iree/compiler/Dialect/Modules/Check/IR/BUILD
@@ -22,7 +22,7 @@ ), deps = [ "//iree/compiler/Dialect/HAL/IR:td_files", - "//iree/compiler/Dialect/IREE/IR:td_files", + "//iree/compiler/Dialect/Util/IR:td_files", "@llvm-project//mlir:OpBaseTdFiles", ], )
diff --git a/iree/compiler/Dialect/Modules/Check/IR/CheckOps.td b/iree/compiler/Dialect/Modules/Check/IR/CheckOps.td index 3f8e69f..e47987e 100644 --- a/iree/compiler/Dialect/Modules/Check/IR/CheckOps.td +++ b/iree/compiler/Dialect/Modules/Check/IR/CheckOps.td
@@ -7,7 +7,7 @@ #ifndef IREE_MODULES_CHECK_DIALECT_CHECK_OPS #define IREE_MODULES_CHECK_DIALECT_CHECK_OPS -include "iree/compiler/Dialect/IREE/IR/IREEBase.td" +include "iree/compiler/Dialect/Util/IR/UtilBase.td" include "iree/compiler/Dialect/HAL/IR/HALBase.td" def CHECK_Dialect : Dialect {
diff --git a/iree/compiler/Dialect/Modules/VMVX/Conversion/HALToVMVX/BUILD b/iree/compiler/Dialect/Modules/VMVX/Conversion/HALToVMVX/BUILD index bbe4e44..48fa3df 100644 --- a/iree/compiler/Dialect/Modules/VMVX/Conversion/HALToVMVX/BUILD +++ b/iree/compiler/Dialect/Modules/VMVX/Conversion/HALToVMVX/BUILD
@@ -20,9 +20,9 @@ ], deps = [ "//iree/compiler/Dialect/HAL/IR", - "//iree/compiler/Dialect/IREE/IR", "//iree/compiler/Dialect/Modules/VMVX/IR", "//iree/compiler/Dialect/Modules/VMVX/IR:VMVXDialect", + "//iree/compiler/Dialect/Util/IR", "@llvm-project//mlir:IR", "@llvm-project//mlir:MemRefDialect", "@llvm-project//mlir:Pass",
diff --git a/iree/compiler/Dialect/Modules/VMVX/Conversion/HALToVMVX/CMakeLists.txt b/iree/compiler/Dialect/Modules/VMVX/Conversion/HALToVMVX/CMakeLists.txt index 8277c03..8870228 100644 --- a/iree/compiler/Dialect/Modules/VMVX/Conversion/HALToVMVX/CMakeLists.txt +++ b/iree/compiler/Dialect/Modules/VMVX/Conversion/HALToVMVX/CMakeLists.txt
@@ -24,9 +24,9 @@ MLIRStandard MLIRTransforms iree::compiler::Dialect::HAL::IR - iree::compiler::Dialect::IREE::IR iree::compiler::Dialect::Modules::VMVX::IR iree::compiler::Dialect::Modules::VMVX::IR::VMVXDialect + iree::compiler::Dialect::Util::IR PUBLIC )
diff --git a/iree/compiler/Dialect/Modules/VMVX/Conversion/HALToVMVX/ConvertHALToVMVX.cpp b/iree/compiler/Dialect/Modules/VMVX/Conversion/HALToVMVX/ConvertHALToVMVX.cpp index 4e683a9..f62175c 100644 --- a/iree/compiler/Dialect/Modules/VMVX/Conversion/HALToVMVX/ConvertHALToVMVX.cpp +++ b/iree/compiler/Dialect/Modules/VMVX/Conversion/HALToVMVX/ConvertHALToVMVX.cpp
@@ -7,11 +7,11 @@ #include "iree/compiler/Dialect/Modules/VMVX/Conversion/HALToVMVX/ConvertHALToVMVX.h" #include "iree/compiler/Dialect/HAL/IR/HALOps.h" -#include "iree/compiler/Dialect/IREE/IR/IREEOps.h" -#include "iree/compiler/Dialect/IREE/IR/IREETypes.h" #include "iree/compiler/Dialect/Modules/VMVX/IR/VMVXDialect.h" #include "iree/compiler/Dialect/Modules/VMVX/IR/VMVXOps.h" #include "iree/compiler/Dialect/Modules/VMVX/IR/VMVXTypes.h" +#include "iree/compiler/Dialect/Util/IR/UtilOps.h" +#include "iree/compiler/Dialect/Util/IR/UtilTypes.h" #include "mlir/Dialect/MemRef/IR/MemRef.h" #include "mlir/Dialect/StandardOps/IR/Ops.h" #include "mlir/IR/Attributes.h" @@ -52,7 +52,7 @@ /// func @entry( /// %local_memory: !vmvx.buffer, /// %constants: !vmvx.buffer, -/// %bindings: !iree.list<!vmvx.buffer>, +/// %bindings: !util.list<!vmvx.buffer>, /// %workgroup_x: index, /// %workgroup_y: index, /// %workgroup_z: index, @@ -74,7 +74,7 @@ auto i32Type = IntegerType::get(funcOp.getContext(), 32); auto memRefI8Type = MemRefType::get({-1}, i8Type); auto memRefI32Type = MemRefType::get({-1}, i32Type); - auto bindingsType = IREE::ListType::get(memRefI8Type); + auto bindingsType = IREE::Util::ListType::get(memRefI8Type); auto indexType = IndexType::get(funcOp.getContext()); auto newType = FunctionType::get(funcOp.getContext(), { @@ -216,7 +216,7 @@ // Find the vmvx.interface argument to the function. auto bindingsArg = op->getParentOfType<mlir::FuncOp>().getArgument(kEntryArgBindings); - assert(bindingsArg && bindingsArg.getType().isa<IREE::ListType>() && + assert(bindingsArg && bindingsArg.getType().isa<IREE::Util::ListType>() && "entry point not conforming to requirements"); // Lookup the source interface binding. 
@@ -229,8 +229,8 @@ } auto bindingType = - bindingsArg.getType().cast<IREE::ListType>().getElementType(); - auto getOp = rewriter.create<IREE::ListGetOp>( + bindingsArg.getType().cast<IREE::Util::ListType>().getElementType(); + auto getOp = rewriter.create<IREE::Util::ListGetOp>( op.getLoc(), bindingType, bindingsArg, rewriter.createOrFold<ConstantIndexOp>( op.getLoc(), interfaceBindingOp.binding().getZExtValue()));
diff --git a/iree/compiler/Dialect/Modules/VMVX/Conversion/HALToVMVX/test/interface_ops.mlir b/iree/compiler/Dialect/Modules/VMVX/Conversion/HALToVMVX/test/interface_ops.mlir index b5b65e4..f7a67df 100644 --- a/iree/compiler/Dialect/Modules/VMVX/Conversion/HALToVMVX/test/interface_ops.mlir +++ b/iree/compiler/Dialect/Modules/VMVX/Conversion/HALToVMVX/test/interface_ops.mlir
@@ -11,7 +11,7 @@ // CHECK-LABEL: func @entry( // CHECK-SAME: %[[SCRATCHPAD:.+]]: memref<?xi8>, // CHECK-SAME: %[[CONSTANTS:.+]]: memref<?xi32>, -// CHECK-SAME: %[[BINDINGS:.+]]: !iree.list<memref<?xi8>>, +// CHECK-SAME: %[[BINDINGS:.+]]: !util.list<memref<?xi8>>, // CHECK-SAME: %[[WORKGROUP_X:[a-z0-9]+]]: index, // CHECK-SAME: %[[WORKGROUP_Y:[a-z0-9]+]]: index, // CHECK-SAME: %[[WORKGROUP_Z:[a-z0-9]+]]: index, @@ -27,10 +27,10 @@ %c0 = constant 0 : index %c1 = constant 1 : index %0 = memref.get_global @__constant_5xi32 : memref<5xi32> - // CHECK: %[[BINDING0_RAW:.+]] = iree.list.get %[[BINDINGS]][%c0] : !iree.list<memref<?xi8>> + // CHECK: %[[BINDING0_RAW:.+]] = util.list.get %[[BINDINGS]][%c0] : !util.list<memref<?xi8>> // CHECK-NEXT: %[[BINDING0:.+]] = builtin.unrealized_conversion_cast %[[BINDING0_RAW]] : memref<?xi8> to memref<5xf32> %1 = hal.interface.binding.subspan @io::@s0b0_ro_external[%c0] : memref<5xf32> - // CHECK: %[[BINDING1_RAW:.+]] = iree.list.get %[[BINDINGS]][%c1] : !iree.list<memref<?xi8>> + // CHECK: %[[BINDING1_RAW:.+]] = util.list.get %[[BINDINGS]][%c1] : !util.list<memref<?xi8>> // CHECK-NEXT: %[[BINDING1:.+]] = builtin.unrealized_conversion_cast %[[BINDING1_RAW]] : memref<?xi8> to memref<5xi32> %2 = hal.interface.binding.subspan @io::@s0b1_xw_external[%c0] : memref<5xi32> %workgroup_size_x = hal.interface.workgroup.size[0] : index
diff --git a/iree/compiler/Dialect/Modules/VMVX/Conversion/StandardToVMVX/BUILD b/iree/compiler/Dialect/Modules/VMVX/Conversion/StandardToVMVX/BUILD index 8bf413f..07089d3 100644 --- a/iree/compiler/Dialect/Modules/VMVX/Conversion/StandardToVMVX/BUILD +++ b/iree/compiler/Dialect/Modules/VMVX/Conversion/StandardToVMVX/BUILD
@@ -19,10 +19,10 @@ "ConvertStandardToVMVX.h", ], deps = [ - "//iree/compiler/Dialect/IREE/IR", "//iree/compiler/Dialect/Modules/VMVX/IR", "//iree/compiler/Dialect/Modules/VMVX/IR:VMVXDialect", "//iree/compiler/Dialect/Shape/IR", + "//iree/compiler/Dialect/Util/IR", "@llvm-project//mlir:Affine", "@llvm-project//mlir:IR", "@llvm-project//mlir:LinalgOps",
diff --git a/iree/compiler/Dialect/Modules/VMVX/Conversion/StandardToVMVX/CMakeLists.txt b/iree/compiler/Dialect/Modules/VMVX/Conversion/StandardToVMVX/CMakeLists.txt index 9f9c0bc..b945209 100644 --- a/iree/compiler/Dialect/Modules/VMVX/Conversion/StandardToVMVX/CMakeLists.txt +++ b/iree/compiler/Dialect/Modules/VMVX/Conversion/StandardToVMVX/CMakeLists.txt
@@ -27,10 +27,10 @@ MLIRStandard MLIRSupport MLIRTransforms - iree::compiler::Dialect::IREE::IR iree::compiler::Dialect::Modules::VMVX::IR iree::compiler::Dialect::Modules::VMVX::IR::VMVXDialect iree::compiler::Dialect::Shape::IR + iree::compiler::Dialect::Util::IR PUBLIC )
diff --git a/iree/compiler/Dialect/Modules/VMVX/Conversion/StandardToVMVX/ConvertStandardToVMVX.cpp b/iree/compiler/Dialect/Modules/VMVX/Conversion/StandardToVMVX/ConvertStandardToVMVX.cpp index 40b8914..d1d2522 100644 --- a/iree/compiler/Dialect/Modules/VMVX/Conversion/StandardToVMVX/ConvertStandardToVMVX.cpp +++ b/iree/compiler/Dialect/Modules/VMVX/Conversion/StandardToVMVX/ConvertStandardToVMVX.cpp
@@ -6,13 +6,13 @@ #include "iree/compiler/Dialect/Modules/VMVX/Conversion/StandardToVMVX/ConvertStandardToVMVX.h" -#include "iree/compiler/Dialect/IREE/IR/IREETypes.h" #include "iree/compiler/Dialect/Modules/VMVX/IR/VMVXDialect.h" #include "iree/compiler/Dialect/Modules/VMVX/IR/VMVXOps.h" #include "iree/compiler/Dialect/Modules/VMVX/IR/VMVXTypes.h" #include "iree/compiler/Dialect/Shape/IR/Builders.h" #include "iree/compiler/Dialect/Shape/IR/ShapeOps.h" #include "iree/compiler/Dialect/Shape/IR/ShapeTypes.h" +#include "iree/compiler/Dialect/Util/IR/UtilTypes.h" #include "mlir/Dialect/Affine/IR/AffineOps.h" #include "mlir/Dialect/Linalg/IR/LinalgOps.h" #include "mlir/Dialect/Math/IR/Math.h"
diff --git a/iree/compiler/Dialect/Modules/VMVX/Conversion/VMVXToVM/BUILD b/iree/compiler/Dialect/Modules/VMVX/Conversion/VMVXToVM/BUILD index 8e231ab..398c92e 100644 --- a/iree/compiler/Dialect/Modules/VMVX/Conversion/VMVXToVM/BUILD +++ b/iree/compiler/Dialect/Modules/VMVX/Conversion/VMVXToVM/BUILD
@@ -19,8 +19,8 @@ "ConvertVMVXToVM.h", ], deps = [ - "//iree/compiler/Dialect/IREE/IR", "//iree/compiler/Dialect/Modules/VMVX/IR", + "//iree/compiler/Dialect/Util/IR", "//iree/compiler/Dialect/VM/Conversion", "//iree/compiler/Dialect/VM/Conversion/StandardToVM", "//iree/compiler/Dialect/VM/IR",
diff --git a/iree/compiler/Dialect/Modules/VMVX/Conversion/VMVXToVM/CMakeLists.txt b/iree/compiler/Dialect/Modules/VMVX/Conversion/VMVXToVM/CMakeLists.txt index daa36f4..0c42b8a 100644 --- a/iree/compiler/Dialect/Modules/VMVX/Conversion/VMVXToVM/CMakeLists.txt +++ b/iree/compiler/Dialect/Modules/VMVX/Conversion/VMVXToVM/CMakeLists.txt
@@ -22,8 +22,8 @@ MLIRPass MLIRStandard MLIRTransforms - iree::compiler::Dialect::IREE::IR iree::compiler::Dialect::Modules::VMVX::IR + iree::compiler::Dialect::Util::IR iree::compiler::Dialect::VM::Conversion iree::compiler::Dialect::VM::Conversion::StandardToVM iree::compiler::Dialect::VM::IR
diff --git a/iree/compiler/Dialect/Modules/VMVX/Conversion/VMVXToVM/ConvertVMVXToVM.cpp b/iree/compiler/Dialect/Modules/VMVX/Conversion/VMVXToVM/ConvertVMVXToVM.cpp index f7e101c..5f47f56 100644 --- a/iree/compiler/Dialect/Modules/VMVX/Conversion/VMVXToVM/ConvertVMVXToVM.cpp +++ b/iree/compiler/Dialect/Modules/VMVX/Conversion/VMVXToVM/ConvertVMVXToVM.cpp
@@ -6,9 +6,9 @@ #include "iree/compiler/Dialect/Modules/VMVX/Conversion/VMVXToVM/ConvertVMVXToVM.h" -#include "iree/compiler/Dialect/IREE/IR/IREETypes.h" #include "iree/compiler/Dialect/Modules/VMVX/IR/VMVXOps.h" #include "iree/compiler/Dialect/Modules/VMVX/IR/VMVXTypes.h" +#include "iree/compiler/Dialect/Util/IR/UtilTypes.h" #include "iree/compiler/Dialect/VM/Conversion/ConversionTarget.h" #include "iree/compiler/Dialect/VM/Conversion/ImportUtils.h" #include "iree/compiler/Dialect/VM/Conversion/TypeConverter.h"
diff --git a/iree/compiler/Dialect/Modules/VMVX/IR/BUILD b/iree/compiler/Dialect/Modules/VMVX/IR/BUILD index 551bce6..7a4bac4 100644 --- a/iree/compiler/Dialect/Modules/VMVX/IR/BUILD +++ b/iree/compiler/Dialect/Modules/VMVX/IR/BUILD
@@ -26,8 +26,8 @@ include = ["*.td"], ), deps = [ - "//iree/compiler/Dialect/IREE/IR:td_files", "//iree/compiler/Dialect/Shape/IR:td_files", + "//iree/compiler/Dialect/Util/IR:td_files", "@llvm-project//mlir:OpBaseTdFiles", "@llvm-project//mlir:StdOpsTdFiles", ], @@ -55,8 +55,8 @@ ":VMVXEnumsGen", ":VMVXOpInterfaceGen", ":VMVXOpsGen", - "//iree/compiler/Dialect/IREE/IR", "//iree/compiler/Dialect/Shape/IR", + "//iree/compiler/Dialect/Util/IR", "//iree/compiler/Dialect/VM/IR", "@llvm-project//llvm:Support", "@llvm-project//mlir:IR",
diff --git a/iree/compiler/Dialect/Modules/VMVX/IR/CMakeLists.txt b/iree/compiler/Dialect/Modules/VMVX/IR/CMakeLists.txt index 7e13beb..69f9df7 100644 --- a/iree/compiler/Dialect/Modules/VMVX/IR/CMakeLists.txt +++ b/iree/compiler/Dialect/Modules/VMVX/IR/CMakeLists.txt
@@ -37,8 +37,8 @@ MLIRSupport MLIRTransformUtils MLIRTranslation - iree::compiler::Dialect::IREE::IR iree::compiler::Dialect::Shape::IR + iree::compiler::Dialect::Util::IR iree::compiler::Dialect::VM::IR PUBLIC )
diff --git a/iree/compiler/Dialect/Modules/VMVX/IR/VMVXBase.td b/iree/compiler/Dialect/Modules/VMVX/IR/VMVXBase.td index 5c9c8c4..74465d7 100644 --- a/iree/compiler/Dialect/Modules/VMVX/IR/VMVXBase.td +++ b/iree/compiler/Dialect/Modules/VMVX/IR/VMVXBase.td
@@ -7,7 +7,7 @@ #ifndef IREE_DIALECT_MODULES_VMVX_BASE #define IREE_DIALECT_MODULES_VMVX_BASE -include "iree/compiler/Dialect/IREE/IR/IREEBase.td" +include "iree/compiler/Dialect/Util/IR/UtilBase.td" //===----------------------------------------------------------------------===// // IREE VMVX (Virtual Machine-based Linear Algebra) dialect @@ -38,10 +38,10 @@ //===----------------------------------------------------------------------===// def VMVX_DeviceSize : TypeAlias<Index>; -def VMVX_DeviceSizeAttr : IREE_IndexAttrBase<"size_t">; +def VMVX_DeviceSizeAttr : Util_IndexAttrBase<"size_t">; def VMVX_HostSize : TypeAlias<Index>; -def VMVX_HostSizeAttr : IREE_IndexAttrBase<"size_t">; +def VMVX_HostSizeAttr : Util_IndexAttrBase<"size_t">; def VMVX_Index : TypeAlias<Index>;
diff --git a/iree/compiler/Dialect/Modules/VMVX/IR/VMVXOps.cpp b/iree/compiler/Dialect/Modules/VMVX/IR/VMVXOps.cpp index 8f8d85f..b8ef691 100644 --- a/iree/compiler/Dialect/Modules/VMVX/IR/VMVXOps.cpp +++ b/iree/compiler/Dialect/Modules/VMVX/IR/VMVXOps.cpp
@@ -6,8 +6,8 @@ #include "iree/compiler/Dialect/Modules/VMVX/IR/VMVXOps.h" -#include "iree/compiler/Dialect/IREE/IR/IREETypes.h" #include "iree/compiler/Dialect/Modules/VMVX/IR/VMVXTypes.h" +#include "iree/compiler/Dialect/Util/IR/UtilTypes.h" #include "llvm/ADT/STLExtras.h" #include "llvm/Support/SMLoc.h" #include "mlir/IR/Attributes.h"
diff --git a/iree/compiler/Dialect/Modules/VMVX/IR/VMVXOps.h b/iree/compiler/Dialect/Modules/VMVX/IR/VMVXOps.h index b06a990..ef13ba6 100644 --- a/iree/compiler/Dialect/Modules/VMVX/IR/VMVXOps.h +++ b/iree/compiler/Dialect/Modules/VMVX/IR/VMVXOps.h
@@ -9,8 +9,8 @@ #include <cstdint> -#include "iree/compiler/Dialect/IREE/IR/IREETraits.h" #include "iree/compiler/Dialect/Modules/VMVX/IR/VMVXTypes.h" +#include "iree/compiler/Dialect/Util/IR/UtilTraits.h" #include "mlir/IR/Attributes.h" #include "mlir/IR/BuiltinOps.h" #include "mlir/IR/BuiltinTypes.h"
diff --git a/iree/compiler/Dialect/Modules/VMVX/IR/VMVXTypes.h b/iree/compiler/Dialect/Modules/VMVX/IR/VMVXTypes.h index bf36ff0..266297f 100644 --- a/iree/compiler/Dialect/Modules/VMVX/IR/VMVXTypes.h +++ b/iree/compiler/Dialect/Modules/VMVX/IR/VMVXTypes.h
@@ -9,7 +9,7 @@ #include <cstdint> -#include "iree/compiler/Dialect/IREE/IR/IREETypes.h" +#include "iree/compiler/Dialect/Util/IR/UtilTypes.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/DenseMapInfo.h" #include "llvm/ADT/Optional.h"
diff --git a/iree/compiler/Dialect/Modules/VMVX/Transforms/BUILD b/iree/compiler/Dialect/Modules/VMVX/Transforms/BUILD index 2acdc18..f4c1116 100644 --- a/iree/compiler/Dialect/Modules/VMVX/Transforms/BUILD +++ b/iree/compiler/Dialect/Modules/VMVX/Transforms/BUILD
@@ -25,14 +25,14 @@ "//iree/compiler/Codegen/LLVMCPU", "//iree/compiler/Dialect/HAL/IR:HALDialect", "//iree/compiler/Dialect/HAL/Transforms", - "//iree/compiler/Dialect/IREE/IR", - "//iree/compiler/Dialect/IREE/Transforms", "//iree/compiler/Dialect/Modules/VMVX/Conversion/HALToVMVX", "//iree/compiler/Dialect/Modules/VMVX/Conversion/StandardToVMVX", "//iree/compiler/Dialect/Modules/VMVX/IR", "//iree/compiler/Dialect/Modules/VMVX/IR:VMVXDialect", "//iree/compiler/Dialect/Shape/IR", "//iree/compiler/Dialect/Shape/Transforms", + "//iree/compiler/Dialect/Util/IR", + "//iree/compiler/Dialect/Util/Transforms", "//iree/compiler/Dialect/VM/IR", "@llvm-project//llvm:Support", "@llvm-project//mlir:Affine",
diff --git a/iree/compiler/Dialect/Modules/VMVX/Transforms/CMakeLists.txt b/iree/compiler/Dialect/Modules/VMVX/Transforms/CMakeLists.txt index d8af381..0f83203 100644 --- a/iree/compiler/Dialect/Modules/VMVX/Transforms/CMakeLists.txt +++ b/iree/compiler/Dialect/Modules/VMVX/Transforms/CMakeLists.txt
@@ -46,14 +46,14 @@ iree::compiler::Codegen::PassHeaders iree::compiler::Dialect::HAL::IR::HALDialect iree::compiler::Dialect::HAL::Transforms - iree::compiler::Dialect::IREE::IR - iree::compiler::Dialect::IREE::Transforms iree::compiler::Dialect::Modules::VMVX::Conversion::HALToVMVX iree::compiler::Dialect::Modules::VMVX::Conversion::StandardToVMVX iree::compiler::Dialect::Modules::VMVX::IR iree::compiler::Dialect::Modules::VMVX::IR::VMVXDialect iree::compiler::Dialect::Shape::IR iree::compiler::Dialect::Shape::Transforms + iree::compiler::Dialect::Util::IR + iree::compiler::Dialect::Util::Transforms iree::compiler::Dialect::VM::IR PUBLIC )
diff --git a/iree/compiler/Dialect/Modules/VMVX/Transforms/Conversion.cpp b/iree/compiler/Dialect/Modules/VMVX/Transforms/Conversion.cpp index b062fe0..ee4d50f 100644 --- a/iree/compiler/Dialect/Modules/VMVX/Transforms/Conversion.cpp +++ b/iree/compiler/Dialect/Modules/VMVX/Transforms/Conversion.cpp
@@ -5,12 +5,12 @@ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception #include "iree/compiler/Dialect/HAL/IR/HALDialect.h" -#include "iree/compiler/Dialect/IREE/IR/IREEDialect.h" #include "iree/compiler/Dialect/Modules/VMVX/Conversion/HALToVMVX/ConvertHALToVMVX.h" #include "iree/compiler/Dialect/Modules/VMVX/Conversion/StandardToVMVX/ConvertStandardToVMVX.h" #include "iree/compiler/Dialect/Modules/VMVX/IR/VMVXDialect.h" #include "iree/compiler/Dialect/Modules/VMVX/IR/VMVXTypes.h" #include "iree/compiler/Dialect/Modules/VMVX/Transforms/Passes.h" +#include "iree/compiler/Dialect/Util/IR/UtilDialect.h" #include "iree/compiler/Dialect/VM/IR/VMDialect.h" #include "llvm/ADT/STLExtras.h" #include "mlir/Conversion/TosaToStandard/TosaToStandard.h" @@ -34,8 +34,9 @@ : public PassWrapper<ConversionPass, OperationPass<mlir::ModuleOp>> { public: void getDependentDialects(DialectRegistry ®istry) const override { - registry.insert<IREEDialect, IREE::HAL::HALDialect, IREE::VM::VMDialect, - IREE::VMVX::VMVXDialect, memref::MemRefDialect>(); + registry.insert<IREE::Util::UtilDialect, IREE::HAL::HALDialect, + IREE::VM::VMDialect, IREE::VMVX::VMVXDialect, + memref::MemRefDialect>(); } StringRef getArgument() const override { return "iree-vmvx-conversion"; } @@ -64,7 +65,7 @@ ConversionTarget conversionTarget(*context); conversionTarget.addIllegalDialect<IREE::HAL::HALDialect>(); conversionTarget.addIllegalDialect<tensor::TensorDialect>(); - conversionTarget.addLegalDialect<IREEDialect>(); + conversionTarget.addLegalDialect<IREE::Util::UtilDialect>(); conversionTarget.addLegalDialect<IREE::VMVX::VMVXDialect>(); conversionTarget.addLegalDialect<mlir::StandardOpsDialect>(); conversionTarget.addLegalDialect<mlir::AffineDialect>();
diff --git a/iree/compiler/Dialect/Shape/IR/BUILD b/iree/compiler/Dialect/Shape/IR/BUILD index 23862fd..81a4410 100644 --- a/iree/compiler/Dialect/Shape/IR/BUILD +++ b/iree/compiler/Dialect/Shape/IR/BUILD
@@ -27,7 +27,7 @@ include = ["*.td"], ), deps = [ - "//iree/compiler/Dialect/IREE/IR:td_files", + "//iree/compiler/Dialect/Util/IR:td_files", "@llvm-project//mlir:InferTypeOpInterfaceTdFiles", "@llvm-project//mlir:OpBaseTdFiles", "@llvm-project//mlir:SideEffectTdFiles", @@ -58,7 +58,7 @@ deps = [ ":ShapeInterfacesGen", ":ShapeOpsGen", - "//iree/compiler/Dialect/IREE/IR", + "//iree/compiler/Dialect/Util/IR", "//iree/compiler/Utils", "@llvm-project//llvm:Support", "@llvm-project//mlir:ControlFlowInterfaces",
diff --git a/iree/compiler/Dialect/Shape/IR/CMakeLists.txt b/iree/compiler/Dialect/Shape/IR/CMakeLists.txt index 367faf8..4c62a97 100644 --- a/iree/compiler/Dialect/Shape/IR/CMakeLists.txt +++ b/iree/compiler/Dialect/Shape/IR/CMakeLists.txt
@@ -42,7 +42,7 @@ MLIRSupport MLIRTransforms MLIRViewLikeInterface - iree::compiler::Dialect::IREE::IR + iree::compiler::Dialect::Util::IR iree::compiler::Utils PUBLIC )
diff --git a/iree/compiler/Dialect/Shape/IR/ShapeTypes.h b/iree/compiler/Dialect/Shape/IR/ShapeTypes.h index b045182..3a7ca07 100644 --- a/iree/compiler/Dialect/Shape/IR/ShapeTypes.h +++ b/iree/compiler/Dialect/Shape/IR/ShapeTypes.h
@@ -7,7 +7,7 @@ #ifndef IREE_COMPILER_DIALECT_SHAPE_IR_IREETYPES_H_ #define IREE_COMPILER_DIALECT_SHAPE_IR_IREETYPES_H_ -#include "iree/compiler/Dialect/IREE/IR/IREETypes.h" +#include "iree/compiler/Dialect/Util/IR/UtilTypes.h" #include "mlir/IR/BuiltinTypes.h" #include "mlir/IR/Diagnostics.h" #include "mlir/IR/Location.h"
diff --git a/iree/compiler/Dialect/IREE/BUILD b/iree/compiler/Dialect/Util/BUILD similarity index 100% rename from iree/compiler/Dialect/IREE/BUILD rename to iree/compiler/Dialect/Util/BUILD
diff --git a/iree/compiler/Dialect/IREE/CMakeLists.txt b/iree/compiler/Dialect/Util/CMakeLists.txt similarity index 91% rename from iree/compiler/Dialect/IREE/CMakeLists.txt rename to iree/compiler/Dialect/Util/CMakeLists.txt index bdf003b..e8862cd 100644 --- a/iree/compiler/Dialect/IREE/CMakeLists.txt +++ b/iree/compiler/Dialect/Util/CMakeLists.txt
@@ -1,6 +1,6 @@ ################################################################################ # Autogenerated by build_tools/bazel_to_cmake/bazel_to_cmake.py from # -# iree/compiler/Dialect/IREE/BUILD # +# iree/compiler/Dialect/Util/BUILD # # # # Use iree_cmake_extra_content from iree/build_defs.oss.bzl to add arbitrary # # CMake-only content. #
diff --git a/iree/compiler/Dialect/IREE/Conversion/BUILD b/iree/compiler/Dialect/Util/Conversion/BUILD similarity index 93% rename from iree/compiler/Dialect/IREE/Conversion/BUILD rename to iree/compiler/Dialect/Util/Conversion/BUILD index 20b58e7..df5120e 100644 --- a/iree/compiler/Dialect/IREE/Conversion/BUILD +++ b/iree/compiler/Dialect/Util/Conversion/BUILD
@@ -19,7 +19,7 @@ "PreserveCompilerHints.h", ], deps = [ - "//iree/compiler/Dialect/IREE/IR", + "//iree/compiler/Dialect/Util/IR", "@llvm-project//mlir:IR", "@llvm-project//mlir:Transforms", ],
diff --git a/iree/compiler/Dialect/IREE/Conversion/CMakeLists.txt b/iree/compiler/Dialect/Util/Conversion/CMakeLists.txt similarity index 89% rename from iree/compiler/Dialect/IREE/Conversion/CMakeLists.txt rename to iree/compiler/Dialect/Util/Conversion/CMakeLists.txt index 0b3e597..9b8def8 100644 --- a/iree/compiler/Dialect/IREE/Conversion/CMakeLists.txt +++ b/iree/compiler/Dialect/Util/Conversion/CMakeLists.txt
@@ -1,6 +1,6 @@ ################################################################################ # Autogenerated by build_tools/bazel_to_cmake/bazel_to_cmake.py from # -# iree/compiler/Dialect/IREE/Conversion/BUILD # +# iree/compiler/Dialect/Util/Conversion/BUILD # # # # Use iree_cmake_extra_content from iree/build_defs.oss.bzl to add arbitrary # # CMake-only content. # @@ -20,7 +20,7 @@ DEPS MLIRIR MLIRTransforms - iree::compiler::Dialect::IREE::IR + iree::compiler::Dialect::Util::IR PUBLIC )
diff --git a/iree/compiler/Dialect/Util/Conversion/PreserveCompilerHints.cpp b/iree/compiler/Dialect/Util/Conversion/PreserveCompilerHints.cpp new file mode 100644 index 0000000..f4f1bda --- /dev/null +++ b/iree/compiler/Dialect/Util/Conversion/PreserveCompilerHints.cpp
@@ -0,0 +1,49 @@ +// Copyright 2019 The IREE Authors +// +// Licensed under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + +#include "iree/compiler/Dialect/Util/Conversion/PreserveCompilerHints.h" + +#include "iree/compiler/Dialect/Util/IR/UtilOps.h" +#include "mlir/IR/MLIRContext.h" +#include "mlir/IR/PatternMatch.h" +#include "mlir/Transforms/DialectConversion.h" + +namespace mlir { +namespace iree_compiler { +namespace IREE { +namespace Util { + +namespace { +class PreserveDoNotOptimize : public OpConversionPattern<DoNotOptimizeOp> { + public: + using OpConversionPattern<DoNotOptimizeOp>::OpConversionPattern; + LogicalResult matchAndRewrite( + DoNotOptimizeOp op, llvm::ArrayRef<Value> operands, + ConversionPatternRewriter &rewriter) const override { + rewriter.replaceOpWithNewOp<DoNotOptimizeOp>(op, operands, op->getAttrs()); + return success(); + } +}; +} // namespace + +void setupCompilerHintsLegality(MLIRContext *context, ConversionTarget &target, + TypeConverter &typeConverter) { + target.addDynamicallyLegalOp<DoNotOptimizeOp>([&](DoNotOptimizeOp op) { + return llvm::all_of(op.getResultTypes(), [&typeConverter](Type t) { + return typeConverter.isLegal(t); + }); + }); +} + +void populatePreserveCompilerHintsPatterns(MLIRContext *context, + OwningRewritePatternList &patterns) { + patterns.insert<PreserveDoNotOptimize>(context); +} + +} // namespace Util +} // namespace IREE +} // namespace iree_compiler +} // namespace mlir
diff --git a/iree/compiler/Dialect/IREE/Conversion/PreserveCompilerHints.h b/iree/compiler/Dialect/Util/Conversion/PreserveCompilerHints.h similarity index 93% rename from iree/compiler/Dialect/IREE/Conversion/PreserveCompilerHints.h rename to iree/compiler/Dialect/Util/Conversion/PreserveCompilerHints.h index 1926bbf..96ca616 100644 --- a/iree/compiler/Dialect/IREE/Conversion/PreserveCompilerHints.h +++ b/iree/compiler/Dialect/Util/Conversion/PreserveCompilerHints.h
@@ -13,6 +13,8 @@ namespace mlir { namespace iree_compiler { +namespace IREE { +namespace Util { // Adds op legality rules to |conversionTarget| to preserve compiler hints // that satisfy the type constraints of |typeConverter|. @@ -25,6 +27,8 @@ void populatePreserveCompilerHintsPatterns(MLIRContext *context, OwningRewritePatternList &patterns); +} // namespace Util +} // namespace IREE } // namespace iree_compiler } // namespace mlir
diff --git a/iree/compiler/Dialect/IREE/Conversion/test/BUILD b/iree/compiler/Dialect/Util/Conversion/test/BUILD similarity index 100% rename from iree/compiler/Dialect/IREE/Conversion/test/BUILD rename to iree/compiler/Dialect/Util/Conversion/test/BUILD
diff --git a/iree/compiler/Dialect/IREE/Conversion/test/CMakeLists.txt b/iree/compiler/Dialect/Util/Conversion/test/CMakeLists.txt similarity index 93% rename from iree/compiler/Dialect/IREE/Conversion/test/CMakeLists.txt rename to iree/compiler/Dialect/Util/Conversion/test/CMakeLists.txt index 5dc210f..6ff9b49 100644 --- a/iree/compiler/Dialect/IREE/Conversion/test/CMakeLists.txt +++ b/iree/compiler/Dialect/Util/Conversion/test/CMakeLists.txt
@@ -1,6 +1,6 @@ ################################################################################ # Autogenerated by build_tools/bazel_to_cmake/bazel_to_cmake.py from # -# iree/compiler/Dialect/IREE/Conversion/test/BUILD # +# iree/compiler/Dialect/Util/Conversion/test/BUILD # # # # Use iree_cmake_extra_content from iree/build_defs.oss.bzl to add arbitrary # # CMake-only content. #
diff --git a/iree/compiler/Dialect/IREE/Conversion/test/convert_flow_to_hal.mlir b/iree/compiler/Dialect/Util/Conversion/test/convert_flow_to_hal.mlir similarity index 62% rename from iree/compiler/Dialect/IREE/Conversion/test/convert_flow_to_hal.mlir rename to iree/compiler/Dialect/Util/Conversion/test/convert_flow_to_hal.mlir index 21e086e..e611cd9 100644 --- a/iree/compiler/Dialect/IREE/Conversion/test/convert_flow_to_hal.mlir +++ b/iree/compiler/Dialect/Util/Conversion/test/convert_flow_to_hal.mlir
@@ -2,12 +2,12 @@ // CHECK-LABEL: @preserve_compiler_hints func @preserve_compiler_hints() { - // CHECK: iree.do_not_optimize() - iree.do_not_optimize() + // CHECK: util.do_not_optimize() + util.do_not_optimize() // CHECK: %[[C:.+]] = constant 2 %c = constant 2 : i32 - // CHECK: iree.do_not_optimize(%[[C]]) - iree.do_not_optimize(%c) : i32 + // CHECK: util.do_not_optimize(%[[C]]) + util.do_not_optimize(%c) : i32 return }
diff --git a/iree/compiler/Dialect/IREE/IR/BUILD b/iree/compiler/Dialect/Util/IR/BUILD similarity index 68% rename from iree/compiler/Dialect/IREE/IR/BUILD rename to iree/compiler/Dialect/Util/IR/BUILD index cddd1c7..9ee3355 100644 --- a/iree/compiler/Dialect/IREE/IR/BUILD +++ b/iree/compiler/Dialect/Util/IR/BUILD
@@ -14,15 +14,15 @@ licenses = ["notice"], # Apache 2.0 ) -exports_files(["IREEBase.td"]) +exports_files(["UtilBase.td"]) td_library( name = "td_files", srcs = enforce_glob( [ - "IREEBase.td", - "IREEInterfaces.td", - "IREEOps.td", + "UtilBase.td", + "UtilInterfaces.td", + "UtilOps.td", ], include = ["*.td"], ), @@ -35,23 +35,23 @@ cc_library( name = "IR", srcs = [ - "IREEDialect.cpp", - "IREEOpInterfaces.cpp.inc", - "IREEOps.cpp", - "IREEOps.cpp.inc", - "IREETypes.cpp", + "UtilDialect.cpp", + "UtilOpInterfaces.cpp.inc", + "UtilOps.cpp", + "UtilOps.cpp.inc", + "UtilTypes.cpp", ], hdrs = [ - "IREEDialect.h", - "IREEOpInterfaces.h.inc", - "IREEOps.h", - "IREEOps.h.inc", - "IREETraits.h", - "IREETypes.h", + "UtilDialect.h", + "UtilOpInterfaces.h.inc", + "UtilOps.h", + "UtilOps.h.inc", + "UtilTraits.h", + "UtilTypes.h", ], deps = [ - ":IREEInterfacesGen", - ":IREEOpsGen", + ":UtilInterfacesGen", + ":UtilOpsGen", "@llvm-project//llvm:Support", "@llvm-project//mlir:IR", "@llvm-project//mlir:Parser", @@ -62,48 +62,48 @@ ) gentbl_cc_library( - name = "IREEInterfacesGen", + name = "UtilInterfacesGen", tbl_outs = [ ( ["-gen-op-interface-decls"], - "IREEOpInterfaces.h.inc", + "UtilOpInterfaces.h.inc", ), ( ["-gen-op-interface-defs"], - "IREEOpInterfaces.cpp.inc", + "UtilOpInterfaces.cpp.inc", ), ], tblgen = "@llvm-project//mlir:mlir-tblgen", - td_file = "IREEInterfaces.td", + td_file = "UtilInterfaces.td", deps = [":td_files"], ) gentbl_cc_library( - name = "IREEOpsGen", + name = "UtilOpsGen", tbl_outs = [ ( ["-gen-op-decls"], - "IREEOps.h.inc", + "UtilOps.h.inc", ), ( ["-gen-op-defs"], - "IREEOps.cpp.inc", + "UtilOps.cpp.inc", ), ], tblgen = "@llvm-project//mlir:mlir-tblgen", - td_file = "IREEOps.td", + td_file = "UtilOps.td", deps = [":td_files"], ) iree_tablegen_doc( - name = "IREEDialectDocGen", + name = "UtilUtilDialectDocGen", tbl_outs = [ ( ["-gen-dialect-doc"], - "IREEDialect.md", + "UtilUtilDialect.md", ), ], tblgen = "@llvm-project//mlir:mlir-tblgen", - td_file = 
"IREEOps.td", + td_file = "UtilOps.td", deps = [":td_files"], )
diff --git a/iree/compiler/Dialect/IREE/IR/CMakeLists.txt b/iree/compiler/Dialect/Util/IR/CMakeLists.txt similarity index 61% rename from iree/compiler/Dialect/IREE/IR/CMakeLists.txt rename to iree/compiler/Dialect/Util/IR/CMakeLists.txt index f867c19..c8e23d6 100644 --- a/iree/compiler/Dialect/IREE/IR/CMakeLists.txt +++ b/iree/compiler/Dialect/Util/IR/CMakeLists.txt
@@ -1,6 +1,6 @@ ################################################################################ # Autogenerated by build_tools/bazel_to_cmake/bazel_to_cmake.py from # -# iree/compiler/Dialect/IREE/IR/BUILD # +# iree/compiler/Dialect/Util/IR/BUILD # # # # Use iree_cmake_extra_content from iree/build_defs.oss.bzl to add arbitrary # # CMake-only content. # @@ -14,21 +14,21 @@ NAME IR HDRS - "IREEDialect.h" - "IREEOpInterfaces.h.inc" - "IREEOps.h" - "IREEOps.h.inc" - "IREETraits.h" - "IREETypes.h" + "UtilDialect.h" + "UtilOpInterfaces.h.inc" + "UtilOps.h" + "UtilOps.h.inc" + "UtilTraits.h" + "UtilTypes.h" SRCS - "IREEDialect.cpp" - "IREEOpInterfaces.cpp.inc" - "IREEOps.cpp" - "IREEOps.cpp.inc" - "IREETypes.cpp" + "UtilDialect.cpp" + "UtilOpInterfaces.cpp.inc" + "UtilOps.cpp" + "UtilOps.cpp.inc" + "UtilTypes.cpp" DEPS - ::IREEInterfacesGen - ::IREEOpsGen + ::UtilInterfacesGen + ::UtilOpsGen LLVMSupport MLIRIR MLIRParser @@ -40,31 +40,31 @@ iree_tablegen_library( NAME - IREEInterfacesGen + UtilInterfacesGen TD_FILE - "IREEInterfaces.td" + "UtilInterfaces.td" OUTS - -gen-op-interface-decls IREEOpInterfaces.h.inc - -gen-op-interface-defs IREEOpInterfaces.cpp.inc + -gen-op-interface-decls UtilOpInterfaces.h.inc + -gen-op-interface-defs UtilOpInterfaces.cpp.inc ) iree_tablegen_library( NAME - IREEOpsGen + UtilOpsGen TD_FILE - "IREEOps.td" + "UtilOps.td" OUTS - -gen-op-decls IREEOps.h.inc - -gen-op-defs IREEOps.cpp.inc + -gen-op-decls UtilOps.h.inc + -gen-op-defs UtilOps.cpp.inc ) iree_tablegen_doc( NAME - IREEDialectDocGen + UtilUtilDialectDocGen TD_FILE - "IREEOps.td" + "UtilOps.td" OUTS - -gen-dialect-doc IREEDialect.md + -gen-dialect-doc UtilUtilDialect.md ) ### BAZEL_TO_CMAKE_PRESERVES_ALL_CONTENT_BELOW_THIS_LINE ###
diff --git a/iree/compiler/Dialect/IREE/IR/IREEBase.td b/iree/compiler/Dialect/Util/IR/UtilBase.td similarity index 78% rename from iree/compiler/Dialect/IREE/IR/IREEBase.td rename to iree/compiler/Dialect/Util/IR/UtilBase.td index 4ea21cc..8f25e17 100644 --- a/iree/compiler/Dialect/IREE/IR/IREEBase.td +++ b/iree/compiler/Dialect/Util/IR/UtilBase.td
@@ -4,8 +4,8 @@ // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -#ifndef IREE_DIALECT_IREE_BASE -#define IREE_DIALECT_IREE_BASE +#ifndef IREE_DIALECT_UTIL_IR_UTIL_BASE +#define IREE_DIALECT_UTIL_IR_UTIL_BASE include "mlir/IR/OpBase.td" @@ -13,10 +13,9 @@ // IREE base dialect used for types common across IREE subdialects. //===----------------------------------------------------------------------===// -// TODO(b/143787186): rename when old dialects are removed. -def IREE_Dialect : Dialect { - let name = "iree"; - let cppNamespace = "::mlir::iree_compiler::IREE"; +def Util_Dialect : Dialect { + let name = "util"; + let cppNamespace = "::mlir::iree_compiler::IREE::Util"; let summary = [{ A dialect used for types common across IREE subdialects. @@ -27,13 +26,13 @@ // General types and helpers //===----------------------------------------------------------------------===// -def IREE_Bool : +def Util_Bool : AnyTypeOf<[I1, I8], "boolean-storing type (1 or 8 -bit integer)">; -def IREE_Element : AnyTypeOf<[AnySignlessInteger, AnyFloat]>; -def IREE_MemRef : MemRefOf<[IREE_Element]>; -def IREE_Tensor : TensorOf<[IREE_Element]>; +def Util_Element : AnyTypeOf<[AnySignlessInteger, AnyFloat]>; +def Util_MemRef : MemRefOf<[Util_Element]>; +def Util_Tensor : TensorOf<[Util_Element]>; -class IREE_IndexAttrBase<string descr> : +class Util_IndexAttrBase<string descr> : TypedAttrBase< Index, "IntegerAttr", And<[ @@ -43,10 +42,10 @@ descr> { let returnType = [{ APInt }]; } -def IREE_IndexAttr : IREE_IndexAttrBase<"size_t">; +def Util_IndexAttr : Util_IndexAttrBase<"size_t">; -def IREE_TiedOpStorageAttr : - TypedArrayAttrBase<IREE_IndexAttr, "64-bit integer array attribute"> { +def Util_TiedOpStorageAttr : + TypedArrayAttrBase<Util_IndexAttr, "64-bit integer array attribute"> { let constBuilderCall = "$_builder.getI64ArrayAttr($0)"; } @@ -54,7 +53,7 @@ // Status codes 
//===----------------------------------------------------------------------===// -def IREE_Status : I<32>; +def Util_Status : I<32>; //===----------------------------------------------------------------------===// // Attribute constraints @@ -110,20 +109,20 @@ } //===----------------------------------------------------------------------===// -// IREE_StructAttr +// Util_StructAttr //===----------------------------------------------------------------------===// // This has a custom tablegen generator in StructAttrGen.cpp to create the // attribute and storage types. It differs from the core MLIR StructAttr // by more closely matching what handwritten C++ would have (better typing // and ergonomics and custom parser/printer). -class IREE_StructFieldAttr<string thisName, Attr thisType> { +class Util_StructFieldAttr<string thisName, Attr thisType> { string name = thisName; Attr type = thisType; } -class IREE_StructAttr<string thisKind, string name, Dialect dialect, - list<IREE_StructFieldAttr> attributes> +class Util_StructAttr<string thisKind, string name, Dialect dialect, + list<Util_StructFieldAttr> attributes> : Attr<CPred<"$_self.isa<" # name # ">()">, "structured attribute of " # name> { string kind = thisKind; @@ -133,7 +132,7 @@ let returnType = name; let convertFromStorage = "$_self"; Dialect structDialect = dialect; - list<IREE_StructFieldAttr> fields = attributes; + list<Util_StructFieldAttr> fields = attributes; } //===----------------------------------------------------------------------===// @@ -141,41 +140,41 @@ //===----------------------------------------------------------------------===// def AnyPtr : DialectType< - IREE_Dialect, - CPred<"$_self.isa<IREE::PtrType>()">, + Util_Dialect, + CPred<"$_self.isa<IREE::Util::PtrType>()">, "ptr"> { let description = [{ Pointer to a typed value. 
}]; - let builderCall = "$_builder.getType<IREE::PtrType>()"; + let builderCall = "$_builder.getType<IREE::Util::PtrType>()"; } class PtrOf<Type type> : Type<And<[ - CPred<"$_self.isa<IREE::PtrType>()">, - SubstLeaves<"$_self", "$_self.cast<IREE::PtrType>().getTargetType()", + CPred<"$_self.isa<IREE::Util::PtrType>()">, + SubstLeaves<"$_self", "$_self.cast<IREE::Util::PtrType>().getTargetType()", type.predicate> ]>, "ptr<" # type.summary # ">"> { // Set the builder call if the base type has a builder call. string builderCall = !if(!empty(type.builderCall), - "", "PtrType::get(" # type.builderCall # ")"); + "", "IREE::Util::PtrType::get(" # type.builderCall # ")"); } class AnyPtrOf<list<Type> types> : Type<And<[ - CPred<"$_self.isa<IREE::PtrType>()">, + CPred<"$_self.isa<IREE::Util::PtrType>()">, Or<!foreach(type, types, SubstLeaves< "$_self", - "$_self.cast<IREE::PtrType>().getTargetType()", + "$_self.cast<IREE::Util::PtrType>().getTargetType()", type.predicate>)>, ]>, !interleave(!foreach(type, types, type.summary), " or ")> { string builderCall = ""; } def ByteBufferType : DialectType< - IREE_Dialect, - CPred<"$_self.isa<IREE::ByteBufferType>()">, + Util_Dialect, + CPred<"$_self.isa<IREE::Util::ByteBufferType>()">, "byte_buffer"> { let description = [{ A constant buffer of mapped host memory. @@ -184,8 +183,8 @@ def ByteBufferRefAttr : AliasedSymbolRefAttr; def MutableByteBufferType : DialectType< - IREE_Dialect, - CPred<"$_self.isa<IREE::MutableByteBufferType>()">, + Util_Dialect, + CPred<"$_self.isa<IREE::Util::MutableByteBufferType>()">, "mutable_byte_buffer"> { let description = [{ A buffer of read-write host memory. @@ -197,8 +196,8 @@ //===----------------------------------------------------------------------===// def AnyList : DialectType< - IREE_Dialect, - CPred<"$_self.isa<IREE::ListType>()">, + Util_Dialect, + CPred<"$_self.isa<IREE::Util::ListType>()">, "list"> { let description = [{ A resizable list of some type. 
@@ -207,14 +206,14 @@ class ListOf<Type type> : Type<And<[ - CPred<"$_self.isa<IREE::ListType>()">, + CPred<"$_self.isa<IREE::Util::ListType>()">, SubstLeaves<"$_self", - "$_self.cast<IREE::ListType>().getElementType()", + "$_self.cast<IREE::Util::ListType>().getElementType()", type.predicate> ]>, "list<" # type.summary # ">"> { // Set the builder call if the base type has a builder call. string builderCall = !if(!empty(type.builderCall), - "", "IREE::ListType::get(" # type.builderCall # ")"); + "", "IREE::Util::ListType::get(" # type.builderCall # ")"); } //===----------------------------------------------------------------------===// @@ -231,11 +230,11 @@ // Need to add a new call type and function attr for 'async', then can validate // entire parts of the call tree are either async-compatible or async-hostile. // Only vm.call_async callees may contain an operation with YieldPoint. -def YieldPoint : NativeOpTrait<"IREE::YieldPoint">; +def YieldPoint : NativeOpTrait<"IREE::Util::YieldPoint">; // Denotes that an operation is potentially "unsafe" if used. // Untrusted modules containing ops marked as unsafe will fail to verify at // runtime if loaded for dynamic execution. -def Unsafe : NativeOpTrait<"IREE::Unsafe">; +def Unsafe : NativeOpTrait<"IREE::Util::Unsafe">; -#endif // IREE_DIALECT_IREE_BASE +#endif // IREE_DIALECT_UTIL_IR_UTIL_BASE
diff --git a/iree/compiler/Dialect/IREE/IR/IREEDialect.cpp b/iree/compiler/Dialect/Util/IR/UtilDialect.cpp similarity index 71% rename from iree/compiler/Dialect/IREE/IR/IREEDialect.cpp rename to iree/compiler/Dialect/Util/IR/UtilDialect.cpp index a51475c..b800708 100644 --- a/iree/compiler/Dialect/IREE/IR/IREEDialect.cpp +++ b/iree/compiler/Dialect/Util/IR/UtilDialect.cpp
@@ -4,10 +4,10 @@ // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -#include "iree/compiler/Dialect/IREE/IR/IREEDialect.h" +#include "iree/compiler/Dialect/Util/IR/UtilDialect.h" -#include "iree/compiler/Dialect/IREE/IR/IREEOps.h" -#include "iree/compiler/Dialect/IREE/IR/IREETypes.h" +#include "iree/compiler/Dialect/Util/IR/UtilOps.h" +#include "iree/compiler/Dialect/Util/IR/UtilTypes.h" #include "llvm/ADT/SmallVector.h" #include "llvm/Support/SourceMgr.h" #include "mlir/IR/Attributes.h" @@ -19,6 +19,8 @@ namespace mlir { namespace iree_compiler { +namespace IREE { +namespace Util { // Used to control inlining behavior. struct IREEInlinerInterface : public DialectInlinerInterface { @@ -43,21 +45,21 @@ } }; -IREEDialect::IREEDialect(MLIRContext *context) - : Dialect(getDialectNamespace(), context, TypeID::get<IREEDialect>()) { +UtilDialect::UtilDialect(MLIRContext *context) + : Dialect(getDialectNamespace(), context, TypeID::get<UtilDialect>()) { addInterfaces<IREEInlinerInterface>(); registerTypes(); #define GET_OP_LIST addOperations< -#include "iree/compiler/Dialect/IREE/IR/IREEOps.cpp.inc" +#include "iree/compiler/Dialect/Util/IR/UtilOps.cpp.inc" >(); } -Type IREEDialect::parseType(DialectAsmParser &parser) const { +Type UtilDialect::parseType(DialectAsmParser &parser) const { Location loc = parser.getEncodedSourceLoc(parser.getNameLoc()); llvm::StringRef spec = parser.getFullSymbolSpec(); if (spec == "variant") { - return IREE::VariantType::get(getContext()); + return IREE::Util::VariantType::get(getContext()); } else if (spec.consume_front("ptr")) { if (!spec.consume_front("<") || !spec.consume_back(">")) { parser.emitError(parser.getCurrentLocation()) @@ -71,11 +73,11 @@ << parser.getFullSymbolSpec() << "'"; return Type(); } - return IREE::PtrType::getChecked(variableType, loc); + return IREE::Util::PtrType::getChecked(variableType, loc); } else if (spec == "byte_buffer") { - return 
IREE::ByteBufferType::get(getContext()); + return IREE::Util::ByteBufferType::get(getContext()); } else if (spec == "mutable_byte_buffer") { - return IREE::MutableByteBufferType::get(getContext()); + return IREE::Util::MutableByteBufferType::get(getContext()); } else if (spec.consume_front("list")) { if (!spec.consume_front("<") || !spec.consume_back(">")) { parser.emitError(parser.getCurrentLocation()) @@ -84,7 +86,7 @@ } Type elementType; if (spec == "?") { - elementType = IREE::VariantType::get(getContext()); + elementType = IREE::Util::VariantType::get(getContext()); } else { elementType = mlir::parseType(spec, getContext()); } @@ -94,24 +96,24 @@ << parser.getFullSymbolSpec() << "'"; return Type(); } - return IREE::ListType::getChecked(elementType, loc); + return IREE::Util::ListType::getChecked(elementType, loc); } emitError(loc, "unknown IREE type: ") << spec; return Type(); } -void IREEDialect::printType(Type type, DialectAsmPrinter &os) const { - if (type.isa<IREE::VariantType>()) { +void UtilDialect::printType(Type type, DialectAsmPrinter &os) const { + if (type.isa<IREE::Util::VariantType>()) { os << "variant"; - } else if (auto ptrType = type.dyn_cast<IREE::PtrType>()) { + } else if (auto ptrType = type.dyn_cast<IREE::Util::PtrType>()) { os << "ptr<" << ptrType.getTargetType() << ">"; - } else if (type.isa<IREE::ByteBufferType>()) { + } else if (type.isa<IREE::Util::ByteBufferType>()) { os << "byte_buffer"; - } else if (type.isa<IREE::MutableByteBufferType>()) { + } else if (type.isa<IREE::Util::MutableByteBufferType>()) { os << "mutable_byte_buffer"; - } else if (auto listType = type.dyn_cast<IREE::ListType>()) { + } else if (auto listType = type.dyn_cast<IREE::Util::ListType>()) { os << "list<"; - if (listType.getElementType().isa<IREE::VariantType>()) { + if (listType.getElementType().isa<IREE::Util::VariantType>()) { os << "?"; } else { os << listType.getElementType(); @@ -122,5 +124,7 @@ } } +} // namespace Util +} // namespace IREE } // namespace 
iree_compiler } // namespace mlir
diff --git a/iree/compiler/Dialect/IREE/IR/IREEDialect.h b/iree/compiler/Dialect/Util/IR/UtilDialect.h similarity index 79% rename from iree/compiler/Dialect/IREE/IR/IREEDialect.h rename to iree/compiler/Dialect/Util/IR/UtilDialect.h index 6268cac..6d2e99f 100644 --- a/iree/compiler/Dialect/IREE/IR/IREEDialect.h +++ b/iree/compiler/Dialect/Util/IR/UtilDialect.h
@@ -12,11 +12,13 @@ namespace mlir { namespace iree_compiler { +namespace IREE { +namespace Util { -class IREEDialect : public Dialect { +class UtilDialect : public Dialect { public: - explicit IREEDialect(MLIRContext* context); - static StringRef getDialectNamespace() { return "iree"; } + explicit UtilDialect(MLIRContext* context); + static StringRef getDialectNamespace() { return "util"; } /// Parses a type registered to this dialect. Type parseType(DialectAsmParser& parser) const override; @@ -29,6 +31,8 @@ void registerTypes(); }; +} // namespace Util +} // namespace IREE } // namespace iree_compiler } // namespace mlir
diff --git a/iree/compiler/Dialect/IREE/IR/IREEInterfaces.td b/iree/compiler/Dialect/Util/IR/UtilInterfaces.td similarity index 87% rename from iree/compiler/Dialect/IREE/IR/IREEInterfaces.td rename to iree/compiler/Dialect/Util/IR/UtilInterfaces.td index eb35f5f..ffed6dd 100644 --- a/iree/compiler/Dialect/IREE/IR/IREEInterfaces.td +++ b/iree/compiler/Dialect/Util/IR/UtilInterfaces.td
@@ -4,16 +4,18 @@ // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -#ifndef IREE_DIALECT_IREE_INTERFACES -#define IREE_DIALECT_IREE_INTERFACES +#ifndef IREE_DIALECT_UTIL_IR_UTIL_INTERFACES +#define IREE_DIALECT_UTIL_IR_UTIL_INTERFACES -include "iree/compiler/Dialect/IREE/IR/IREEBase.td" +include "iree/compiler/Dialect/Util/IR/UtilBase.td" //===----------------------------------------------------------------------===// -// IREE::TiedOpInterface +// IREE::Util::TiedOpInterface //===----------------------------------------------------------------------===// -def IREE_TiedOpInterface : OpInterface<"TiedOpInterface"> { +def Util_TiedOpInterface : OpInterface<"TiedOpInterface"> { + let cppNamespace = "::mlir::iree_compiler::IREE::Util"; + let description = [{ An operation that "ties" one or more results to its operands indicating that the result is directly related to the operand in an operation-defined @@ -40,7 +42,7 @@ The default implementations use an attribute on the op to store the relationship: - `OptionalAttr<IREE_TiedOpStorageAttr>:$tied_operands` + `OptionalAttr<Util_TiedOpStorageAttr>:$tied_operands` Note that `$tied_operands` are indices inside the operand range returned by `getTiedOperandsIndexAndLength`, which may *not* be the full operand @@ -82,7 +84,7 @@ /*args=*/(ins "unsigned":$resultIndex), /*methodBody=*/[{}], /*defaultImplementation=*/[{ - return IREE::TiedOpInterface::findTiedBaseValue($_op.getResult(resultIndex)); + return IREE::Util::TiedOpInterface::findTiedBaseValue($_op.getResult(resultIndex)); }] >, InterfaceMethod< @@ -114,7 +116,7 @@ /*args=*/(ins "unsigned":$resultIndex), /*methodBody=*/[{}], /*defaultImplementation=*/[{ - return IREE::detail::getTiedResultOperandIndex($_op, resultIndex); + return IREE::Util::detail::getTiedResultOperandIndex($_op, resultIndex); }] >, InterfaceMethod< @@ -130,7 +132,7 @@ "::llvm::Optional<unsigned>":$operandIndex), /*methodBody=*/[{}], 
/*defaultImplementation=*/[{ - return IREE::detail::setTiedResultOperandIndex($_op, resultIndex, operandIndex); + return IREE::Util::detail::setTiedResultOperandIndex($_op, resultIndex, operandIndex); }] >, InterfaceMethod< @@ -146,7 +148,7 @@ /*args=*/(ins), /*methodBody=*/[{}], /*defaultImplementation=*/[{ - return IREE::detail::getTiedResultOperandIndices($_op); + return IREE::Util::detail::getTiedResultOperandIndices($_op); }] >, ]; @@ -164,8 +166,8 @@ }]; let verify = [{ - return IREE::detail::verifyTiedOp($_op); + return IREE::Util::detail::verifyTiedOp($_op); }]; } -#endif // IREE_DIALECT_IREE_INTERFACES +#endif // IREE_DIALECT_UTIL_IR_UTIL_INTERFACES
diff --git a/iree/compiler/Dialect/IREE/IR/IREEOps.cpp b/iree/compiler/Dialect/Util/IR/UtilOps.cpp similarity index 90% rename from iree/compiler/Dialect/IREE/IR/IREEOps.cpp rename to iree/compiler/Dialect/Util/IR/UtilOps.cpp index 8b903d5..3977552 100644 --- a/iree/compiler/Dialect/IREE/IR/IREEOps.cpp +++ b/iree/compiler/Dialect/Util/IR/UtilOps.cpp
@@ -4,9 +4,9 @@ // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -#include "iree/compiler/Dialect/IREE/IR/IREEOps.h" +#include "iree/compiler/Dialect/Util/IR/UtilOps.h" -#include "iree/compiler/Dialect/IREE/IR/IREETypes.h" +#include "iree/compiler/Dialect/Util/IR/UtilTypes.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallVector.h" #include "llvm/Support/SMLoc.h" @@ -25,9 +25,10 @@ namespace mlir { namespace iree_compiler { namespace IREE { +namespace Util { //===----------------------------------------------------------------------===// -// iree.do_not_optimize +// util.do_not_optimize //===----------------------------------------------------------------------===// void DoNotOptimizeOp::build(OpBuilder &builder, OperationState &state, @@ -56,7 +57,7 @@ } void printDoNotOptimizeOp(OpAsmPrinter &p, Operation *op) { - p << "iree.do_not_optimize"; + p << "util.do_not_optimize"; p << "("; p.printOperands(op->getOperands()); p << ")"; @@ -88,7 +89,7 @@ } //===----------------------------------------------------------------------===// -// iree.unfoldable_constant +// util.unfoldable_constant //===----------------------------------------------------------------------===// // Parsing/printing copied from std.constant @@ -112,8 +113,8 @@ } void printUnfoldableConstantOp(OpAsmPrinter &p, Operation *op) { - auto constOp = cast<IREE::UnfoldableConstantOp>(op); - p << "iree.unfoldable_constant "; + auto constOp = cast<IREE::Util::UnfoldableConstantOp>(op); + p << "util.unfoldable_constant "; p.printOptionalAttrDict(constOp->getAttrs(), /*elidedAttrs=*/{"value"}); if (constOp->getAttrs().size() > 1) p << ' '; @@ -127,7 +128,7 @@ struct ExpandUnfoldableConstantOp : public OpRewritePattern<UnfoldableConstantOp> { - using OpRewritePattern<IREE::UnfoldableConstantOp>::OpRewritePattern; + using OpRewritePattern<IREE::Util::UnfoldableConstantOp>::OpRewritePattern; LogicalResult 
matchAndRewrite(UnfoldableConstantOp op, PatternRewriter &rewriter) const override { auto stdConst = rewriter.create<ConstantOp>(op.getLoc(), op.value()); @@ -151,7 +152,7 @@ Type &elementType) { if (failed(parser.parseType(listType))) { return parser.emitError(parser.getCurrentLocation(), - "expected !iree.list<T> type"); + "expected !util.list<T> type"); } auto listElementType = listType.cast<ListType>().getElementType(); if (succeeded(parser.parseOptionalArrow())) { @@ -188,18 +189,18 @@ Type leadingType; if (failed(parser.parseType(leadingType))) { return parser.emitError(parser.getCurrentLocation(), - "expected element type or !iree.list<T> type"); + "expected element type or !util.list<T> type"); } if (succeeded(parser.parseOptionalArrow())) { elementType = leadingType; if (failed(parser.parseType(listType)) || !listType.isa<ListType>()) { return parser.emitError(parser.getCurrentLocation(), - "expected an !iree.list<T> type"); + "expected an !util.list<T> type"); } } else { if (!leadingType.isa<ListType>()) { return parser.emitError(parser.getCurrentLocation(), - "expected an !iree.list<T> type"); + "expected an !util.list<T> type"); } listType = leadingType; elementType = listType.cast<ListType>().getElementType(); @@ -219,7 +220,7 @@ } static LogicalResult verifyListGetOp(ListGetOp &op) { - auto listType = op.list().getType().cast<IREE::ListType>(); + auto listType = op.list().getType().cast<IREE::Util::ListType>(); auto elementType = listType.getElementType(); auto resultType = op.result().getType(); if (!ListType::canImplicitlyCast(elementType, resultType)) { @@ -230,7 +231,7 @@ } static LogicalResult verifyListSetOp(ListSetOp &op) { - auto listType = op.list().getType().cast<IREE::ListType>(); + auto listType = op.list().getType().cast<IREE::Util::ListType>(); auto elementType = listType.getElementType(); auto valueType = op.value().getType(); if (!ListType::canImplicitlyCast(valueType, elementType)) { @@ -240,9 +241,10 @@ return success(); } +} // 
namespace Util } // namespace IREE } // namespace iree_compiler } // namespace mlir #define GET_OP_CLASSES -#include "iree/compiler/Dialect/IREE/IR/IREEOps.cpp.inc" +#include "iree/compiler/Dialect/Util/IR/UtilOps.cpp.inc"
diff --git a/iree/compiler/Dialect/IREE/IR/IREEOps.h b/iree/compiler/Dialect/Util/IR/UtilOps.h similarity index 89% rename from iree/compiler/Dialect/IREE/IR/IREEOps.h rename to iree/compiler/Dialect/Util/IR/UtilOps.h index 6780f1e..9414f11 100644 --- a/iree/compiler/Dialect/IREE/IR/IREEOps.h +++ b/iree/compiler/Dialect/Util/IR/UtilOps.h
@@ -14,6 +14,6 @@ #include "mlir/Interfaces/SideEffectInterfaces.h" #define GET_OP_CLASSES -#include "iree/compiler/Dialect/IREE/IR/IREEOps.h.inc" // IWYU pragma: export +#include "iree/compiler/Dialect/Util/IR/UtilOps.h.inc" // IWYU pragma: export #endif // IREE_COMPILER_DIALECT_IREE_IR_IREEOPS_H_
diff --git a/iree/compiler/Dialect/IREE/IR/IREEOps.td b/iree/compiler/Dialect/Util/IR/UtilOps.td similarity index 83% rename from iree/compiler/Dialect/IREE/IR/IREEOps.td rename to iree/compiler/Dialect/Util/IR/UtilOps.td index e3196d2..a8912e4 100644 --- a/iree/compiler/Dialect/IREE/IR/IREEOps.td +++ b/iree/compiler/Dialect/Util/IR/UtilOps.td
@@ -4,30 +4,30 @@ // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -#ifndef IREE_DIALECT_IREE_OPS -#define IREE_DIALECT_IREE_OPS +#ifndef IREE_DIALECT_UTIL_IR_UTIL_OPS +#define IREE_DIALECT_UTIL_IR_UTIL_OPS -include "iree/compiler/Dialect/IREE/IR/IREEBase.td" +include "iree/compiler/Dialect/Util/IR/UtilBase.td" include "mlir/Interfaces/SideEffectInterfaces.td" //===----------------------------------------------------------------------===// // Op types //===----------------------------------------------------------------------===// -class IREE_Op<string mnemonic, list<OpTrait> traits = []> : - Op<IREE_Dialect, mnemonic, traits> { +class Util_Op<string mnemonic, list<OpTrait> traits = []> : + Op<Util_Dialect, mnemonic, traits> { let parser = [{ return parse$cppClass(parser, result); }]; let printer = [{ print$cppClass(p, *this); }]; } -class IREE_PureOp<string mnemonic, list<OpTrait> traits = []> : - IREE_Op<mnemonic, !listconcat(traits, [NoSideEffect])>; +class Util_PureOp<string mnemonic, list<OpTrait> traits = []> : + Util_Op<mnemonic, !listconcat(traits, [NoSideEffect])>; //===----------------------------------------------------------------------===// // Byte buffers and host data //===----------------------------------------------------------------------===// -def IREE_NullOp : IREE_PureOp<"null"> { +def Util_NullOp : Util_PureOp<"null"> { let summary = "a null type value"; let description = [{ Defines an SSA value that is lowered into dialects supporting @@ -43,7 +43,7 @@ }]; } -def IREE_ByteBufferConstantOp : IREE_PureOp<"byte_buffer.constant"> { +def Util_ByteBufferConstantOp : Util_PureOp<"byte_buffer.constant"> { let summary = "constant host-side byte buffer"; let description = [{ Defines a compile-time byte buffer based on the given attribute value. 
@@ -67,7 +67,7 @@ // Compiler hints //===----------------------------------------------------------------------===// -def IREE_DoNotOptimizeOp : IREE_Op<"do_not_optimize"> { +def Util_DoNotOptimizeOp : Util_Op<"do_not_optimize"> { let summary = "Prevents compiler optimizations of a value."; let description = [{ Wraps any operands in an unoptimizable identity. This operation is declared @@ -87,7 +87,7 @@ // TODO(gcmn): It shouldn't be necessary to have both of these ops. Unify the // approach here. -def IREE_DynamicShapeConstantOp : IREE_Op<"dynamic_shape_constant"> { +def Util_DynamicShapeConstantOp : Util_Op<"dynamic_shape_constant"> { let summary = "A tensor constant that can have dynamic dimensions"; let description = [{ Allows specifying a constant where the return value can erase shape @@ -96,7 +96,7 @@ information should be hidden from the compiler and resolved at runtime. ```mlir - %c = iree.dynamic_shape_constant tensor<2x2xf32> -> tensor<?x?xf32> + %c = util.dynamic_shape_constant tensor<2x2xf32> -> tensor<?x?xf32> %res = "mhlo.abs"(%c) : (tensor<?x?xf32>) -> tensor<?x?xf32> ``` }]; @@ -105,12 +105,12 @@ let assemblyFormat = "$value attr-dict `->` type($result)"; } -def IREE_UnfoldableConstantOp : IREE_Op<"unfoldable_constant"> { +def Util_UnfoldableConstantOp : Util_Op<"unfoldable_constant"> { let summary = "A constant that cannot be folded by the compiler."; let description = [{ Similar to a std.constant, but is declared as having a side effect and has no folder. This is really just syntactic sugar as it is canonicalized to a - std.constant wrapped in an iree.do_not_optimize. + std.constant wrapped in an util.do_not_optimize. 
}]; let arguments = (ins AnyAttr:$value); @@ -123,7 +123,7 @@ let hasCanonicalizer = 1; } -def IREE_UnreachableOp : IREE_Op<"unreachable", [NoSideEffect, Terminator]> { +def Util_UnreachableOp : Util_Op<"unreachable", [NoSideEffect, Terminator]> { let summary = [{unreachable assertion op}]; let description = [{ Signals to the compiler that the parent block should not be reachable. @@ -136,7 +136,7 @@ cond_br %true, ^bb2, ^bb1 ^bb1: // Indicates that this branch should never be taken. - iree.unreachable "shouldn't be here" + util.unreachable "shouldn't be here" ^bb2: ... @@ -152,11 +152,11 @@ // Lists //===----------------------------------------------------------------------===// // NOTE: this type is mostly just a placeholder. Ideally we'd make this -// immutable and have operations like iree.list.append/concat/etc the returned +// immutable and have operations like util.list.append/concat/etc the returned // new SSA values. This would make optimizing the list usage much easier and // enable hoisting/CSE of list access/mutation. -def IREE_ListCreateOp : IREE_PureOp< +def Util_ListCreateOp : Util_PureOp< "list.create", [MemoryEffects<[MemAlloc]>]> { let summary = [{creates a new empty list}]; let description = [{ @@ -173,7 +173,7 @@ let assemblyFormat = "($initial_capacity^)? attr-dict `:` type($result)"; } -def IREE_ListSizeOp : IREE_Op<"list.size", [MemoryEffects<[MemRead]>]> { +def Util_ListSizeOp : Util_Op<"list.size", [MemoryEffects<[MemRead]>]> { let summary = [{the size of the list in elements}]; let description = [{ Returns the current size of the list in elements. @@ -189,7 +189,7 @@ let assemblyFormat = "operands attr-dict `:` type($list)"; } -def IREE_ListResizeOp : IREE_Op<"list.resize", [MemoryEffects<[MemWrite]>]> { +def Util_ListResizeOp : Util_Op<"list.resize", [MemoryEffects<[MemWrite]>]> { let summary = [{resizes the list to a new count in elements}]; let description = [{ Resizes the list to contain `new_size` elements. 
This will either truncate @@ -205,7 +205,7 @@ let assemblyFormat = "operands attr-dict `:` type($list)"; } -def IREE_ListGetOp : IREE_Op<"list.get", [MemoryEffects<[MemRead]>]> { +def Util_ListGetOp : Util_Op<"list.get", [MemoryEffects<[MemRead]>]> { let summary = [{element accessor}]; let description = [{ Returns the value of the element at the given index. Note that the value @@ -225,7 +225,7 @@ let verifier = [{ return verify$cppClass(*this); }]; } -def IREE_ListSetOp : IREE_Op<"list.set", [MemoryEffects<[MemWrite]>]> { +def Util_ListSetOp : Util_Op<"list.set", [MemoryEffects<[MemWrite]>]> { let summary = [{element mutator}]; let description = [{ Sets the element at the given index to the new value. @@ -242,4 +242,4 @@ let verifier = [{ return verify$cppClass(*this); }]; } -#endif // IREE_DIALECT_IREE_OPS +#endif // IREE_DIALECT_UTIL_IR_UTIL_OPS
diff --git a/iree/compiler/Dialect/IREE/IR/IREETraits.h b/iree/compiler/Dialect/Util/IR/UtilTraits.h similarity index 96% rename from iree/compiler/Dialect/IREE/IR/IREETraits.h rename to iree/compiler/Dialect/Util/IR/UtilTraits.h index c68ae65..3695e4e 100644 --- a/iree/compiler/Dialect/IREE/IR/IREETraits.h +++ b/iree/compiler/Dialect/Util/IR/UtilTraits.h
@@ -12,6 +12,7 @@ namespace mlir { namespace OpTrait { namespace IREE { +namespace Util { template <typename ConcreteType> class YieldPoint : public OpTrait::TraitBase<ConcreteType, YieldPoint> { @@ -31,6 +32,7 @@ } }; +} // namespace Util } // namespace IREE } // namespace OpTrait } // namespace mlir
diff --git a/iree/compiler/Dialect/IREE/IR/IREETypes.cpp b/iree/compiler/Dialect/Util/IR/UtilTypes.cpp similarity index 92% rename from iree/compiler/Dialect/IREE/IR/IREETypes.cpp rename to iree/compiler/Dialect/Util/IR/UtilTypes.cpp index 2fd65f4..8385394 100644 --- a/iree/compiler/Dialect/IREE/IR/IREETypes.cpp +++ b/iree/compiler/Dialect/Util/IR/UtilTypes.cpp
@@ -4,9 +4,9 @@ // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -#include "iree/compiler/Dialect/IREE/IR/IREETypes.h" +#include "iree/compiler/Dialect/Util/IR/UtilTypes.h" -#include "iree/compiler/Dialect/IREE/IR/IREEDialect.h" +#include "iree/compiler/Dialect/Util/IR/UtilDialect.h" #include "llvm/ADT/BitVector.h" #include "mlir/IR/Builders.h" #include "mlir/IR/BuiltinTypes.h" @@ -17,6 +17,7 @@ namespace mlir { namespace iree_compiler { namespace IREE { +namespace Util { //===----------------------------------------------------------------------===// // ListType @@ -47,8 +48,11 @@ // static bool ListType::canImplicitlyCast(Type from, Type to) { - if (from.isa<IREE::VariantType>() || to.isa<IREE::VariantType>()) return true; - if (from.isa<TensorType>() && to.isa<TensorType>()) return true; + if (from.isa<VariantType>() || to.isa<VariantType>()) { + return true; + } else if (from.isa<TensorType>() && to.isa<TensorType>()) { + return true; + } return from == to; } @@ -107,7 +111,7 @@ Type PtrType::getTargetType() { return getImpl()->targetType; } //===----------------------------------------------------------------------===// -// TiedOpInterface +// IREE::Util::TiedOpInterface //===----------------------------------------------------------------------===// llvm::Optional<unsigned> detail::getTiedResultOperandIndex( @@ -239,18 +243,19 @@ } // At the end so it can use functions above: -#include "iree/compiler/Dialect/IREE/IR/IREEOpInterfaces.cpp.inc" - -} // namespace IREE +#include "iree/compiler/Dialect/Util/IR/UtilOpInterfaces.cpp.inc" //===----------------------------------------------------------------------===// -// IREEDialect +// IREE::Util::UtilDialect //===----------------------------------------------------------------------===// -void IREEDialect::registerTypes() { - addTypes<IREE::ByteBufferType, IREE::ListType, IREE::MutableByteBufferType, - IREE::PtrType, IREE::VariantType>(); +void 
UtilDialect::registerTypes() { + addTypes<IREE::Util::ByteBufferType, IREE::Util::ListType, + IREE::Util::MutableByteBufferType, IREE::Util::PtrType, + IREE::Util::VariantType>(); } +} // namespace Util +} // namespace IREE } // namespace iree_compiler } // namespace mlir
diff --git a/iree/compiler/Dialect/IREE/IR/IREETypes.h b/iree/compiler/Dialect/Util/IR/UtilTypes.h similarity index 97% rename from iree/compiler/Dialect/IREE/IR/IREETypes.h rename to iree/compiler/Dialect/Util/IR/UtilTypes.h index 4fbfbbb..7ba9b53 100644 --- a/iree/compiler/Dialect/IREE/IR/IREETypes.h +++ b/iree/compiler/Dialect/Util/IR/UtilTypes.h
@@ -17,6 +17,7 @@ namespace mlir { namespace iree_compiler { namespace IREE { +namespace Util { class TiedOpInterface; @@ -134,11 +135,11 @@ ArrayRef<unsigned> excludedResultIndices, SmallVector<int64_t, 4> &tiedOperandIndices); -#include "iree/compiler/Dialect/IREE/IR/IREEOpInterfaces.h.inc" // IWYU pragma: export - +} // namespace Util } // namespace IREE - } // namespace iree_compiler } // namespace mlir +#include "iree/compiler/Dialect/Util/IR/UtilOpInterfaces.h.inc" // IWYU pragma: export + #endif // IREE_COMPILER_DIALECT_IREE_IR_IREETYPES_H_
diff --git a/iree/compiler/Dialect/IREE/IR/test/BUILD b/iree/compiler/Dialect/Util/IR/test/BUILD similarity index 100% rename from iree/compiler/Dialect/IREE/IR/test/BUILD rename to iree/compiler/Dialect/Util/IR/test/BUILD
diff --git a/iree/compiler/Dialect/IREE/IR/test/CMakeLists.txt b/iree/compiler/Dialect/Util/IR/test/CMakeLists.txt similarity index 93% rename from iree/compiler/Dialect/IREE/IR/test/CMakeLists.txt rename to iree/compiler/Dialect/Util/IR/test/CMakeLists.txt index 63b0079..2150353 100644 --- a/iree/compiler/Dialect/IREE/IR/test/CMakeLists.txt +++ b/iree/compiler/Dialect/Util/IR/test/CMakeLists.txt
@@ -1,6 +1,6 @@ ################################################################################ # Autogenerated by build_tools/bazel_to_cmake/bazel_to_cmake.py from # -# iree/compiler/Dialect/IREE/IR/test/BUILD # +# iree/compiler/Dialect/Util/IR/test/BUILD # # # # Use iree_cmake_extra_content from iree/build_defs.oss.bzl to add arbitrary # # CMake-only content. #
diff --git a/iree/compiler/Dialect/Util/IR/test/byte_buffer_ops.mlir b/iree/compiler/Dialect/Util/IR/test/byte_buffer_ops.mlir new file mode 100644 index 0000000..0baf198 --- /dev/null +++ b/iree/compiler/Dialect/Util/IR/test/byte_buffer_ops.mlir
@@ -0,0 +1,8 @@ +// RUN: iree-opt -split-input-file %s | iree-opt -split-input-file | IreeFileCheck %s + +// CHECK-LABEL: @byte_buffer_constant +func @byte_buffer_constant() -> !util.byte_buffer { + // CHECK: = util.byte_buffer.constant : !util.byte_buffer = dense<[1, 2, 3]> : tensor<3xi32> + %0 = util.byte_buffer.constant : !util.byte_buffer = dense<[1, 2, 3]> : tensor<3xi32> + return %0 : !util.byte_buffer +}
diff --git a/iree/compiler/Dialect/IREE/IR/test/do_not_optimize.mlir b/iree/compiler/Dialect/Util/IR/test/do_not_optimize.mlir similarity index 77% rename from iree/compiler/Dialect/IREE/IR/test/do_not_optimize.mlir rename to iree/compiler/Dialect/Util/IR/test/do_not_optimize.mlir index 98f39c6..9a38ac7 100644 --- a/iree/compiler/Dialect/IREE/IR/test/do_not_optimize.mlir +++ b/iree/compiler/Dialect/Util/IR/test/do_not_optimize.mlir
@@ -4,8 +4,8 @@ func @no_fold_constant() -> (i32) { // CHECK: constant 1 : i32 %0 = constant 1 : i32 - // CHECK: iree.do_not_optimize - %1 = "iree.do_not_optimize"(%0) : (i32) -> i32 + // CHECK: util.do_not_optimize + %1 = "util.do_not_optimize"(%0) : (i32) -> i32 return %1 : i32 } @@ -15,8 +15,8 @@ func @no_fold_add() -> (i32) { // CHECK-NEXT: %[[C1:.+]] = vm.const.i32 1 : i32 %c1 = vm.const.i32 1 : i32 - // CHECK-NEXT: %[[R1:.+]] = iree.do_not_optimize(%[[C1]]) - %0 = iree.do_not_optimize(%c1) : i32 + // CHECK-NEXT: %[[R1:.+]] = util.do_not_optimize(%[[C1]]) + %0 = util.do_not_optimize(%c1) : i32 // CHECK-NEXT: %[[R2:.+]] = vm.add.i32 %[[R1]], %[[R1]] %1 = vm.add.i32 %0, %0 : i32 // CHECK-NEXT: return %[[R2]] @@ -39,7 +39,7 @@ func @result_operand_count_mismatch(%arg0 : tensor<i32>, %arg1 : tensor<i32>) { // expected-error@+1 {{must have same number of operands and results}} - %1 = "iree.do_not_optimize"(%arg0, %arg1) : (tensor<i32>, tensor<i32>) -> tensor<i32> + %1 = "util.do_not_optimize"(%arg0, %arg1) : (tensor<i32>, tensor<i32>) -> tensor<i32> return } @@ -47,7 +47,7 @@ func @result_operand_type_mismatch(%arg0 : tensor<i32>, %arg1 : tensor<i32>) { // expected-error@+1 {{must have same operand and result types, but they differ at index 1}} - %1:2 = "iree.do_not_optimize"(%arg0, %arg1) : (tensor<i32>, tensor<i32>) -> (tensor<i32>, memref<i32>) + %1:2 = "util.do_not_optimize"(%arg0, %arg1) : (tensor<i32>, tensor<i32>) -> (tensor<i32>, memref<i32>) return } @@ -56,8 +56,8 @@ // CHECK-LABEL: @canonicalize_unfoldable_constant func @canonicalize_unfoldable_constant() -> i32 { // CHECK-NEXT: %[[C:.+]] = constant 42 : i32 - // CHECK-NEXT: %[[R:.+]] = iree.do_not_optimize(%[[C]]) : i32 - %c42 = iree.unfoldable_constant 42 : i32 + // CHECK-NEXT: %[[R:.+]] = util.do_not_optimize(%[[C]]) : i32 + %c42 = util.unfoldable_constant 42 : i32 // CHECK-NEXT: return %[[R]] return %c42 : i32 }
diff --git a/iree/compiler/Dialect/Util/IR/test/list_ops.mlir b/iree/compiler/Dialect/Util/IR/test/list_ops.mlir new file mode 100644 index 0000000..7a7d13c --- /dev/null +++ b/iree/compiler/Dialect/Util/IR/test/list_ops.mlir
@@ -0,0 +1,85 @@ +// RUN: iree-opt -split-input-file %s | iree-opt -split-input-file | IreeFileCheck %s + +// CHECK-LABEL: @list_init_ops +func @list_init_ops() { + // CHECK: %[[CAPACITY:.+]] = constant 5 + %capacity = constant 5 : index + // CHECK: = util.list.create %[[CAPACITY]] : !util.list<?> + %list_initial_capacity = util.list.create %capacity : !util.list<?> + + // CHECK: %[[LIST:.+]] = util.list.create : !util.list<?> + %list = util.list.create : !util.list<?> + + // CHECK: %[[NEW_SIZE:.+]] = constant 100 + %new_size = constant 100 : index + // CHECK: util.list.resize %[[LIST]], %[[NEW_SIZE]] : !util.list<?> + util.list.resize %list, %new_size : !util.list<?> + + return +} + +// ----- + +// CHECK-LABEL: @list_access +// CHECK-SAME: (%[[LIST:.+]]: !util.list<i32>) +func @list_access(%list: !util.list<i32>) { + %c10 = constant 10 : index + + // CHECK: = util.list.get %[[LIST]][%c10] : !util.list<i32> + %0 = util.list.get %list[%c10] : !util.list<i32> + // CHECK: = util.list.get %[[LIST]][%c10] : !util.list<i32> + %1 = util.list.get %list[%c10] : !util.list<i32> -> i32 + + // CHECK: %[[NEW_VALUE:.+]] = constant 100 : i32 + %new_value = constant 100 : i32 + // CHECK: util.list.set %[[LIST]][%c10], %[[NEW_VALUE]] : !util.list<i32> + util.list.set %list[%c10], %new_value : !util.list<i32> + + return +} + +// ----- + +// CHECK-LABEL: @list_access_tensor +// CHECK-SAME: (%[[LIST:.+]]: !util.list<tensor<*xf32>>) +func @list_access_tensor(%list: !util.list<tensor<*xf32>>) { + %c10 = constant 10 : index + + // CHECK: = util.list.get %[[LIST]][%c10] : !util.list<tensor<*xf32>> -> tensor<?xf32> + %0 = util.list.get %list[%c10] : !util.list<tensor<*xf32>> -> tensor<?xf32> + + // CHECK: %[[NEW_VALUE:.+]] = constant dense<1> : tensor<5xi32> + %new_value = constant dense<1> : tensor<5xi32> + // CHECK: util.list.set %[[LIST]][%c10], %[[NEW_VALUE]] : tensor<5xi32> -> !util.list<tensor<*xf32>> + util.list.set %list[%c10], %new_value : tensor<5xi32> -> 
!util.list<tensor<*xf32>> + + return +} + +// ----- + +// CHECK-LABEL: @list_access_variant +// CHECK-SAME: (%[[LIST:.+]]: !util.list<?>) +func @list_access_variant(%list: !util.list<?>) { + %c10 = constant 10 : index + %c11 = constant 11 : index + + // CHECK: = util.list.get %[[LIST]][%c10] : !util.list<?> -> i32 + %0 = util.list.get %list[%c10] : !util.list<?> -> i32 + + // CHECK: %[[NEW_I32_VALUE:.+]] = constant 100 : i32 + %new_i32_value = constant 100 : i32 + // CHECK: util.list.set %[[LIST]][%c10], %[[NEW_I32_VALUE]] : i32 -> !util.list<?> + util.list.set %list[%c10], %new_i32_value : i32 -> !util.list<?> + + // CHECK: = util.list.get %[[LIST]][%c11] : !util.list<?> -> tensor<5xf32> + %1 = util.list.get %list[%c11] : !util.list<?> -> tensor<5xf32> + + // CHECK: %[[NEW_TENSOR_VALUE:.+]] = constant dense<1> : tensor<5xi32> + %new_tensor_value = constant dense<1> : tensor<5xi32> + // CHECK: util.list.set %[[LIST]][%c11], %[[NEW_TENSOR_VALUE]] : tensor<5xi32> -> !util.list<?> + util.list.set %list[%c11], %new_tensor_value : tensor<5xi32> -> !util.list<?> + + return +} +
diff --git a/iree/compiler/Dialect/Util/IR/test/parse_print.mlir b/iree/compiler/Dialect/Util/IR/test/parse_print.mlir new file mode 100644 index 0000000..2f51327 --- /dev/null +++ b/iree/compiler/Dialect/Util/IR/test/parse_print.mlir
@@ -0,0 +1,48 @@ +// RUN: iree-opt -split-input-file %s | iree-opt -split-input-file | IreeFileCheck %s + +// CHECK-LABEL: @parse_print_do_not_optimize +// CHECK-SAME: %[[ARG0:[a-zA-Z0-9$._-]+]] +// CHECK-SAME: %[[ARG1:[a-zA-Z0-9$._-]+]] +func @parse_print_do_not_optimize(%arg0 : tensor<i32>, %arg1 : tensor<i32>) { + // CHECK: util.do_not_optimize() + util.do_not_optimize() + + // CHECK-NEXT: util.do_not_optimize(%[[ARG0]]) : tensor<i32> + %1 = util.do_not_optimize(%arg0) : tensor<i32> + + // CHECK-NEXT: util.do_not_optimize(%[[ARG0]], %[[ARG1]]) : tensor<i32>, tensor<i32> + %2:2 = util.do_not_optimize(%arg0, %arg1) : tensor<i32>, tensor<i32> + + // CHECK-NEXT: util.do_not_optimize(%[[ARG0]]) {some_unit} : tensor<i32> + %has_attr = util.do_not_optimize(%arg0) {some_unit} : tensor<i32> + + return +} + +// ----- + +// CHECK-LABEL: @parse_print_unfoldable_constant +func @parse_print_unfoldable_constant(%arg0 : tensor<i32>, %arg1 : tensor<i32>) { + // CHECK-NEXT: util.unfoldable_constant 42 + %c42 = util.unfoldable_constant 42 : i32 + + // CHECK: util.unfoldable_constant {attr = "foo"} 43 : i32 + %cattr = util.unfoldable_constant {attr = "foo"} 43 : i32 + + // CHECK: util.unfoldable_constant @func_with_args : (f32) -> () + %csymref = util.unfoldable_constant @func_with_args : (f32) -> () + + return +} + +// ----- + +// CHECK-LABEL: @parse_print_dynamic_shape_constant +func @parse_print_dynamic_shape_constant() { + // CHECK-NEXT: util.dynamic_shape_constant dense<2> : tensor<2xi32> -> tensor<?xi32> + %c = util.dynamic_shape_constant dense<2> : tensor<2xi32> -> tensor<?xi32> + + // CHECK-NEXT: util.dynamic_shape_constant dense<2> : tensor<2xi32> {attr = "foo"} -> tensor<?xi32> + %has_attr = util.dynamic_shape_constant dense<2> : tensor<2xi32> {attr = "foo"} -> tensor<?xi32> + return +}
diff --git a/iree/compiler/Dialect/IREE/Tools/BUILD b/iree/compiler/Dialect/Util/Tools/BUILD similarity index 100% rename from iree/compiler/Dialect/IREE/Tools/BUILD rename to iree/compiler/Dialect/Util/Tools/BUILD
diff --git a/iree/compiler/Dialect/IREE/Tools/CMakeLists.txt b/iree/compiler/Dialect/Util/Tools/CMakeLists.txt similarity index 100% rename from iree/compiler/Dialect/IREE/Tools/CMakeLists.txt rename to iree/compiler/Dialect/Util/Tools/CMakeLists.txt
diff --git a/iree/compiler/Dialect/IREE/Tools/StructAttrGen.cpp b/iree/compiler/Dialect/Util/Tools/StructAttrGen.cpp similarity index 97% rename from iree/compiler/Dialect/IREE/Tools/StructAttrGen.cpp rename to iree/compiler/Dialect/Util/Tools/StructAttrGen.cpp index 5942e9c..daf4e09 100644 --- a/iree/compiler/Dialect/IREE/Tools/StructAttrGen.cpp +++ b/iree/compiler/Dialect/Util/Tools/StructAttrGen.cpp
@@ -31,8 +31,8 @@ class StructFieldAttr { public: explicit StructFieldAttr(const llvm::Record *record) : def(record) { - assert(def->isSubClassOf("IREE_StructFieldAttr") && - "must be subclass of TableGen 'IREE_StructFieldAttr' class"); + assert(def->isSubClassOf("Util_StructFieldAttr") && + "must be subclass of TableGen 'Util_StructFieldAttr' class"); } explicit StructFieldAttr(const llvm::Record &record) : StructFieldAttr(&record) {} @@ -52,8 +52,8 @@ class StructAttr : public Attribute { public: explicit StructAttr(const llvm::Record *record) : Attribute(record) { - assert(isSubClassOf("IREE_StructAttr") && - "must be subclass of TableGen 'IREE_StructAttr' class"); + assert(isSubClassOf("Util_StructAttr") && + "must be subclass of TableGen 'Util_StructAttr' class"); } explicit StructAttr(const llvm::Record &record) : StructAttr(&record) {} explicit StructAttr(const llvm::DefInit *init) : StructAttr(init->getDef()) {} @@ -194,7 +194,7 @@ static bool emitStructDecls(const RecordKeeper &recordKeeper, raw_ostream &os) { llvm::emitSourceFileHeader("Struct Attr Declarations", os); - auto defs = recordKeeper.getAllDerivedDefinitions("IREE_StructAttr"); + auto defs = recordKeeper.getAllDerivedDefinitions("Util_StructAttr"); for (const auto *def : defs) { emitStructDecl(*def, os); } @@ -456,7 +456,7 @@ static bool emitStructDefs(const RecordKeeper &recordKeeper, raw_ostream &os) { llvm::emitSourceFileHeader("Struct Attr Definitions", os); - auto defs = recordKeeper.getAllDerivedDefinitions("IREE_StructAttr"); + auto defs = recordKeeper.getAllDerivedDefinitions("Util_StructAttr"); for (const auto *def : defs) { emitStructDef(*def, os); }
diff --git a/iree/compiler/Dialect/IREE/Transforms/BUILD b/iree/compiler/Dialect/Util/Transforms/BUILD similarity index 92% rename from iree/compiler/Dialect/IREE/Transforms/BUILD rename to iree/compiler/Dialect/Util/Transforms/BUILD index 78a1abf..003e977 100644 --- a/iree/compiler/Dialect/IREE/Transforms/BUILD +++ b/iree/compiler/Dialect/Util/Transforms/BUILD
@@ -17,7 +17,7 @@ "Passes.h", ], deps = [ - "//iree/compiler/Dialect/IREE/IR", + "//iree/compiler/Dialect/Util/IR", "@llvm-project//mlir:IR", "@llvm-project//mlir:Pass", ],
diff --git a/iree/compiler/Dialect/IREE/Transforms/CMakeLists.txt b/iree/compiler/Dialect/Util/Transforms/CMakeLists.txt similarity index 89% rename from iree/compiler/Dialect/IREE/Transforms/CMakeLists.txt rename to iree/compiler/Dialect/Util/Transforms/CMakeLists.txt index 68fca45..77359d3 100644 --- a/iree/compiler/Dialect/IREE/Transforms/CMakeLists.txt +++ b/iree/compiler/Dialect/Util/Transforms/CMakeLists.txt
@@ -1,6 +1,6 @@ ################################################################################ # Autogenerated by build_tools/bazel_to_cmake/bazel_to_cmake.py from # -# iree/compiler/Dialect/IREE/Transforms/BUILD # +# iree/compiler/Dialect/Util/Transforms/BUILD # # # # Use iree_cmake_extra_content from iree/build_defs.oss.bzl to add arbitrary # # CMake-only content. # @@ -20,7 +20,7 @@ DEPS MLIRIR MLIRPass - iree::compiler::Dialect::IREE::IR + iree::compiler::Dialect::Util::IR PUBLIC )
diff --git a/iree/compiler/Dialect/IREE/Transforms/DropCompilerHints.cpp b/iree/compiler/Dialect/Util/Transforms/DropCompilerHints.cpp similarity index 83% rename from iree/compiler/Dialect/IREE/Transforms/DropCompilerHints.cpp rename to iree/compiler/Dialect/Util/Transforms/DropCompilerHints.cpp index 21f9396..c18a62b 100644 --- a/iree/compiler/Dialect/IREE/Transforms/DropCompilerHints.cpp +++ b/iree/compiler/Dialect/Util/Transforms/DropCompilerHints.cpp
@@ -6,18 +6,21 @@ #include <utility> -#include "iree/compiler/Dialect/IREE/IR/IREEOps.h" -#include "iree/compiler/Dialect/IREE/Transforms/Passes.h" +#include "iree/compiler/Dialect/Util/IR/UtilOps.h" +#include "iree/compiler/Dialect/Util/Transforms/Passes.h" #include "mlir/Pass/Pass.h" namespace mlir { namespace iree_compiler { namespace IREE { +namespace Util { class DropCompilerHintsPass : public PassWrapper<DropCompilerHintsPass, OperationPass<void>> { public: - StringRef getArgument() const override { return "iree-drop-compiler-hints"; } + StringRef getArgument() const override { + return "iree-util-drop-compiler-hints"; + } StringRef getDescription() const override { return "Deletes operations that have no runtime equivalent and are only " @@ -41,6 +44,7 @@ static PassRegistration<DropCompilerHintsPass> pass; +} // namespace Util } // namespace IREE } // namespace iree_compiler } // namespace mlir
diff --git a/iree/compiler/Dialect/IREE/Transforms/Passes.h b/iree/compiler/Dialect/Util/Transforms/Passes.h similarity index 94% rename from iree/compiler/Dialect/IREE/Transforms/Passes.h rename to iree/compiler/Dialect/Util/Transforms/Passes.h index 43ad165..5a06d8a 100644 --- a/iree/compiler/Dialect/IREE/Transforms/Passes.h +++ b/iree/compiler/Dialect/Util/Transforms/Passes.h
@@ -12,12 +12,14 @@ namespace mlir { namespace iree_compiler { namespace IREE { +namespace Util { std::unique_ptr<OperationPass<void>> createDropCompilerHintsPass(); // Register all Passes inline void registerTransformPasses() { createDropCompilerHintsPass(); } +} // namespace Util } // namespace IREE } // namespace iree_compiler } // namespace mlir
diff --git a/iree/compiler/Dialect/IREE/Transforms/test/BUILD b/iree/compiler/Dialect/Util/Transforms/test/BUILD similarity index 100% rename from iree/compiler/Dialect/IREE/Transforms/test/BUILD rename to iree/compiler/Dialect/Util/Transforms/test/BUILD
diff --git a/iree/compiler/Dialect/IREE/Transforms/test/CMakeLists.txt b/iree/compiler/Dialect/Util/Transforms/test/CMakeLists.txt similarity index 93% rename from iree/compiler/Dialect/IREE/Transforms/test/CMakeLists.txt rename to iree/compiler/Dialect/Util/Transforms/test/CMakeLists.txt index 3638b36..4da7fa0 100644 --- a/iree/compiler/Dialect/IREE/Transforms/test/CMakeLists.txt +++ b/iree/compiler/Dialect/Util/Transforms/test/CMakeLists.txt
@@ -1,6 +1,6 @@ ################################################################################ # Autogenerated by build_tools/bazel_to_cmake/bazel_to_cmake.py from # -# iree/compiler/Dialect/IREE/Transforms/test/BUILD # +# iree/compiler/Dialect/Util/Transforms/test/BUILD # # # # Use iree_cmake_extra_content from iree/build_defs.oss.bzl to add arbitrary # # CMake-only content. #
diff --git a/iree/compiler/Dialect/IREE/Transforms/test/drop_compiler_hints.mlir b/iree/compiler/Dialect/Util/Transforms/test/drop_compiler_hints.mlir similarity index 76% rename from iree/compiler/Dialect/IREE/Transforms/test/drop_compiler_hints.mlir rename to iree/compiler/Dialect/Util/Transforms/test/drop_compiler_hints.mlir index 39c18ce..e158da0 100644 --- a/iree/compiler/Dialect/IREE/Transforms/test/drop_compiler_hints.mlir +++ b/iree/compiler/Dialect/Util/Transforms/test/drop_compiler_hints.mlir
@@ -1,4 +1,4 @@ -// RUN: iree-opt -split-input-file -iree-drop-compiler-hints %s | IreeFileCheck --implicit-check-not="iree.do_not_optimize" %s +// RUN: iree-opt -split-input-file -iree-util-drop-compiler-hints %s | IreeFileCheck --implicit-check-not="util.do_not_optimize" %s // This file is used as an example in docs/developing_iree/developer_overview.md. // If you move or delete it, please update the documentation accordingly. @@ -7,7 +7,7 @@ func @constant() -> i32 { // CHECK-NEXT: %[[C1:.+]] = constant 1 %c1 = constant 1 : i32 - %0 = iree.do_not_optimize(%c1) : i32 + %0 = util.do_not_optimize(%c1) : i32 // CHECK-NEXT: return %[[C1]] return %0 : i32 } @@ -18,12 +18,12 @@ func @multiple() -> (i32, i32) { // CHECK-NEXT: %[[C1:.+]] = constant 1 %c1 = constant 1 : i32 - %0 = iree.do_not_optimize(%c1) : i32 - %1 = iree.do_not_optimize(%0) : i32 + %0 = util.do_not_optimize(%c1) : i32 + %1 = util.do_not_optimize(%0) : i32 // CHECK-NEXT: %[[C2:.+]] = constant 2 %c2 = constant 2 : i32 - %2 = iree.do_not_optimize(%1) : i32 - %3 = iree.do_not_optimize(%c2) : i32 + %2 = util.do_not_optimize(%1) : i32 + %3 = util.do_not_optimize(%c2) : i32 // CHECK-NEXT: return %[[C1]], %[[C2]] return %2, %3 : i32, i32 } @@ -36,7 +36,7 @@ %c1 = constant 1 : i32 // CHECK-NEXT: %[[C2:.+]] = constant 2 %c2 = constant 2 : i32 - %0, %1 = iree.do_not_optimize(%c1, %c2) : i32, i32 + %0, %1 = util.do_not_optimize(%c1, %c2) : i32, i32 // CHECK-NEXT: return %[[C1]], %[[C2]] return %0, %1 : i32, i32 } @@ -45,7 +45,7 @@ // CHECK-LABEL: @no_operands func @no_operands() { - iree.do_not_optimize() + util.do_not_optimize() // CHECK-NEXT: return return } @@ -56,7 +56,7 @@ func @no_fold_add() -> (i32) { // CHECK-NEXT: %[[C1:.+]] = vm.const.i32 1 : i32 %c1 = vm.const.i32 1 : i32 - %0 = iree.do_not_optimize(%c1) : i32 + %0 = util.do_not_optimize(%c1) : i32 // CHECK-NEXT: %[[R:.+]] = vm.add.i32 %[[C1]], %[[C1]] %1 = vm.add.i32 %0, %0 : i32 // CHECK-NEXT: return %[[R]] @@ -73,7 +73,7 @@ module @inner { // 
CHECK-LABEL: @no_operands func @no_operands() { - iree.do_not_optimize() + util.do_not_optimize() // CHECK-NEXT: return return }
diff --git a/iree/compiler/Dialect/VM/Analysis/BUILD b/iree/compiler/Dialect/VM/Analysis/BUILD index 6117718..dd4d835 100644 --- a/iree/compiler/Dialect/VM/Analysis/BUILD +++ b/iree/compiler/Dialect/VM/Analysis/BUILD
@@ -18,7 +18,7 @@ "ValueLiveness.h", ], deps = [ - "//iree/compiler/Dialect/IREE/IR", + "//iree/compiler/Dialect/Util/IR", "//iree/compiler/Dialect/VM/IR", "@llvm-project//llvm:Support", "@llvm-project//mlir:Analysis",
diff --git a/iree/compiler/Dialect/VM/Analysis/CMakeLists.txt b/iree/compiler/Dialect/VM/Analysis/CMakeLists.txt index b6dd3f8..c0000f0 100644 --- a/iree/compiler/Dialect/VM/Analysis/CMakeLists.txt +++ b/iree/compiler/Dialect/VM/Analysis/CMakeLists.txt
@@ -28,7 +28,7 @@ MLIRIR MLIRPass MLIRSupport - iree::compiler::Dialect::IREE::IR + iree::compiler::Dialect::Util::IR iree::compiler::Dialect::VM::IR PUBLIC )
diff --git a/iree/compiler/Dialect/VM/Analysis/RegisterAllocation.cpp b/iree/compiler/Dialect/VM/Analysis/RegisterAllocation.cpp index 6d60b7e..8b48fbf 100644 --- a/iree/compiler/Dialect/VM/Analysis/RegisterAllocation.cpp +++ b/iree/compiler/Dialect/VM/Analysis/RegisterAllocation.cpp
@@ -10,7 +10,7 @@ #include <map> #include <utility> -#include "iree/compiler/Dialect/IREE/IR/IREETypes.h" +#include "iree/compiler/Dialect/Util/IR/UtilTypes.h" #include "llvm/ADT/BitVector.h" #include "llvm/ADT/MapVector.h" #include "llvm/ADT/STLExtras.h"
diff --git a/iree/compiler/Dialect/VM/Analysis/ValueLiveness.cpp b/iree/compiler/Dialect/VM/Analysis/ValueLiveness.cpp index cd5a89b..1304079 100644 --- a/iree/compiler/Dialect/VM/Analysis/ValueLiveness.cpp +++ b/iree/compiler/Dialect/VM/Analysis/ValueLiveness.cpp
@@ -9,7 +9,7 @@ #include <algorithm> #include <cstring> -#include "iree/compiler/Dialect/IREE/IR/IREETypes.h" +#include "iree/compiler/Dialect/Util/IR/UtilTypes.h" #include "llvm/ADT/BitVector.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SetVector.h"
diff --git a/iree/compiler/Dialect/VM/Conversion/BUILD b/iree/compiler/Dialect/VM/Conversion/BUILD index 9f8c22d..a065da6 100644 --- a/iree/compiler/Dialect/VM/Conversion/BUILD +++ b/iree/compiler/Dialect/VM/Conversion/BUILD
@@ -26,8 +26,8 @@ "TypeConverter.h", ], deps = [ - "//iree/compiler/Dialect/IREE/IR", "//iree/compiler/Dialect/Shape/IR", + "//iree/compiler/Dialect/Util/IR", "//iree/compiler/Dialect/VM/IR", "@llvm-project//llvm:Support", "@llvm-project//mlir:IR",
diff --git a/iree/compiler/Dialect/VM/Conversion/CMakeLists.txt b/iree/compiler/Dialect/VM/Conversion/CMakeLists.txt index 665573c..fd8fc39 100644 --- a/iree/compiler/Dialect/VM/Conversion/CMakeLists.txt +++ b/iree/compiler/Dialect/VM/Conversion/CMakeLists.txt
@@ -30,8 +30,8 @@ MLIRParser MLIRStandard MLIRTransforms - iree::compiler::Dialect::IREE::IR iree::compiler::Dialect::Shape::IR + iree::compiler::Dialect::Util::IR iree::compiler::Dialect::VM::IR PUBLIC )
diff --git a/iree/compiler/Dialect/VM/Conversion/IREEToVM/BUILD b/iree/compiler/Dialect/VM/Conversion/IREEToVM/BUILD index 5cefca0..0d9f803 100644 --- a/iree/compiler/Dialect/VM/Conversion/IREEToVM/BUILD +++ b/iree/compiler/Dialect/VM/Conversion/IREEToVM/BUILD
@@ -19,7 +19,7 @@ "ConvertIREEToVM.h", ], deps = [ - "//iree/compiler/Dialect/IREE/IR", + "//iree/compiler/Dialect/Util/IR", "//iree/compiler/Dialect/VM/Conversion", "//iree/compiler/Dialect/VM/IR", "@llvm-project//mlir:IR",
diff --git a/iree/compiler/Dialect/VM/Conversion/IREEToVM/CMakeLists.txt b/iree/compiler/Dialect/VM/Conversion/IREEToVM/CMakeLists.txt index 5961c2f..075541c 100644 --- a/iree/compiler/Dialect/VM/Conversion/IREEToVM/CMakeLists.txt +++ b/iree/compiler/Dialect/VM/Conversion/IREEToVM/CMakeLists.txt
@@ -23,7 +23,7 @@ MLIRStandard MLIRTransformUtils MLIRTransforms - iree::compiler::Dialect::IREE::IR + iree::compiler::Dialect::Util::IR iree::compiler::Dialect::VM::Conversion iree::compiler::Dialect::VM::IR PUBLIC
diff --git a/iree/compiler/Dialect/VM/Conversion/IREEToVM/ConvertIREEToVM.cpp b/iree/compiler/Dialect/VM/Conversion/IREEToVM/ConvertIREEToVM.cpp index 20335ba..97240aa 100644 --- a/iree/compiler/Dialect/VM/Conversion/IREEToVM/ConvertIREEToVM.cpp +++ b/iree/compiler/Dialect/VM/Conversion/IREEToVM/ConvertIREEToVM.cpp
@@ -6,8 +6,8 @@ #include "iree/compiler/Dialect/VM/Conversion/IREEToVM/ConvertIREEToVM.h" -#include "iree/compiler/Dialect/IREE/IR/IREEOps.h" -#include "iree/compiler/Dialect/IREE/IR/IREETypes.h" +#include "iree/compiler/Dialect/Util/IR/UtilOps.h" +#include "iree/compiler/Dialect/Util/IR/UtilTypes.h" #include "iree/compiler/Dialect/VM/Conversion/TypeConverter.h" #include "iree/compiler/Dialect/VM/IR/VMOps.h" #include "mlir/Dialect/StandardOps/IR/Ops.h" @@ -22,14 +22,14 @@ namespace { //===----------------------------------------------------------------------===// -// iree.null +// util.null //===----------------------------------------------------------------------===// -class NullOpConversion : public OpConversionPattern<IREE::NullOp> { +class NullOpConversion : public OpConversionPattern<IREE::Util::NullOp> { using OpConversionPattern::OpConversionPattern; LogicalResult matchAndRewrite( - IREE::NullOp op, ArrayRef<Value> operands, + IREE::Util::NullOp op, ArrayRef<Value> operands, ConversionPatternRewriter &rewriter) const override { rewriter.replaceOpWithNewOp<IREE::VM::ConstRefZeroOp>( op, IREE::VM::RefType::get(op.getType())); @@ -38,15 +38,15 @@ }; //===----------------------------------------------------------------------===// -// iree.byte_buffer.* +// util.byte_buffer.* //===----------------------------------------------------------------------===// class ByteBufferConstantOpConversion - : public OpConversionPattern<IREE::ByteBufferConstantOp> { + : public OpConversionPattern<IREE::Util::ByteBufferConstantOp> { using OpConversionPattern::OpConversionPattern; LogicalResult matchAndRewrite( - IREE::ByteBufferConstantOp op, ArrayRef<Value> operands, + IREE::Util::ByteBufferConstantOp op, ArrayRef<Value> operands, ConversionPatternRewriter &rewriter) const override { rewriter.replaceOpWithNewOp<IREE::VM::RodataInlineOp>( op, @@ -62,16 +62,17 @@ //===----------------------------------------------------------------------===// class UnreachableOpConversion - : 
public OpConversionPattern<IREE::UnreachableOp> { + : public OpConversionPattern<IREE::Util::UnreachableOp> { using OpConversionPattern::OpConversionPattern; LogicalResult matchAndRewrite( - IREE::UnreachableOp srcOp, ArrayRef<Value> operands, + IREE::Util::UnreachableOp srcOp, ArrayRef<Value> operands, ConversionPatternRewriter &rewriter) const override { rewriter.replaceOpWithNewOp<IREE::VM::FailOp>( srcOp, rewriter.createOrFold<IREE::VM::ConstI32Op>( - srcOp.getLoc(), static_cast<int32_t>(IREE::StatusCode::Unknown)), + srcOp.getLoc(), + static_cast<int32_t>(IREE::Util::StatusCode::Unknown)), srcOp.message()); return success(); } @@ -81,12 +82,13 @@ // Lists //===----------------------------------------------------------------------===// -class ListCreateOpConversion : public OpConversionPattern<IREE::ListCreateOp> { +class ListCreateOpConversion + : public OpConversionPattern<IREE::Util::ListCreateOp> { using OpConversionPattern::OpConversionPattern; LogicalResult matchAndRewrite( - IREE::ListCreateOp srcOp, ArrayRef<Value> operands, + IREE::Util::ListCreateOp srcOp, ArrayRef<Value> operands, ConversionPatternRewriter &rewriter) const override { - IREE::ListCreateOpAdaptor srcOperands(operands); + IREE::Util::ListCreateOpAdaptor srcOperands(operands); rewriter.replaceOpWithNewOp<IREE::VM::ListAllocOp>( srcOp, typeConverter->convertType(srcOp.result().getType()), srcOperands.initial_capacity()); @@ -94,12 +96,13 @@ } }; -class ListSizeOpConversion : public OpConversionPattern<IREE::ListSizeOp> { +class ListSizeOpConversion + : public OpConversionPattern<IREE::Util::ListSizeOp> { using OpConversionPattern::OpConversionPattern; LogicalResult matchAndRewrite( - IREE::ListSizeOp srcOp, ArrayRef<Value> operands, + IREE::Util::ListSizeOp srcOp, ArrayRef<Value> operands, ConversionPatternRewriter &rewriter) const override { - IREE::ListSizeOpAdaptor srcOperands(operands); + IREE::Util::ListSizeOpAdaptor srcOperands(operands); 
rewriter.replaceOpWithNewOp<IREE::VM::ListSizeOp>( srcOp, typeConverter->convertType(srcOp.result().getType()), srcOperands.list()); @@ -107,24 +110,25 @@ } }; -class ListResizeOpConversion : public OpConversionPattern<IREE::ListResizeOp> { +class ListResizeOpConversion + : public OpConversionPattern<IREE::Util::ListResizeOp> { using OpConversionPattern::OpConversionPattern; LogicalResult matchAndRewrite( - IREE::ListResizeOp srcOp, ArrayRef<Value> operands, + IREE::Util::ListResizeOp srcOp, ArrayRef<Value> operands, ConversionPatternRewriter &rewriter) const override { - IREE::ListResizeOpAdaptor srcOperands(operands); + IREE::Util::ListResizeOpAdaptor srcOperands(operands); rewriter.replaceOpWithNewOp<IREE::VM::ListResizeOp>( srcOp, srcOperands.list(), srcOperands.new_size()); return success(); } }; -class ListGetOpConversion : public OpConversionPattern<IREE::ListGetOp> { +class ListGetOpConversion : public OpConversionPattern<IREE::Util::ListGetOp> { using OpConversionPattern::OpConversionPattern; LogicalResult matchAndRewrite( - IREE::ListGetOp srcOp, ArrayRef<Value> operands, + IREE::Util::ListGetOp srcOp, ArrayRef<Value> operands, ConversionPatternRewriter &rewriter) const override { - IREE::ListGetOpAdaptor srcOperands(operands); + IREE::Util::ListGetOpAdaptor srcOperands(operands); auto resultType = typeConverter->convertType(srcOp.result().getType()); if (resultType.isInteger(32)) { rewriter.replaceOpWithNewOp<IREE::VM::ListGetI32Op>( @@ -148,12 +152,12 @@ } }; -class ListSetOpConversion : public OpConversionPattern<IREE::ListSetOp> { +class ListSetOpConversion : public OpConversionPattern<IREE::Util::ListSetOp> { using OpConversionPattern::OpConversionPattern; LogicalResult matchAndRewrite( - IREE::ListSetOp srcOp, ArrayRef<Value> operands, + IREE::Util::ListSetOp srcOp, ArrayRef<Value> operands, ConversionPatternRewriter &rewriter) const override { - IREE::ListSetOpAdaptor srcOperands(operands); + IREE::Util::ListSetOpAdaptor srcOperands(operands); auto 
valueType = srcOperands.value().getType(); if (valueType.isInteger(32)) { rewriter.replaceOpWithNewOp<IREE::VM::ListSetI32Op>( @@ -186,19 +190,21 @@ patterns.insert<ByteBufferConstantOpConversion>(typeConverter, context); patterns.insert<UnreachableOpConversion>(typeConverter, context); - typeConverter.addConversion([](IREE::ByteBufferType type) -> Optional<Type> { - return IREE::VM::RefType::get(IREE::VM::BufferType::get(type.getContext())); - }); typeConverter.addConversion( - [](IREE::MutableByteBufferType type) -> Optional<Type> { + [](IREE::Util::ByteBufferType type) -> Optional<Type> { + return IREE::VM::RefType::get( + IREE::VM::BufferType::get(type.getContext())); + }); + typeConverter.addConversion( + [](IREE::Util::MutableByteBufferType type) -> Optional<Type> { return IREE::VM::RefType::get( IREE::VM::BufferType::get(type.getContext())); }); typeConverter.addConversion( - [&typeConverter](IREE::ListType type) -> Optional<Type> { + [&typeConverter](IREE::Util::ListType type) -> Optional<Type> { Type elementType; - if (type.getElementType().isa<IREE::VariantType>()) { + if (type.getElementType().isa<IREE::Util::VariantType>()) { elementType = IREE::VM::OpaqueType::get(type.getContext()); } else { elementType = typeConverter.convertType(type.getElementType());
diff --git a/iree/compiler/Dialect/VM/Conversion/IREEToVM/test/byte_buffer_ops.mlir b/iree/compiler/Dialect/VM/Conversion/IREEToVM/test/byte_buffer_ops.mlir index ec74199..33c0316 100644 --- a/iree/compiler/Dialect/VM/Conversion/IREEToVM/test/byte_buffer_ops.mlir +++ b/iree/compiler/Dialect/VM/Conversion/IREEToVM/test/byte_buffer_ops.mlir
@@ -3,10 +3,10 @@ // CHECK-LABEL: @byte_buffer_constant module @byte_buffer_constant { module { - // CHECK: vm.func @my_fn + // CHECK: vm.func private @my_fn func @my_fn() { // CHECK-NEXT: = vm.rodata.inline : !vm.buffer = dense<[1, 2, 3]> : tensor<3xi32> - %0 = iree.byte_buffer.constant : !iree.byte_buffer = dense<[1, 2, 3]> : tensor<3xi32> + %0 = util.byte_buffer.constant : !util.byte_buffer = dense<[1, 2, 3]> : tensor<3xi32> return } }
diff --git a/iree/compiler/Dialect/VM/Conversion/IREEToVM/test/hint_ops.mlir b/iree/compiler/Dialect/VM/Conversion/IREEToVM/test/hint_ops.mlir index 9b4eb2d..9fee399 100644 --- a/iree/compiler/Dialect/VM/Conversion/IREEToVM/test/hint_ops.mlir +++ b/iree/compiler/Dialect/VM/Conversion/IREEToVM/test/hint_ops.mlir
@@ -3,11 +3,11 @@ // CHECK-LABEL: @unreachable_block module @unreachable_block { module { - // CHECK: vm.func @my_fn + // CHECK: vm.func private @my_fn func @my_fn() { // CHECK-NEXT: %[[CODE:.+]] = vm.const.i32 2 // CHECK-NEXT: vm.fail %[[CODE]], "nope!" - iree.unreachable "nope!" + util.unreachable "nope!" } } }
diff --git a/iree/compiler/Dialect/VM/Conversion/IREEToVM/test/list_ops.mlir b/iree/compiler/Dialect/VM/Conversion/IREEToVM/test/list_ops.mlir index ea4b92d..5fa0916 100644 --- a/iree/compiler/Dialect/VM/Conversion/IREEToVM/test/list_ops.mlir +++ b/iree/compiler/Dialect/VM/Conversion/IREEToVM/test/list_ops.mlir
@@ -2,35 +2,35 @@ // CHECK-LABEL: @list_ops module @list_ops { module { - // CHECK: vm.func @my_fn + // CHECK: vm.func private @my_fn // CHECK-SAME: (%[[BUFFER_VIEW:.+]]: !vm.ref<!hal.buffer_view>) func @my_fn(%buffer_view: !hal.buffer_view) { // CHECK: %[[CAPACITY:.+]] = vm.const.i32 5 %capacity = constant 5 : index // CHECK: %[[LIST:.+]] = vm.list.alloc %[[CAPACITY]] : (i32) -> !vm.list<?> - %list = iree.list.create %capacity : !iree.list<?> + %list = util.list.create %capacity : !util.list<?> // CHECK: %[[NEW_SIZE:.+]] = vm.const.i32 100 %new_size = constant 100 : index // CHECK: vm.list.resize %[[LIST]], %[[NEW_SIZE]] : (!vm.list<?>, i32) - iree.list.resize %list, %new_size : !iree.list<?> + util.list.resize %list, %new_size : !util.list<?> %c10 = constant 10 : index %c11 = constant 11 : index // CHECK: = vm.list.get.i32 %[[LIST]], %c10 : (!vm.list<?>, i32) -> i32 - %0 = iree.list.get %list[%c10] : !iree.list<?> -> i32 + %0 = util.list.get %list[%c10] : !util.list<?> -> i32 // CHECK: %[[NEW_I32_VALUE:.+]] = vm.const.i32 101 %new_i32_value = constant 101 : i32 // CHECK: vm.list.set.i32 %[[LIST]], %c10, %[[NEW_I32_VALUE]] : (!vm.list<?>, i32, i32) - iree.list.set %list[%c10], %new_i32_value : i32 -> !iree.list<?> + util.list.set %list[%c10], %new_i32_value : i32 -> !util.list<?> // CHECK: = vm.list.get.ref %[[LIST]], %c11 : (!vm.list<?>, i32) -> !vm.ref<!hal.buffer_view> - %1 = iree.list.get %list[%c11] : !iree.list<?> -> !hal.buffer_view + %1 = util.list.get %list[%c11] : !util.list<?> -> !hal.buffer_view // CHECK: vm.list.set.ref %[[LIST]], %c11, %[[BUFFER_VIEW]] : (!vm.list<?>, i32, !vm.ref<!hal.buffer_view>) - iree.list.set %list[%c11], %buffer_view : !hal.buffer_view -> !iree.list<?> + util.list.set %list[%c11], %buffer_view : !hal.buffer_view -> !util.list<?> return }
diff --git a/iree/compiler/Dialect/VM/Conversion/ImportUtils.h b/iree/compiler/Dialect/VM/Conversion/ImportUtils.h index 8d26600..7381461 100644 --- a/iree/compiler/Dialect/VM/Conversion/ImportUtils.h +++ b/iree/compiler/Dialect/VM/Conversion/ImportUtils.h
@@ -7,9 +7,9 @@ #ifndef IREE_COMPILER_DIALECT_VM_CONVERSION_IMPORTUTILS_H_ #define IREE_COMPILER_DIALECT_VM_CONVERSION_IMPORTUTILS_H_ -#include "iree/compiler/Dialect/IREE/IR/IREETypes.h" #include "iree/compiler/Dialect/Shape/IR/ShapeOps.h" #include "iree/compiler/Dialect/Shape/IR/ShapeTypes.h" +#include "iree/compiler/Dialect/Util/IR/UtilTypes.h" #include "iree/compiler/Dialect/VM/IR/VMOps.h" #include "mlir/Dialect/StandardOps/IR/Ops.h" #include "mlir/IR/Attributes.h"
diff --git a/iree/compiler/Dialect/VM/Conversion/MathToVM/BUILD b/iree/compiler/Dialect/VM/Conversion/MathToVM/BUILD index c0b71f0..62e8706 100644 --- a/iree/compiler/Dialect/VM/Conversion/MathToVM/BUILD +++ b/iree/compiler/Dialect/VM/Conversion/MathToVM/BUILD
@@ -19,7 +19,7 @@ "ConvertMathToVM.h", ], deps = [ - "//iree/compiler/Dialect/IREE/IR", + "//iree/compiler/Dialect/Util/IR", "//iree/compiler/Dialect/VM/Conversion", "//iree/compiler/Dialect/VM/IR", "@llvm-project//mlir:IR",
diff --git a/iree/compiler/Dialect/VM/Conversion/MathToVM/CMakeLists.txt b/iree/compiler/Dialect/VM/Conversion/MathToVM/CMakeLists.txt index 2ef9a34..a8bf122 100644 --- a/iree/compiler/Dialect/VM/Conversion/MathToVM/CMakeLists.txt +++ b/iree/compiler/Dialect/VM/Conversion/MathToVM/CMakeLists.txt
@@ -24,7 +24,7 @@ MLIRStandard MLIRTransformUtils MLIRTransforms - iree::compiler::Dialect::IREE::IR + iree::compiler::Dialect::Util::IR iree::compiler::Dialect::VM::Conversion iree::compiler::Dialect::VM::IR PUBLIC
diff --git a/iree/compiler/Dialect/VM/Conversion/MathToVM/ConvertMathToVM.cpp b/iree/compiler/Dialect/VM/Conversion/MathToVM/ConvertMathToVM.cpp index fcca78c..1910d9a 100644 --- a/iree/compiler/Dialect/VM/Conversion/MathToVM/ConvertMathToVM.cpp +++ b/iree/compiler/Dialect/VM/Conversion/MathToVM/ConvertMathToVM.cpp
@@ -6,7 +6,7 @@ #include "iree/compiler/Dialect/VM/Conversion/MathToVM/ConvertMathToVM.h" -#include "iree/compiler/Dialect/IREE/IR/IREETypes.h" +#include "iree/compiler/Dialect/Util/IR/UtilTypes.h" #include "iree/compiler/Dialect/VM/Conversion/TargetOptions.h" #include "iree/compiler/Dialect/VM/Conversion/TypeConverter.h" #include "iree/compiler/Dialect/VM/IR/VMOps.h"
diff --git a/iree/compiler/Dialect/VM/Conversion/MathToVM/test/arithmetic_ops.mlir b/iree/compiler/Dialect/VM/Conversion/MathToVM/test/arithmetic_ops.mlir index 0c31ed8..ba3bd00 100644 --- a/iree/compiler/Dialect/VM/Conversion/MathToVM/test/arithmetic_ops.mlir +++ b/iree/compiler/Dialect/VM/Conversion/MathToVM/test/arithmetic_ops.mlir
@@ -1,7 +1,7 @@ // RUN: iree-opt -split-input-file -iree-vm-conversion -iree-vm-target-extension=f32 %s | IreeFileCheck %s module { - // CHECK-LABEL: func @arithmetic + // CHECK-LABEL: vm.func private @arithmetic func @arithmetic(%arg0: f32) -> f32 { // CHECK: vm.atan.f32
diff --git a/iree/compiler/Dialect/VM/Conversion/MemRefToVM/BUILD b/iree/compiler/Dialect/VM/Conversion/MemRefToVM/BUILD index 4aaf209..9987107 100644 --- a/iree/compiler/Dialect/VM/Conversion/MemRefToVM/BUILD +++ b/iree/compiler/Dialect/VM/Conversion/MemRefToVM/BUILD
@@ -19,7 +19,7 @@ "ConvertMemRefToVM.h", ], deps = [ - "//iree/compiler/Dialect/IREE/IR", + "//iree/compiler/Dialect/Util/IR", "//iree/compiler/Dialect/VM/Conversion", "//iree/compiler/Dialect/VM/IR", "@llvm-project//mlir:Affine",
diff --git a/iree/compiler/Dialect/VM/Conversion/MemRefToVM/CMakeLists.txt b/iree/compiler/Dialect/VM/Conversion/MemRefToVM/CMakeLists.txt index e72d5a9..1a45c14 100644 --- a/iree/compiler/Dialect/VM/Conversion/MemRefToVM/CMakeLists.txt +++ b/iree/compiler/Dialect/VM/Conversion/MemRefToVM/CMakeLists.txt
@@ -25,7 +25,7 @@ MLIRStandard MLIRTransformUtils MLIRTransforms - iree::compiler::Dialect::IREE::IR + iree::compiler::Dialect::Util::IR iree::compiler::Dialect::VM::Conversion iree::compiler::Dialect::VM::IR PUBLIC
diff --git a/iree/compiler/Dialect/VM/Conversion/MemRefToVM/ConvertMemRefToVM.cpp b/iree/compiler/Dialect/VM/Conversion/MemRefToVM/ConvertMemRefToVM.cpp index 59a2b91..4e2440c 100644 --- a/iree/compiler/Dialect/VM/Conversion/MemRefToVM/ConvertMemRefToVM.cpp +++ b/iree/compiler/Dialect/VM/Conversion/MemRefToVM/ConvertMemRefToVM.cpp
@@ -6,7 +6,7 @@ #include "iree/compiler/Dialect/VM/Conversion/MemRefToVM/ConvertMemRefToVM.h" -#include "iree/compiler/Dialect/IREE/IR/IREETypes.h" +#include "iree/compiler/Dialect/Util/IR/UtilTypes.h" #include "iree/compiler/Dialect/VM/Conversion/TargetOptions.h" #include "iree/compiler/Dialect/VM/Conversion/TypeConverter.h" #include "iree/compiler/Dialect/VM/IR/VMOps.h"
diff --git a/iree/compiler/Dialect/VM/Conversion/MemRefToVM/test/load_store_ops.mlir b/iree/compiler/Dialect/VM/Conversion/MemRefToVM/test/load_store_ops.mlir index 6af16c4..b6f64cc 100644 --- a/iree/compiler/Dialect/VM/Conversion/MemRefToVM/test/load_store_ops.mlir +++ b/iree/compiler/Dialect/VM/Conversion/MemRefToVM/test/load_store_ops.mlir
@@ -1,7 +1,7 @@ // RUN: iree-opt -split-input-file -iree-vm-conversion -iree-vm-target-extension=f32 %s | IreeFileCheck %s module { - // CHECK-LABEL: vm.func @load_store + // CHECK-LABEL: vm.func private @load_store // CHECK-SAME: (%[[BUFFER:.+]]: !vm.buffer, %[[IDX0:.+]]: i32, %[[IDX1:.+]]: i32) -> f32 { func @load_store(%buffer: memref<?xf32>, %idx0: index, %idx1: index) -> f32 { // CHECK-NEXT: %[[C4_0:.+]] = vm.const.i32 4 : i32 @@ -22,7 +22,7 @@ module { // CHECK: vm.rodata private @__constant dense<[0.0287729427, 0.0297581609]> : tensor<2xf32> memref.global "private" constant @__constant : memref<2xf32> = dense<[0.0287729427, 0.0297581609]> - // CHECK-LABEL: vm.func @load_global + // CHECK-LABEL: vm.func private @load_global // CHECK-SAME: (%[[IDX:.+]]: i32) -> f32 { func @load_global(%idx: index) -> f32 { // CHECK-NEXT: %[[BUFFER:.+]] = vm.const.ref.rodata @__constant : !vm.buffer
diff --git a/iree/compiler/Dialect/VM/Conversion/StandardToVM/BUILD b/iree/compiler/Dialect/VM/Conversion/StandardToVM/BUILD index d7b8876..4793441 100644 --- a/iree/compiler/Dialect/VM/Conversion/StandardToVM/BUILD +++ b/iree/compiler/Dialect/VM/Conversion/StandardToVM/BUILD
@@ -21,7 +21,7 @@ ], deps = [ "//iree/base", - "//iree/compiler/Dialect/IREE/IR", + "//iree/compiler/Dialect/Util/IR", "//iree/compiler/Dialect/VM/Conversion", "//iree/compiler/Dialect/VM/IR", "@llvm-project//mlir:IR",
diff --git a/iree/compiler/Dialect/VM/Conversion/StandardToVM/CMakeLists.txt b/iree/compiler/Dialect/VM/Conversion/StandardToVM/CMakeLists.txt index d12af9a..603e51e 100644 --- a/iree/compiler/Dialect/VM/Conversion/StandardToVM/CMakeLists.txt +++ b/iree/compiler/Dialect/VM/Conversion/StandardToVM/CMakeLists.txt
@@ -25,7 +25,7 @@ MLIRTransformUtils MLIRTransforms iree::base - iree::compiler::Dialect::IREE::IR + iree::compiler::Dialect::Util::IR iree::compiler::Dialect::VM::Conversion iree::compiler::Dialect::VM::IR PUBLIC
diff --git a/iree/compiler/Dialect/VM/Conversion/StandardToVM/ConvertStandardToVM.cpp b/iree/compiler/Dialect/VM/Conversion/StandardToVM/ConvertStandardToVM.cpp index db74aa8..ea692ba 100644 --- a/iree/compiler/Dialect/VM/Conversion/StandardToVM/ConvertStandardToVM.cpp +++ b/iree/compiler/Dialect/VM/Conversion/StandardToVM/ConvertStandardToVM.cpp
@@ -7,7 +7,7 @@ #include "iree/compiler/Dialect/VM/Conversion/StandardToVM/ConvertStandardToVM.h" #include "iree/base/api.h" -#include "iree/compiler/Dialect/IREE/IR/IREETypes.h" +#include "iree/compiler/Dialect/Util/IR/UtilTypes.h" #include "iree/compiler/Dialect/VM/Conversion/TargetOptions.h" #include "iree/compiler/Dialect/VM/Conversion/TypeConverter.h" #include "iree/compiler/Dialect/VM/IR/VMOps.h" @@ -124,6 +124,9 @@ rewriter.create<IREE::VM::ExportOp>(srcOp.getLoc(), newFuncOp, exportName); } + // VM functions are private by default and exported via the dedicated + // vm.export ops. + newFuncOp.setPrivate(); rewriter.replaceOp(srcOp, llvm::None); return success();
diff --git a/iree/compiler/Dialect/VM/Conversion/StandardToVM/test/arithmetic_ops.mlir b/iree/compiler/Dialect/VM/Conversion/StandardToVM/test/arithmetic_ops.mlir index 8767d7f..46e00c2 100644 --- a/iree/compiler/Dialect/VM/Conversion/StandardToVM/test/arithmetic_ops.mlir +++ b/iree/compiler/Dialect/VM/Conversion/StandardToVM/test/arithmetic_ops.mlir
@@ -5,7 +5,7 @@ module @t001_addi { module { - // CHECK: func @my_fn + // CHECK: vm.func private @my_fn // CHECK-SAME: %[[ARG0:[a-zA-Z0-9$._-]+]] // CHECK-SAME: %[[ARG1:[a-zA-Z0-9$._-]+]] func @my_fn(%arg0: i32, %arg1: i32) -> (i32) { @@ -22,7 +22,7 @@ module @t002_divis { module { - // CHECK: func @my_fn + // CHECK: vm.func private @my_fn // CHECK-SAME: %[[ARG0:[a-zA-Z0-9$._-]+]] // CHECK-SAME: %[[ARG1:[a-zA-Z0-9$._-]+]] func @my_fn(%arg0: i32, %arg1: i32) -> (i32) { @@ -39,7 +39,7 @@ module @t002_diviu { module { - // CHECK: func @my_fn + // CHECK: vm.func private @my_fn // CHECK-SAME: %[[ARG0:[a-zA-Z0-9$._-]+]] // CHECK-SAME: %[[ARG1:[a-zA-Z0-9$._-]+]] func @my_fn(%arg0: i32, %arg1: i32) -> (i32) { @@ -56,7 +56,7 @@ module @t003_muli { module { - // CHECK: func @my_fn + // CHECK: vm.func private @my_fn // CHECK-SAME: %[[ARG0:[a-zA-Z0-9$._-]+]] // CHECK-SAME: %[[ARG1:[a-zA-Z0-9$._-]+]] func @my_fn(%arg0: i32, %arg1: i32) -> (i32) { @@ -73,7 +73,7 @@ module @t004_remis { module { - // CHECK: func @my_fn + // CHECK: vm.func private @my_fn // CHECK-SAME: %[[ARG0:[a-zA-Z0-9$._-]+]] // CHECK-SAME: %[[ARG1:[a-zA-Z0-9$._-]+]] func @my_fn(%arg0: i32, %arg1: i32) -> (i32) { @@ -90,7 +90,7 @@ module @t005_remiu { module { - // CHECK: func @my_fn + // CHECK: vm.func private @my_fn // CHECK-SAME: %[[ARG0:[a-zA-Z0-9$._-]+]] // CHECK-SAME: %[[ARG1:[a-zA-Z0-9$._-]+]] func @my_fn(%arg0: i32, %arg1: i32) -> (i32) { @@ -107,7 +107,7 @@ module @t006_subi { module { - // CHECK: func @my_fn + // CHECK: vm.func private @my_fn // CHECK-SAME: %[[ARG0:[a-zA-Z0-9$._-]+]] // CHECK-SAME: %[[ARG1:[a-zA-Z0-9$._-]+]] func @my_fn(%arg0: i32, %arg1: i32) -> (i32) { @@ -124,7 +124,7 @@ module @t007_and { module { - // CHECK: func @my_fn + // CHECK: vm.func private @my_fn // CHECK-SAME: %[[ARG0:[a-zA-Z0-9$._-]+]] // CHECK-SAME: %[[ARG1:[a-zA-Z0-9$._-]+]] func @my_fn(%arg0: i32, %arg1: i32) -> (i32) { @@ -141,7 +141,7 @@ module @t008_or { module { - // CHECK: func @my_fn + // CHECK: vm.func private 
@my_fn // CHECK-SAME: %[[ARG0:[a-zA-Z0-9$._-]+]] // CHECK-SAME: %[[ARG1:[a-zA-Z0-9$._-]+]] func @my_fn(%arg0: i32, %arg1: i32) -> (i32) { @@ -158,7 +158,7 @@ module @t009_xor { module { - // CHECK: func @my_fn + // CHECK: vm.func private @my_fn // CHECK-SAME: %[[ARG0:[a-zA-Z0-9$._-]+]] // CHECK-SAME: %[[ARG1:[a-zA-Z0-9$._-]+]] func @my_fn(%arg0: i32, %arg1: i32) -> (i32) { @@ -175,7 +175,7 @@ module @t010_shift_left { module { - // CHECK: func @my_fn + // CHECK: vm.func private @my_fn // CHECK-SAME: %[[ARG0:[a-zA-Z0-9$._-]+]] func @my_fn(%arg0: i32) -> (i32) { %c3 = constant 3 : i32 @@ -192,7 +192,7 @@ module @t011_shift_right { module { - // CHECK: func @my_fn + // CHECK: vm.func private @my_fn // CHECK-SAME: %[[ARG0:[a-zA-Z0-9$._-]+]] func @my_fn(%arg0: i32) -> (i32) { %c3 = constant 3 : i32
diff --git a/iree/compiler/Dialect/VM/Conversion/StandardToVM/test/assignment_ops.mlir b/iree/compiler/Dialect/VM/Conversion/StandardToVM/test/assignment_ops.mlir index b12a8b2..fd67a99 100644 --- a/iree/compiler/Dialect/VM/Conversion/StandardToVM/test/assignment_ops.mlir +++ b/iree/compiler/Dialect/VM/Conversion/StandardToVM/test/assignment_ops.mlir
@@ -5,7 +5,7 @@ module @t001_cmp_select { module @my_module { - // CHECK: func @my_fn + // CHECK: vm.func private @my_fn // CHECK-SAME: %[[ARG0:[a-zA-Z0-9$._-]+]] // CHECK-SAME: %[[ARG1:[a-zA-Z0-9$._-]+]] // CHECK-SAME: %[[ARG2:[a-zA-Z0-9$._-]+]] @@ -28,7 +28,7 @@ module @t002_cmp_select_index { module @my_module { - // CHECK: func @my_fn + // CHECK: vm.func private @my_fn // CHECK-SAME: %[[ARG0:[a-zA-Z0-9$._-]+]] // CHECK-SAME: %[[ARG1:[a-zA-Z0-9$._-]+]] // CHECK-SAME: %[[ARG2:[a-zA-Z0-9$._-]+]]
diff --git a/iree/compiler/Dialect/VM/Conversion/StandardToVM/test/comparison_ops.mlir b/iree/compiler/Dialect/VM/Conversion/StandardToVM/test/comparison_ops.mlir index afb0896..3d3a6f5 100644 --- a/iree/compiler/Dialect/VM/Conversion/StandardToVM/test/comparison_ops.mlir +++ b/iree/compiler/Dialect/VM/Conversion/StandardToVM/test/comparison_ops.mlir
@@ -5,7 +5,7 @@ module @t001_cmp_eq_i32 { module { - // CHECK: func @my_fn + // CHECK: vm.func private @my_fn // CHECK-SAME: %[[ARG0:[a-zA-Z0-9$._-]+]] // CHECK-SAME: %[[ARG1:[a-zA-Z0-9$._-]+]] func @my_fn(%arg0: i32, %arg1 : i32) -> (i1) { @@ -22,7 +22,7 @@ module @t002_cmp_ne_i32 { module { - // CHECK: func @my_fn + // CHECK: vm.func private @my_fn // CHECK-SAME: %[[ARG0:[a-zA-Z0-9$._-]+]] // CHECK-SAME: %[[ARG1:[a-zA-Z0-9$._-]+]] func @my_fn(%arg0: i32, %arg1 : i32) -> (i1) { @@ -39,7 +39,7 @@ module @t003_cmp_slt_i32 { module { - // CHECK: func @my_fn + // CHECK: vm.func private @my_fn // CHECK-SAME: %[[ARG0:[a-zA-Z0-9$._-]+]] // CHECK-SAME: %[[ARG1:[a-zA-Z0-9$._-]+]] func @my_fn(%arg0: i32, %arg1 : i32) -> (i1) { @@ -56,7 +56,7 @@ module @t004_cmp_sle_i32 { module { - // CHECK: func @my_fn + // CHECK: vm.func private @my_fn // CHECK-SAME: %[[ARG0:[a-zA-Z0-9$._-]+]] // CHECK-SAME: %[[ARG1:[a-zA-Z0-9$._-]+]] func @my_fn(%arg0: i32, %arg1 : i32) -> (i1) { @@ -73,7 +73,7 @@ module @t005_cmp_sgt_i32 { module { - // CHECK: func @my_fn + // CHECK: vm.func private @my_fn // CHECK-SAME: %[[ARG0:[a-zA-Z0-9$._-]+]] // CHECK-SAME: %[[ARG1:[a-zA-Z0-9$._-]+]] func @my_fn(%arg0: i32, %arg1 : i32) -> (i1) { @@ -90,7 +90,7 @@ module @t006_cmp_sge_i32 { module { - // CHECK: func @my_fn + // CHECK: vm.func private @my_fn // CHECK-SAME: %[[ARG0:[a-zA-Z0-9$._-]+]] // CHECK-SAME: %[[ARG1:[a-zA-Z0-9$._-]+]] func @my_fn(%arg0: i32, %arg1 : i32) -> (i1) { @@ -107,7 +107,7 @@ module @t007_cmp_ult_i32 { module { - // CHECK: func @my_fn + // CHECK: vm.func private @my_fn // CHECK-SAME: %[[ARG0:[a-zA-Z0-9$._-]+]] // CHECK-SAME: %[[ARG1:[a-zA-Z0-9$._-]+]] func @my_fn(%arg0: i32, %arg1 : i32) -> (i1) { @@ -124,7 +124,7 @@ module @t008_cmp_ule_i32 { module { - // CHECK: func @my_fn + // CHECK: vm.func private @my_fn // CHECK-SAME: %[[ARG0:[a-zA-Z0-9$._-]+]] // CHECK-SAME: %[[ARG1:[a-zA-Z0-9$._-]+]] func @my_fn(%arg0: i32, %arg1 : i32) -> (i1) { @@ -141,7 +141,7 @@ module @t009_cmp_ugt_i32 { 
module { - // CHECK: func @my_fn + // CHECK: vm.func private @my_fn // CHECK-SAME: %[[ARG0:[a-zA-Z0-9$._-]+]] // CHECK-SAME: %[[ARG1:[a-zA-Z0-9$._-]+]] func @my_fn(%arg0: i32, %arg1 : i32) -> (i1) { @@ -158,7 +158,7 @@ module @t010_cmp_uge_i32 { module { - // CHECK: func @my_fn + // CHECK: vm.func private @my_fn // CHECK-SAME: %[[ARG0:[a-zA-Z0-9$._-]+]] // CHECK-SAME: %[[ARG1:[a-zA-Z0-9$._-]+]] func @my_fn(%arg0: i32, %arg1 : i32) -> (i1) {
diff --git a/iree/compiler/Dialect/VM/Conversion/StandardToVM/test/control_flow_ops.mlir b/iree/compiler/Dialect/VM/Conversion/StandardToVM/test/control_flow_ops.mlir index 3b7d256..d664ce2 100644 --- a/iree/compiler/Dialect/VM/Conversion/StandardToVM/test/control_flow_ops.mlir +++ b/iree/compiler/Dialect/VM/Conversion/StandardToVM/test/control_flow_ops.mlir
@@ -20,7 +20,7 @@ module @t002_cond_br { module { - // CHECK: func @my_fn + // CHECK: vm.func private @my_fn // CHECK-SAME: %[[ARG0:[a-zA-Z0-9$._-]+]] func @my_fn(%arg0 : i1, %arg1 : i32, %arg2 : i32) -> (i32) { // CHECK: vm.cond_br %[[ARG0]], ^bb1, ^bb2 @@ -39,7 +39,7 @@ module @t003_br_args { module { - // CHECK: func @my_fn + // CHECK: vm.func private @my_fn // CHECK-SAME: %[[ARG0:[a-zA-Z0-9$._-]+]] // CHECK-SAME: %[[ARG1:[a-zA-Z0-9$._-]+]] func @my_fn(%arg0 : i32, %arg1 : i32) -> (i32) { @@ -57,7 +57,7 @@ module @t004_cond_br_args { module { - // CHECK: func @my_fn + // CHECK: vm.func private @my_fn // CHECK-SAME: %[[ARG0:[a-zA-Z0-9$._-]+]] // CHECK-SAME: %[[ARG1:[a-zA-Z0-9$._-]+]] // CHECK-SAME: %[[ARG2:[a-zA-Z0-9$._-]+]] @@ -79,7 +79,7 @@ module { func private @import_fn(%arg0 : i32) -> i32 - // CHECK: func @my_fn + // CHECK: vm.func private @my_fn // CHECK-SAME: %[[ARG0:[a-zA-Z0-9$._-]+]] func @my_fn(%arg0 : i32) -> (i32) { // CHECK: vm.call @import_fn(%[[ARG0]]) : (i32) -> i32 @@ -96,7 +96,7 @@ module { func private @import_fn(%arg0 : i1) -> i1 - // CHECK: func @my_fn + // CHECK: vm.func private @my_fn // CHECK-SAME: %[[ARG0:[a-zA-Z0-9$._-]+]] func @my_fn(%arg0 : i1) -> (i1) { // CHECK: vm.call @import_fn(%[[ARG0]]) : (i32) -> i32 @@ -112,7 +112,7 @@ module @t006_assert { module { - // CHECK: vm.func @my_fn + // CHECK: vm.func private @my_fn // CHECK-SAME: %[[ARG0:[a-zA-Z0-9$._-]+]] func @my_fn(%arg0 : i32) -> (i32) { %zero = constant 0 : i32
diff --git a/iree/compiler/Dialect/VM/Conversion/StandardToVM/test/func_attrs.mlir b/iree/compiler/Dialect/VM/Conversion/StandardToVM/test/func_attrs.mlir index 0fe1147..7a0cead 100644 --- a/iree/compiler/Dialect/VM/Conversion/StandardToVM/test/func_attrs.mlir +++ b/iree/compiler/Dialect/VM/Conversion/StandardToVM/test/func_attrs.mlir
@@ -5,7 +5,7 @@ module @t001_iree_reflection { module { - // CHECK: func @t001_iree_reflection + // CHECK: vm.func private @t001_iree_reflection // CHECK-SAME: iree.reflection = {f = "FOOBAR"} func @t001_iree_reflection(%arg0: i32) -> (i32) attributes { iree.reflection = {f = "FOOBAR"} @@ -21,7 +21,7 @@ module @t002_iree_module_export_default { module { - // CHECK: func @internal_function_name + // CHECK: vm.func private @internal_function_name // CHECK: vm.export @internal_function_name func @internal_function_name(%arg0: i32) -> (i32) { return %arg0 : i32
diff --git a/iree/compiler/Dialect/VM/Conversion/StandardToVM/test/structural_ops.mlir b/iree/compiler/Dialect/VM/Conversion/StandardToVM/test/structural_ops.mlir index 91cf036..a062c55 100644 --- a/iree/compiler/Dialect/VM/Conversion/StandardToVM/test/structural_ops.mlir +++ b/iree/compiler/Dialect/VM/Conversion/StandardToVM/test/structural_ops.mlir
@@ -6,9 +6,9 @@ // CHECK-LABEL: @t001_module_all_options module @t001_module_all_options { -// CHECK: module @my_module { +// CHECK: vm.module public @my_module { module @my_module { - // CHECK: vm.func @my_fn(%[[ARG0:[a-zA-Z0-9$._-]+]]: i32) -> i32 + // CHECK: vm.func private @my_fn(%[[ARG0:[a-zA-Z0-9$._-]+]]: i32) -> i32 func @my_fn(%arg0: i32) -> (i32) { // CHECK: vm.return %[[ARG0]] : i32 return %arg0 : i32 @@ -22,7 +22,7 @@ module @t002_no_args_results { module @my_module { - // CHECK: vm.func @my_fn() { + // CHECK: vm.func private @my_fn() { func @my_fn() -> () { // CHECK: vm.return return @@ -35,7 +35,7 @@ // CHECK-LABEL: @t003_unnamed_module module @t003_unnamed_module { -// CHECK: module @module { +// CHECK: vm.module public @module { module { } @@ -49,9 +49,9 @@ module { // CHECK: module module { - // CHECK-LABEL: vm.module @deeplyNested + // CHECK-LABEL: vm.module public @deeplyNested module @deeplyNested { - // CHECK: vm.func @foo + // CHECK: vm.func private @foo func @foo() { return }
diff --git a/iree/compiler/Dialect/VM/Conversion/TypeConverter.cpp b/iree/compiler/Dialect/VM/Conversion/TypeConverter.cpp index 6afc184..66c0cb8 100644 --- a/iree/compiler/Dialect/VM/Conversion/TypeConverter.cpp +++ b/iree/compiler/Dialect/VM/Conversion/TypeConverter.cpp
@@ -6,9 +6,9 @@ #include "iree/compiler/Dialect/VM/Conversion/TypeConverter.h" -#include "iree/compiler/Dialect/IREE/IR/IREETypes.h" #include "iree/compiler/Dialect/Shape/IR/ShapeOps.h" #include "iree/compiler/Dialect/Shape/IR/ShapeTypes.h" +#include "iree/compiler/Dialect/Util/IR/UtilTypes.h" #include "iree/compiler/Dialect/VM/IR/VMOps.h" #include "llvm/Support/Debug.h" #include "mlir/Dialect/StandardOps/IR/Ops.h" @@ -24,7 +24,7 @@ TypeConverter::TypeConverter(TargetOptions targetOptions) : targetOptions_(targetOptions) { // Variant means opaque in VM. - addConversion([](IREE::VariantType type) { + addConversion([](IREE::Util::VariantType type) { return IREE::VM::OpaqueType::get(type.getContext()); }); @@ -40,14 +40,14 @@ }); // Pointer types remain as pointer types types are passed through unmodified. - addConversion([this](IREE::PtrType type) -> Optional<Type> { + addConversion([this](IREE::Util::PtrType type) -> Optional<Type> { // Recursively handle pointer target types (we want to convert ptr<index> to // ptr<i32>, for example). auto targetType = convertType(type.getTargetType()); if (!targetType) { return llvm::None; } - return IREE::PtrType::get(targetType); + return IREE::Util::PtrType::get(targetType); }); // Convert integer types.
diff --git a/iree/compiler/Dialect/VM/Conversion/VMToEmitC/CMakeLists.txt b/iree/compiler/Dialect/VM/Conversion/VMToEmitC/CMakeLists.txt index 5ef9709..759ecbc 100644 --- a/iree/compiler/Dialect/VM/Conversion/VMToEmitC/CMakeLists.txt +++ b/iree/compiler/Dialect/VM/Conversion/VMToEmitC/CMakeLists.txt
@@ -23,8 +23,8 @@ MLIRPass MLIRStandard MLIRTransforms - iree::compiler::Dialect::IREE::Conversion::PreserveCompilerHints - iree::compiler::Dialect::IREE::IR + iree::compiler::Dialect::Util::Conversion::PreserveCompilerHints + iree::compiler::Dialect::Util::IR iree::compiler::Dialect::VM::Analysis iree::compiler::Dialect::VM::IR iree::compiler::Dialect::VM::Utils::CallingConvention
diff --git a/iree/compiler/Dialect/VM/Conversion/VMToEmitC/ConvertVMToEmitC.cpp b/iree/compiler/Dialect/VM/Conversion/VMToEmitC/ConvertVMToEmitC.cpp index 2f71bf0..f28725f 100644 --- a/iree/compiler/Dialect/VM/Conversion/VMToEmitC/ConvertVMToEmitC.cpp +++ b/iree/compiler/Dialect/VM/Conversion/VMToEmitC/ConvertVMToEmitC.cpp
@@ -6,9 +6,9 @@ #include "iree/compiler/Dialect/VM/Conversion/VMToEmitC/ConvertVMToEmitC.h" -#include "iree/compiler/Dialect/IREE/Conversion/PreserveCompilerHints.h" -#include "iree/compiler/Dialect/IREE/IR/IREEDialect.h" -#include "iree/compiler/Dialect/IREE/IR/IREEOps.h" +#include "iree/compiler/Dialect/Util/Conversion/PreserveCompilerHints.h" +#include "iree/compiler/Dialect/Util/IR/UtilDialect.h" +#include "iree/compiler/Dialect/Util/IR/UtilOps.h" #include "iree/compiler/Dialect/VM/IR/VMOps.h" #include "iree/compiler/Dialect/VM/Utils/CallingConvention.h" #include "llvm/ADT/TypeSwitch.h" @@ -1935,7 +1935,7 @@ IREE::VM::EmitCTypeConverter &typeConverter, OwningRewritePatternList &patterns, VMAnalysisCache &vmAnalysisCache) { - populatePreserveCompilerHintsPatterns(context, patterns); + IREE::Util::populatePreserveCompilerHintsPatterns(context, patterns); // CFG patterns.insert<BranchOpConversion>(context); @@ -2223,7 +2223,7 @@ OperationPass<IREE::VM::ModuleOp>> { void getDependentDialects(DialectRegistry ®istry) const override { registry.insert<mlir::emitc::EmitCDialect, mlir::BuiltinDialect, - mlir::StandardOpsDialect, IREEDialect>(); + mlir::StandardOpsDialect, IREE::Util::UtilDialect>(); } StringRef getArgument() const override { return "iree-convert-vm-to-emitc"; } @@ -2264,8 +2264,8 @@ target.addLegalDialect<mlir::emitc::EmitCDialect, mlir::BuiltinDialect, mlir::StandardOpsDialect>(); - target.addDynamicallyLegalOp<IREE::DoNotOptimizeOp>( - [&](IREE::DoNotOptimizeOp op) { + target.addDynamicallyLegalOp<IREE::Util::DoNotOptimizeOp>( + [&](IREE::Util::DoNotOptimizeOp op) { return typeConverter.isLegal(op.getResultTypes()); });
diff --git a/iree/compiler/Dialect/VM/Conversion/VMToEmitC/test/type_conversion.mlir b/iree/compiler/Dialect/VM/Conversion/VMToEmitC/test/type_conversion.mlir index fb63cde..8bec09d 100644 --- a/iree/compiler/Dialect/VM/Conversion/VMToEmitC/test/type_conversion.mlir +++ b/iree/compiler/Dialect/VM/Conversion/VMToEmitC/test/type_conversion.mlir
@@ -3,23 +3,23 @@ vm.module @my_module { // CHECK-LABEL: @my_module_list_alloc vm.func @list_alloc(%arg0: i32) { - // CHECK: %[[REF:.+]] = "emitc.constant"() {ref_ordinal = 0 : index, value = #emitc.opaque<"{0}">} : () -> !emitc.opaque<"iree_vm_ref_t"> - // CHECK: %[[LISTREF:.+]] = emitc.apply "&"(%[[REF]]) : (!emitc.opaque<"iree_vm_ref_t">) -> !emitc.opaque<"iree_vm_ref_t*"> + // CHECK: %[[REF:.+]] = "emitc.constant"() {ref_ordinal = 0 : index, value = #emitc.opaque<"{0}">} : () -> !emitc.opaque<"iree_vm_ref_t"> + // CHECK: %[[LISTREF:.+]] = emitc.apply "&"(%[[REF]]) : (!emitc.opaque<"iree_vm_ref_t">) -> !emitc.opaque<"iree_vm_ref_t*"> %list = vm.list.alloc %arg0 : (i32) -> !vm.list<i32> - %list_dno = iree.do_not_optimize(%list) : !vm.list<i32> - // CHECK: iree.do_not_optimize(%[[LISTREF]]) : !emitc.opaque<"iree_vm_ref_t*"> + %list_dno = util.do_not_optimize(%list) : !vm.list<i32> + // CHECK: util.do_not_optimize(%[[LISTREF]]) : !emitc.opaque<"iree_vm_ref_t*"> vm.return } // CHECK-LABEL: @my_module_list_size vm.func @list_size(%arg0: i32) { %list = vm.list.alloc %arg0 : (i32) -> !vm.list<i32> - // CHECK: %[[REF:.+]] = "emitc.constant"() {ref_ordinal = 0 : index, value = #emitc.opaque<"{0}">} : () -> !emitc.opaque<"iree_vm_ref_t"> - // CHECK: %[[LISTREF:.+]] = emitc.apply "&"(%[[REF]]) : (!emitc.opaque<"iree_vm_ref_t">) -> !emitc.opaque<"iree_vm_ref_t*"> + // CHECK: %[[REF:.+]] = "emitc.constant"() {ref_ordinal = 0 : index, value = #emitc.opaque<"{0}">} : () -> !emitc.opaque<"iree_vm_ref_t"> + // CHECK: %[[LISTREF:.+]] = emitc.apply "&"(%[[REF]]) : (!emitc.opaque<"iree_vm_ref_t">) -> !emitc.opaque<"iree_vm_ref_t*"> %size = vm.list.size %list : (!vm.list<i32>) -> i32 // CHECK: %[[SIZE:.+]] = emitc.call "iree_vm_list_size"(%{{.+}}) - %size_dno = iree.do_not_optimize(%size) : i32 - // CHECK: iree.do_not_optimize(%[[SIZE]]) : i32 + %size_dno = util.do_not_optimize(%size) : i32 + // CHECK: util.do_not_optimize(%[[SIZE]]) : i32 vm.return } } @@ -31,11 +31,11 @@ // 
CHECK-LABEL: @my_module_ref vm.export @ref vm.func @ref(%arg0: i32) { - // CHECK: %[[REF:.+]] = "emitc.constant"() {ref_ordinal = 0 : index, value = #emitc.opaque<"{0}">} : () -> !emitc.opaque<"iree_vm_ref_t"> - // CHECK: %[[BUFFERREF:.+]] = emitc.apply "&"(%[[REF]]) : (!emitc.opaque<"iree_vm_ref_t">) -> !emitc.opaque<"iree_vm_ref_t*"> + // CHECK: %[[REF:.+]] = "emitc.constant"() {ref_ordinal = 0 : index, value = #emitc.opaque<"{0}">} : () -> !emitc.opaque<"iree_vm_ref_t"> + // CHECK: %[[BUFFERREF:.+]] = emitc.apply "&"(%[[REF]]) : (!emitc.opaque<"iree_vm_ref_t">) -> !emitc.opaque<"iree_vm_ref_t*"> %buffer = vm.const.ref.rodata @byte_buffer : !vm.buffer - %buffer_dno = iree.do_not_optimize(%buffer) : !vm.buffer - // CHECK: iree.do_not_optimize(%[[BUFFERREF]]) : !emitc.opaque<"iree_vm_ref_t*"> + %buffer_dno = util.do_not_optimize(%buffer) : !vm.buffer + // CHECK: util.do_not_optimize(%[[BUFFERREF]]) : !emitc.opaque<"iree_vm_ref_t*"> vm.return } }
diff --git a/iree/compiler/Dialect/VM/IR/BUILD b/iree/compiler/Dialect/VM/IR/BUILD index 501e95b..faecbae 100644 --- a/iree/compiler/Dialect/VM/IR/BUILD +++ b/iree/compiler/Dialect/VM/IR/BUILD
@@ -30,7 +30,7 @@ include = ["*.td"], ), deps = [ - "//iree/compiler/Dialect/IREE/IR:td_files", + "//iree/compiler/Dialect/Util/IR:td_files", "@llvm-project//mlir:CallInterfacesTdFiles", "@llvm-project//mlir:ControlFlowInterfacesTdFiles", "@llvm-project//mlir:OpBaseTdFiles", @@ -68,7 +68,7 @@ ":VMOpInterfaceGen", ":VMOpsGen", ":VMStructsGen", - "//iree/compiler/Dialect/IREE/IR", + "//iree/compiler/Dialect/Util/IR", "@llvm-project//llvm:Support", "@llvm-project//mlir:ControlFlowInterfaces", "@llvm-project//mlir:IR",
diff --git a/iree/compiler/Dialect/VM/IR/CMakeLists.txt b/iree/compiler/Dialect/VM/IR/CMakeLists.txt index bc0e091..f89f2c2 100644 --- a/iree/compiler/Dialect/VM/IR/CMakeLists.txt +++ b/iree/compiler/Dialect/VM/IR/CMakeLists.txt
@@ -47,7 +47,7 @@ MLIRStandard MLIRSupport MLIRTransformUtils - iree::compiler::Dialect::IREE::IR + iree::compiler::Dialect::Util::IR PUBLIC )
diff --git a/iree/compiler/Dialect/VM/IR/VMBase.td b/iree/compiler/Dialect/VM/IR/VMBase.td index f1528e1..2aba56d 100644 --- a/iree/compiler/Dialect/VM/IR/VMBase.td +++ b/iree/compiler/Dialect/VM/IR/VMBase.td
@@ -7,7 +7,7 @@ #ifndef IREE_DIALECT_VM_BASE #define IREE_DIALECT_VM_BASE -include "iree/compiler/Dialect/IREE/IR/IREEBase.td" +include "iree/compiler/Dialect/Util/IR/UtilBase.td" //===----------------------------------------------------------------------===// // IREE VM (Virtual Machine) dialect @@ -258,7 +258,7 @@ //===----------------------------------------------------------------------===// def VM_AnyRefObject : DialectType< - IREE_Dialect, + Util_Dialect, CPred<"IREE::VM::RefType::isCompatible($_self)">, "ref_object"> { let description = [{ @@ -267,7 +267,7 @@ } def VM_AnyRef : DialectType< - IREE_Dialect, + Util_Dialect, CPred<"$_self.isa<IREE::VM::RefType>()">, "ref"> { let description = [{ @@ -291,7 +291,7 @@ //===----------------------------------------------------------------------===// def VM_BufferType : DialectType< - IREE_Dialect, + Util_Dialect, CPred<"$_self.isa<IREE::VM::BufferType>()">, "buffer"> { let description = [{ @@ -305,7 +305,7 @@ //===----------------------------------------------------------------------===// def VM_AnyList : DialectType< - IREE_Dialect, + Util_Dialect, And<[ CPred<"$_self.isa<IREE::VM::RefType>()">, CPred<"$_self.cast<IREE::VM::RefType>().getObjectType().isa<IREE::VM::ListType>()">, @@ -395,16 +395,16 @@ //===----------------------------------------------------------------------===// def VM_OrdinalCountsAttr : - IREE_StructAttr<"ordinal_counts", + Util_StructAttr<"ordinal_counts", "OrdinalCountsAttr", VM_Dialect, [ - IREE_StructFieldAttr<"import_funcs", I32Attr>, - IREE_StructFieldAttr<"export_funcs", I32Attr>, - IREE_StructFieldAttr<"internal_funcs", I32Attr>, - IREE_StructFieldAttr<"global_bytes", I32Attr>, - IREE_StructFieldAttr<"global_refs", I32Attr>, - IREE_StructFieldAttr<"rodatas", I32Attr>, - IREE_StructFieldAttr<"rwdatas", I32Attr>, + Util_StructFieldAttr<"import_funcs", I32Attr>, + Util_StructFieldAttr<"export_funcs", I32Attr>, + Util_StructFieldAttr<"internal_funcs", I32Attr>, + 
Util_StructFieldAttr<"global_bytes", I32Attr>, + Util_StructFieldAttr<"global_refs", I32Attr>, + Util_StructFieldAttr<"rodatas", I32Attr>, + Util_StructFieldAttr<"rwdatas", I32Attr>, ]> { let cppNamespace = "mlir::iree_compiler::IREE::VM"; }
diff --git a/iree/compiler/Dialect/VM/IR/VMOpcodesCore.td b/iree/compiler/Dialect/VM/IR/VMOpcodesCore.td index 1413a66..319fc17 100644 --- a/iree/compiler/Dialect/VM/IR/VMOpcodesCore.td +++ b/iree/compiler/Dialect/VM/IR/VMOpcodesCore.td
@@ -7,7 +7,7 @@ #ifndef IREE_DIALECT_VM_OPCODES_CORE #define IREE_DIALECT_VM_OPCODES_CORE -include "iree/compiler/Dialect/IREE/IR/IREEBase.td" +include "iree/compiler/Dialect/Util/IR/UtilBase.td" //===----------------------------------------------------------------------===// // VM opcodes
diff --git a/iree/compiler/Dialect/VM/IR/VMOps.cpp b/iree/compiler/Dialect/VM/IR/VMOps.cpp index 2abc5de..9b064d8 100644 --- a/iree/compiler/Dialect/VM/IR/VMOps.cpp +++ b/iree/compiler/Dialect/VM/IR/VMOps.cpp
@@ -6,7 +6,7 @@ #include "iree/compiler/Dialect/VM/IR/VMOps.h" -#include "iree/compiler/Dialect/IREE/IR/IREETypes.h" +#include "iree/compiler/Dialect/Util/IR/UtilTypes.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/StringExtras.h" #include "mlir/IR/Attributes.h" @@ -46,8 +46,11 @@ static void printSymbolVisibility(OpAsmPrinter &p, Operation *op, StringAttr symVisibilityAttr) { - if (!symVisibilityAttr) return; - p << symVisibilityAttr.getValue(); + if (!symVisibilityAttr) { + p << "public"; + } else { + p << symVisibilityAttr.getValue(); + } } //===----------------------------------------------------------------------===//
diff --git a/iree/compiler/Dialect/VM/IR/VMOps.h b/iree/compiler/Dialect/VM/IR/VMOps.h index 2d180fb..8f1a405 100644 --- a/iree/compiler/Dialect/VM/IR/VMOps.h +++ b/iree/compiler/Dialect/VM/IR/VMOps.h
@@ -9,7 +9,7 @@ #include <cstdint> -#include "iree/compiler/Dialect/IREE/IR/IREETraits.h" +#include "iree/compiler/Dialect/Util/IR/UtilTraits.h" #include "iree/compiler/Dialect/VM/IR/VMDialect.h" #include "iree/compiler/Dialect/VM/IR/VMTraits.h" #include "iree/compiler/Dialect/VM/IR/VMTypes.h"
diff --git a/iree/compiler/Dialect/VM/IR/VMOps.td b/iree/compiler/Dialect/VM/IR/VMOps.td index 4b0f9db..2401422 100644 --- a/iree/compiler/Dialect/VM/IR/VMOps.td +++ b/iree/compiler/Dialect/VM/IR/VMOps.td
@@ -3518,7 +3518,7 @@ }]; let arguments = (ins - IREE_Status:$status, + Util_Status:$status, OptionalAttr<StrAttr>:$message ); @@ -3573,7 +3573,7 @@ let arguments = (ins VM_CondValue:$condition, - IREE_Status:$status, + Util_Status:$status, OptionalAttr<StrAttr>:$message );
diff --git a/iree/compiler/Dialect/VM/IR/VMTypes.h b/iree/compiler/Dialect/VM/IR/VMTypes.h index 7487e33..e006187 100644 --- a/iree/compiler/Dialect/VM/IR/VMTypes.h +++ b/iree/compiler/Dialect/VM/IR/VMTypes.h
@@ -7,7 +7,7 @@ #ifndef IREE_COMPILER_DIALECT_VM_IR_VMTYPES_H_ #define IREE_COMPILER_DIALECT_VM_IR_VMTYPES_H_ -#include "iree/compiler/Dialect/IREE/IR/IREETypes.h" +#include "iree/compiler/Dialect/Util/IR/UtilTypes.h" #include "llvm/ADT/DenseMapInfo.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringSwitch.h"
diff --git a/iree/compiler/Dialect/VM/IR/test/global_folding.mlir b/iree/compiler/Dialect/VM/IR/test/global_folding.mlir index b6759a9..3c2dd2e 100644 --- a/iree/compiler/Dialect/VM/IR/test/global_folding.mlir +++ b/iree/compiler/Dialect/VM/IR/test/global_folding.mlir
@@ -4,19 +4,19 @@ // CHECK-LABEL: @global_i32_folds vm.module @global_i32_folds { - // CHECK: vm.global.i32 mutable @g0 = 123 : i32 + // CHECK: vm.global.i32 public mutable @g0 = 123 : i32 vm.global.i32 mutable @g0 initializer(@g0init) : i32 vm.func @g0init() -> i32 { %c123 = vm.const.i32 123 : i32 vm.return %c123 : i32 } - // CHECK: vm.global.i32 mutable @g1 : i32 + // CHECK: vm.global.i32 public mutable @g1 : i32 vm.global.i32 mutable @g1 = 0 : i32 - // CHECK: vm.global.i32 @g2 : i32 + // CHECK: vm.global.i32 public @g2 : i32 vm.global.i32 @g2 = 0 : i32 - // CHECK: vm.global.i32 mutable @g3 : i32 + // CHECK: vm.global.i32 public mutable @g3 : i32 vm.global.i32 mutable @g3 initializer(@g3init) : i32 vm.func @g3init() -> i32 { %c0 = vm.const.i32 0 : i32 @@ -28,7 +28,7 @@ // CHECK-LABEL: @global_ref_folds_null vm.module @global_ref_folds_null { - // CHECK: vm.global.ref mutable @g0 : !vm.ref<?> + // CHECK: vm.global.ref public mutable @g0 : !vm.ref<?> vm.global.ref mutable @g0 initializer(@g0init) : !vm.ref<?> vm.func @g0init() -> !vm.ref<?> { %null = vm.const.ref.zero : !vm.ref<?> @@ -92,18 +92,18 @@ // CHECK-LABEL: @fold_load_i32 vm.func @fold_load_i32() -> i32 { - %0 = vm.global.address @g0 : !iree.ptr<i32> + %0 = vm.global.address @g0 : !util.ptr<i32> // CHECK-NEXT: %[[VALUE:.+]] = vm.global.load.i32 @g0 : i32 - %1 = vm.global.load.indirect.i32 %0 : !iree.ptr<i32> -> i32 + %1 = vm.global.load.indirect.i32 %0 : !util.ptr<i32> -> i32 // CHECK-NEXT: vm.return %[[VALUE]] vm.return %1 : i32 } // CHECK-LABEL: @fold_store_i32 vm.func @fold_store_i32(%arg0 : i32) { - %0 = vm.global.address @g0 : !iree.ptr<i32> + %0 = vm.global.address @g0 : !util.ptr<i32> // CHECK-NEXT: vm.global.store.i32 %arg0, @g0 : i32 - vm.global.store.indirect.i32 %arg0, %0 : i32 -> !iree.ptr<i32> + vm.global.store.indirect.i32 %arg0, %0 : i32 -> !util.ptr<i32> vm.return } }
diff --git a/iree/compiler/Dialect/VM/IR/test/global_ops.mlir b/iree/compiler/Dialect/VM/IR/test/global_ops.mlir index 80f2ca3..2f93106 100644 --- a/iree/compiler/Dialect/VM/IR/test/global_ops.mlir +++ b/iree/compiler/Dialect/VM/IR/test/global_ops.mlir
@@ -30,10 +30,10 @@ vm.module @my_module { vm.global.i32 @g0 : i32 vm.func @global_load_indirect_i32() -> i32 { - // CHECK: %[[ADDR:.+]] = vm.global.address @g0 : !iree.ptr<i32> - %0 = vm.global.address @g0 : !iree.ptr<i32> - // CHECK-NEXT: = vm.global.load.indirect.i32 %[[ADDR]] : !iree.ptr<i32> -> i32 - %1 = vm.global.load.indirect.i32 %0 : !iree.ptr<i32> -> i32 + // CHECK: %[[ADDR:.+]] = vm.global.address @g0 : !util.ptr<i32> + %0 = vm.global.address @g0 : !util.ptr<i32> + // CHECK-NEXT: = vm.global.load.indirect.i32 %[[ADDR]] : !util.ptr<i32> -> i32 + %1 = vm.global.load.indirect.i32 %0 : !util.ptr<i32> -> i32 vm.return %1 : i32 } } @@ -44,10 +44,10 @@ vm.module @my_module { vm.global.i32 mutable @g0 : i32 vm.func @global_store_indirect_i32(%arg0 : i32) { - // CHECK: %[[ADDR:.+]] = vm.global.address @g0 : !iree.ptr<i32> - %0 = vm.global.address @g0 : !iree.ptr<i32> - // CHECK-NEXT: vm.global.store.indirect.i32 %arg0, %[[ADDR]] : i32 -> !iree.ptr<i32> - vm.global.store.indirect.i32 %arg0, %0 : i32 -> !iree.ptr<i32> + // CHECK: %[[ADDR:.+]] = vm.global.address @g0 : !util.ptr<i32> + %0 = vm.global.address @g0 : !util.ptr<i32> + // CHECK-NEXT: vm.global.store.indirect.i32 %arg0, %[[ADDR]] : i32 -> !util.ptr<i32> + vm.global.store.indirect.i32 %arg0, %0 : i32 -> !util.ptr<i32> vm.return } } @@ -82,10 +82,10 @@ vm.module @my_module { vm.global.ref @g0 : !vm.ref<?> vm.func @global_load_indirect_ref() -> !vm.ref<?> { - // CHECK: %[[ADDR:.+]] = vm.global.address @g0 : !iree.ptr<!vm.ref<?>> - %0 = vm.global.address @g0 : !iree.ptr<!vm.ref<?>> - // CHECK-NEXT: = vm.global.load.indirect.ref %[[ADDR]] : !iree.ptr<!vm.ref<?>> -> !vm.ref<?> - %1 = vm.global.load.indirect.ref %0 : !iree.ptr<!vm.ref<?>> -> !vm.ref<?> + // CHECK: %[[ADDR:.+]] = vm.global.address @g0 : !util.ptr<!vm.ref<?>> + %0 = vm.global.address @g0 : !util.ptr<!vm.ref<?>> + // CHECK-NEXT: = vm.global.load.indirect.ref %[[ADDR]] : !util.ptr<!vm.ref<?>> -> !vm.ref<?> + %1 = vm.global.load.indirect.ref %0 : 
!util.ptr<!vm.ref<?>> -> !vm.ref<?> vm.return %1 : !vm.ref<?> } } @@ -96,10 +96,10 @@ vm.module @my_module { vm.global.ref mutable @g0 : !vm.ref<?> vm.func @global_store_indirect_ref(%arg0 : !vm.ref<?>) { - // CHECK: %[[ADDR:.+]] = vm.global.address @g0 : !iree.ptr<!vm.ref<?>> - %0 = vm.global.address @g0 : !iree.ptr<!vm.ref<?>> - // CHECK-NEXT: vm.global.store.indirect.ref %arg0, %[[ADDR]] : !vm.ref<?> -> !iree.ptr<!vm.ref<?>> - vm.global.store.indirect.ref %arg0, %0 : !vm.ref<?> -> !iree.ptr<!vm.ref<?>> + // CHECK: %[[ADDR:.+]] = vm.global.address @g0 : !util.ptr<!vm.ref<?>> + %0 = vm.global.address @g0 : !util.ptr<!vm.ref<?>> + // CHECK-NEXT: vm.global.store.indirect.ref %arg0, %[[ADDR]] : !vm.ref<?> -> !util.ptr<!vm.ref<?>> + vm.global.store.indirect.ref %arg0, %0 : !vm.ref<?> -> !util.ptr<!vm.ref<?>> vm.return } }
diff --git a/iree/compiler/Dialect/VM/IR/test/list_op_verification.mlir b/iree/compiler/Dialect/VM/IR/test/list_op_verification.mlir index 51aad52..fbcb4fa 100644 --- a/iree/compiler/Dialect/VM/IR/test/list_op_verification.mlir +++ b/iree/compiler/Dialect/VM/IR/test/list_op_verification.mlir
@@ -27,8 +27,8 @@ vm.module @module { vm.func @strongly_typed_ref_type_mismatch(%arg0 : !vm.list<!vm.buffer>) { %c100 = vm.const.i32 100 : i32 - // expected-error @+1 {{cannot be accessed as '!vm.ref<!iree.mutable_byte_buffer>'}} - %1 = vm.list.get.ref %arg0, %c100 : (!vm.list<!vm.buffer>, i32) -> !vm.ref<!iree.mutable_byte_buffer> + // expected-error @+1 {{cannot be accessed as '!vm.ref<!util.mutable_byte_buffer>'}} + %1 = vm.list.get.ref %arg0, %c100 : (!vm.list<!vm.buffer>, i32) -> !vm.ref<!util.mutable_byte_buffer> vm.return } }
diff --git a/iree/compiler/Dialect/VM/IR/test/structural_ops.mlir b/iree/compiler/Dialect/VM/IR/test/structural_ops.mlir index 7f9f1ff..72242f5 100644 --- a/iree/compiler/Dialect/VM/IR/test/structural_ops.mlir +++ b/iree/compiler/Dialect/VM/IR/test/structural_ops.mlir
@@ -17,7 +17,7 @@ // CHECK-LABEL: @module_structure vm.module @module_structure { - // CHECK-NEXT: vm.global.i32 @g0 : i32 + // CHECK-NEXT: vm.global.i32 public @g0 : i32 vm.global.i32 @g0 : i32 // CHECK-NEXT: vm.export @fn vm.export @fn
diff --git a/iree/compiler/Dialect/VM/Target/Bytecode/BUILD b/iree/compiler/Dialect/VM/Target/Bytecode/BUILD index dd15acf..4d774f7 100644 --- a/iree/compiler/Dialect/VM/Target/Bytecode/BUILD +++ b/iree/compiler/Dialect/VM/Target/Bytecode/BUILD
@@ -12,6 +12,8 @@ "BytecodeModuleTarget.cpp", "ConstantEncoder.cpp", "ConstantEncoder.h", + "DebugDatabaseBuilder.cpp", + "DebugDatabaseBuilder.h", "TranslationFlags.cpp", "TranslationRegistration.cpp", ], @@ -20,8 +22,8 @@ "TranslationFlags.h", ], deps = [ - "//iree/compiler/Dialect/IREE/IR", - "//iree/compiler/Dialect/IREE/Transforms", + "//iree/compiler/Dialect/Util/IR", + "//iree/compiler/Dialect/Util/Transforms", "//iree/compiler/Dialect/VM/Analysis", "//iree/compiler/Dialect/VM/IR", "//iree/compiler/Dialect/VM/Transforms",
diff --git a/iree/compiler/Dialect/VM/Target/Bytecode/BytecodeEncoder.cpp b/iree/compiler/Dialect/VM/Target/Bytecode/BytecodeEncoder.cpp index dc85669..cdc6e46 100644 --- a/iree/compiler/Dialect/VM/Target/Bytecode/BytecodeEncoder.cpp +++ b/iree/compiler/Dialect/VM/Target/Bytecode/BytecodeEncoder.cpp
@@ -6,7 +6,7 @@ #include "iree/compiler/Dialect/VM/Target/Bytecode/BytecodeEncoder.h" -#include "iree/compiler/Dialect/IREE/IR/IREETypes.h" +#include "iree/compiler/Dialect/Util/IR/UtilTypes.h" #include "iree/compiler/Dialect/VM/Analysis/RegisterAllocation.h" #include "iree/compiler/Dialect/VM/IR/VMDialect.h" #include "llvm/ADT/STLExtras.h" @@ -249,6 +249,8 @@ return std::move(bytecode_); } + size_t getOffset() const { return bytecode_.size(); } + LogicalResult ensureAlignment(size_t alignment) { size_t paddedSize = (bytecode_.size() + (alignment - 1)) & ~(alignment - 1); size_t padding = paddedSize - bytecode_.size(); @@ -327,7 +329,7 @@ // static Optional<EncodedBytecodeFunction> BytecodeEncoder::encodeFunction( IREE::VM::FuncOp funcOp, llvm::DenseMap<Type, int> &typeTable, - SymbolTable &symbolTable) { + SymbolTable &symbolTable, DebugDatabaseBuilder &debugDatabase) { EncodedBytecodeFunction result; // Perform register allocation first so that we can quickly lookup values as @@ -338,6 +340,8 @@ return llvm::None; } + FunctionSourceMap sourceMap; + V0BytecodeEncoder encoder(&typeTable, ®isterAllocation); for (auto &block : funcOp.getBlocks()) { if (failed(encoder.beginBlock(&block))) { @@ -351,6 +355,8 @@ op.emitOpError() << "is not serializable"; return llvm::None; } + sourceMap.locations.push_back( + {static_cast<int32_t>(encoder.getOffset()), op.getLoc()}); if (failed(encoder.beginOp(&op)) || failed(serializableOp.encode(symbolTable, encoder)) || failed(encoder.endOp(&op))) { @@ -365,6 +371,8 @@ } } + debugDatabase.addFunctionSourceMap(funcOp, sourceMap); + if (failed(encoder.ensureAlignment(8))) { funcOp.emitError() << "failed to pad function"; return llvm::None;
diff --git a/iree/compiler/Dialect/VM/Target/Bytecode/BytecodeEncoder.h b/iree/compiler/Dialect/VM/Target/Bytecode/BytecodeEncoder.h index 4fc126a..94d49a2 100644 --- a/iree/compiler/Dialect/VM/Target/Bytecode/BytecodeEncoder.h +++ b/iree/compiler/Dialect/VM/Target/Bytecode/BytecodeEncoder.h
@@ -9,6 +9,7 @@ #include "iree/compiler/Dialect/VM/IR/VMFuncEncoder.h" #include "iree/compiler/Dialect/VM/IR/VMOps.h" +#include "iree/compiler/Dialect/VM/Target/Bytecode/DebugDatabaseBuilder.h" #include "mlir/IR/SymbolTable.h" namespace mlir { @@ -17,8 +18,13 @@ namespace VM { struct EncodedBytecodeFunction { + // Encoded bytecode data for the function body. std::vector<uint8_t> bytecodeData; + + // Total i32 register slots required for execution. + // Note that larger types also use these slots (i64=2xi32). uint16_t i32RegisterCount = 0; + // Total vm.ref register slots required for execution. uint16_t refRegisterCount = 0; }; @@ -29,7 +35,7 @@ // Returns None on failure. static Optional<EncodedBytecodeFunction> encodeFunction( IREE::VM::FuncOp funcOp, llvm::DenseMap<Type, int> &typeTable, - SymbolTable &symbolTable); + SymbolTable &symbolTable, DebugDatabaseBuilder &debugDatabase); BytecodeEncoder() = default; ~BytecodeEncoder() = default;
diff --git a/iree/compiler/Dialect/VM/Target/Bytecode/BytecodeModuleTarget.cpp b/iree/compiler/Dialect/VM/Target/Bytecode/BytecodeModuleTarget.cpp index 41ab185..5f1ce99 100644 --- a/iree/compiler/Dialect/VM/Target/Bytecode/BytecodeModuleTarget.cpp +++ b/iree/compiler/Dialect/VM/Target/Bytecode/BytecodeModuleTarget.cpp
@@ -8,9 +8,9 @@ #include <algorithm> -#include "iree/compiler/Dialect/IREE/IR/IREEOps.h" -#include "iree/compiler/Dialect/IREE/IR/IREETypes.h" -#include "iree/compiler/Dialect/IREE/Transforms/Passes.h" +#include "iree/compiler/Dialect/Util/IR/UtilOps.h" +#include "iree/compiler/Dialect/Util/IR/UtilTypes.h" +#include "iree/compiler/Dialect/Util/Transforms/Passes.h" #include "iree/compiler/Dialect/VM/Analysis/RegisterAllocation.h" #include "iree/compiler/Dialect/VM/Analysis/ValueLiveness.h" #include "iree/compiler/Dialect/VM/IR/VMDialect.h" @@ -34,6 +34,7 @@ #include "mlir/Pass/Pass.h" #include "mlir/Pass/PassManager.h" #include "mlir/Transforms/DialectConversion.h" +#include "mlir/Transforms/LocationSnapshot.h" #include "mlir/Transforms/Passes.h" #include "mlir/Translation.h" @@ -318,7 +319,7 @@ OwningRewritePatternList patterns(moduleOp.getContext()); ConversionTarget target(*moduleOp.getContext()); target.addLegalDialect<IREE::VM::VMDialect>(); - target.addLegalOp<IREE::DoNotOptimizeOp>(); + target.addLegalOp<IREE::Util::DoNotOptimizeOp>(); // Add all VM canonicalization patterns and mark pseudo-ops illegal. auto *context = moduleOp.getContext(); @@ -356,7 +357,7 @@ modulePasses.addPass(mlir::createCanonicalizerPass()); } - modulePasses.addPass(createDropCompilerHintsPass()); + modulePasses.addPass(IREE::Util::createDropCompilerHintsPass()); // Mark up the module with ordinals for each top-level op (func, etc). // This will make it easier to correlate the MLIR textual output to the @@ -490,6 +491,11 @@ // file and is only used to prime the flatcc builder. iree_vm_BytecodeModuleDef_start_as_root(fbb); + // Debug database is always populated but conditionally written. + // This allows us to emit the database to a separate file if we want to strip + // the module but still allow debugging later. 
+ DebugDatabaseBuilder debugDatabase; + SymbolTable symbolTable(moduleOp); if (!moduleOp.ordinal_counts().hasValue()) { return moduleOp.emitError() << "ordinal_counts attribute not found. The " @@ -582,7 +588,7 @@ size_t totalBytecodeLength = 0; for (auto funcOp : llvm::enumerate(internalFuncOps)) { auto encodedFunction = BytecodeEncoder::encodeFunction( - funcOp.value(), typeOrdinalMap, symbolTable); + funcOp.value(), typeOrdinalMap, symbolTable, debugDatabase); if (!encodedFunction) { return funcOp.value().emitError() << "failed to encode function bytecode"; } @@ -697,6 +703,11 @@ moduleStateDef = iree_vm_ModuleStateDef_end(fbb); } + iree_vm_DebugDatabaseDef_ref_t debugDatabaseRef = 0; + if (!targetOptions.stripSourceMap) { + debugDatabaseRef = debugDatabase.build(fbb); + } + auto moduleNameRef = fbb.createString( moduleOp.sym_name().empty() ? "module" : moduleOp.sym_name()); @@ -711,6 +722,7 @@ iree_vm_BytecodeModuleDef_function_descriptors_add(fbb, functionDescriptorsRef); iree_vm_BytecodeModuleDef_bytecode_data_add(fbb, bytecodeDataRef); + iree_vm_BytecodeModuleDef_debug_database_add(fbb, debugDatabaseRef); iree_vm_BytecodeModuleDef_end_as_root(fbb); return success(); @@ -726,6 +738,17 @@ << "failed to canonicalize vm.module to a serializable form"; } + // Dump VM assembly source listing to a file and annotate IR locations. + if (!targetOptions.sourceListing.empty()) { + OpPrintingFlags printFlags; + printFlags.elideLargeElementsAttrs(8192); + if (failed(mlir::generateLocationsFromIR(targetOptions.sourceListing, "vm", + moduleOp, printFlags))) { + return moduleOp.emitError() << "failed to write source listing to '" + << targetOptions.sourceListing << "'"; + } + } + if (targetOptions.outputFormat == BytecodeOutputFormat::kAnnotatedMlirText) { // Run register allocation now and put the info in the IR so it's printed. for (auto funcOp : moduleOp.getBlock().getOps<IREE::VM::FuncOp>()) {
diff --git a/iree/compiler/Dialect/VM/Target/Bytecode/BytecodeModuleTarget.h b/iree/compiler/Dialect/VM/Target/Bytecode/BytecodeModuleTarget.h index d466312..3a18421 100644 --- a/iree/compiler/Dialect/VM/Target/Bytecode/BytecodeModuleTarget.h +++ b/iree/compiler/Dialect/VM/Target/Bytecode/BytecodeModuleTarget.h
@@ -39,6 +39,11 @@ // Run basic CSE/inlining/etc passes prior to serialization. bool optimize = true; + // Dump a VM MLIR file and annotate source locations with it. + // This allows for the runtime to serve stack traces referencing both the + // original source locations and the VM IR. + std::string sourceListing; + // Strips all internal symbol names. Import and export names will remain. bool stripSymbols = false; // Strips source map information.
diff --git a/iree/compiler/Dialect/VM/Target/Bytecode/CMakeLists.txt b/iree/compiler/Dialect/VM/Target/Bytecode/CMakeLists.txt index 94f80e9..48d2d79 100644 --- a/iree/compiler/Dialect/VM/Target/Bytecode/CMakeLists.txt +++ b/iree/compiler/Dialect/VM/Target/Bytecode/CMakeLists.txt
@@ -22,6 +22,8 @@ "BytecodeModuleTarget.cpp" "ConstantEncoder.cpp" "ConstantEncoder.h" + "DebugDatabaseBuilder.cpp" + "DebugDatabaseBuilder.h" "TranslationFlags.cpp" "TranslationRegistration.cpp" DEPS @@ -31,8 +33,8 @@ MLIRSupport MLIRTransforms MLIRTranslation - iree::compiler::Dialect::IREE::IR - iree::compiler::Dialect::IREE::Transforms + iree::compiler::Dialect::Util::IR + iree::compiler::Dialect::Util::Transforms iree::compiler::Dialect::VM::Analysis iree::compiler::Dialect::VM::IR iree::compiler::Dialect::VM::Transforms
diff --git a/iree/compiler/Dialect/VM/Target/Bytecode/DebugDatabaseBuilder.cpp b/iree/compiler/Dialect/VM/Target/Bytecode/DebugDatabaseBuilder.cpp new file mode 100644 index 0000000..bb86903 --- /dev/null +++ b/iree/compiler/Dialect/VM/Target/Bytecode/DebugDatabaseBuilder.cpp
@@ -0,0 +1,142 @@ +// Copyright 2021 The IREE Authors +// +// Licensed under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + +#include "iree/compiler/Dialect/VM/Target/Bytecode/DebugDatabaseBuilder.h" + +#include "llvm/ADT/TypeSwitch.h" + +namespace mlir { +namespace iree_compiler { +namespace IREE { +namespace VM { + +void DebugDatabaseBuilder::addFunctionSourceMap(IREE::VM::FuncOp funcOp, + FunctionSourceMap sourceMap) { + uint64_t ordinal = funcOp.ordinal().getValueOr(APInt(64, 0)).getZExtValue(); + if (functionSourceMaps.size() <= ordinal) { + functionSourceMaps.resize(ordinal + 1); + } + functionSourceMaps[ordinal] = std::move(sourceMap); +} + +struct LocationTable { + explicit LocationTable(FlatbufferBuilder &fbb) : fbb(fbb) {} + + FlatbufferBuilder &fbb; + + // String table. + DenseMap<StringRef, flatbuffers_string_ref_t> strings; + // All serialized location entries. + SmallVector<iree_vm_LocationTypeDef_union_ref_t> entries; + // Map of uniqued location to the entry in the table. + DenseMap<Location, int32_t> map; + + // Inserts a string into the location table string subtable if needed. + flatbuffers_string_ref_t insert(StringRef value) { + auto it = strings.find(value); + if (it != strings.end()) return it->second; + auto stringRef = fbb.createString(value); + strings[value] = stringRef; + return stringRef; + } + + // Inserts a location into the location table if it does not already exist. + // Returns the ordinal of the location in the table. 
+ int32_t insert(Location baseLoc) { + auto it = map.find(baseLoc); + if (it != map.end()) return it->second; + auto locationRef = + llvm::TypeSwitch<Location, iree_vm_LocationTypeDef_union_ref_t>(baseLoc) + .Case([&](CallSiteLoc loc) { + auto callee = insert(loc.getCallee()); + auto caller = insert(loc.getCaller()); + return iree_vm_LocationTypeDef_as_CallSiteLocDef( + iree_vm_CallSiteLocDef_create(fbb, callee, caller)); + }) + .Case([&](FileLineColLoc loc) { + return iree_vm_LocationTypeDef_as_FileLineColLocDef( + iree_vm_FileLineColLocDef_create( + fbb, insert(loc.getFilename()), loc.getLine(), + loc.getColumn())); + }) + .Case([&](FusedLoc loc) { + flatbuffers_string_ref_t metadataRef = 0; + if (loc.getMetadata()) { + std::string str; + llvm::raw_string_ostream os(str); + loc.getMetadata().print(os); + metadataRef = insert(os.str()); + } + SmallVector<int32_t> childLocs; + childLocs.reserve(loc.getLocations().size()); + for (auto childLoc : loc.getLocations()) { + childLocs.push_back(insert(childLoc)); + } + auto childLocsRef = flatbuffers_int32_vec_create( + fbb, childLocs.data(), childLocs.size()); + iree_vm_FusedLocDef_start(fbb); + iree_vm_FusedLocDef_metadata_add(fbb, metadataRef); + iree_vm_FusedLocDef_locations_add(fbb, childLocsRef); + return iree_vm_LocationTypeDef_as_FusedLocDef( + iree_vm_FusedLocDef_end(fbb)); + }) + .Case([&](NameLoc loc) { + return iree_vm_LocationTypeDef_as_NameLocDef( + iree_vm_NameLocDef_create(fbb, insert(loc.getName()), + insert(loc.getChildLoc()))); + }) + .Default( + [](Location loc) { return iree_vm_LocationTypeDef_as_NONE(); }); + int32_t ordinal = static_cast<int32_t>(entries.size()); + map[baseLoc] = ordinal; + entries.push_back(locationRef); + return ordinal; + } + + iree_vm_LocationTypeDef_union_vec_ref_t finish() { + return iree_vm_LocationTypeDef_vec_create(fbb, entries.data(), + entries.size()); + } +}; + +iree_vm_DebugDatabaseDef_ref_t DebugDatabaseBuilder::build( + FlatbufferBuilder &fbb) { + if 
(functionSourceMaps.empty()) return 0; + + LocationTable locationTable(fbb); + + // functions:[FunctionSourceMapDef] + SmallVector<iree_vm_FunctionSourceMapDef_ref_t> functionRefs; + for (auto &sourceMap : functionSourceMaps) { + SmallVector<iree_vm_BytecodeLocationDef_t> locationDefs; + locationDefs.resize(sourceMap.locations.size()); + for (size_t i = 0; i < sourceMap.locations.size(); ++i) { + locationDefs[i].bytecode_offset = sourceMap.locations[i].bytecodeOffset; + locationDefs[i].location = + locationTable.insert(sourceMap.locations[i].location); + } + auto locationsRef = iree_vm_BytecodeLocationDef_vec_create( + fbb, locationDefs.data(), locationDefs.size()); + functionRefs.push_back( + iree_vm_FunctionSourceMapDef_create(fbb, locationsRef)); + } + auto functionsRef = iree_vm_FunctionSourceMapDef_vec_create( + fbb, functionRefs.data(), functionRefs.size()); + + // location_table:[LocationTypeDef] + auto locationTableRef = locationTable.finish(); + + // DebugDatabaseDef + iree_vm_DebugDatabaseDef_start(fbb); + iree_vm_DebugDatabaseDef_location_table_add(fbb, locationTableRef); + iree_vm_DebugDatabaseDef_functions_add(fbb, functionsRef); + return iree_vm_DebugDatabaseDef_end(fbb); +} + +} // namespace VM +} // namespace IREE +} // namespace iree_compiler +} // namespace mlir
diff --git a/iree/compiler/Dialect/VM/Target/Bytecode/DebugDatabaseBuilder.h b/iree/compiler/Dialect/VM/Target/Bytecode/DebugDatabaseBuilder.h new file mode 100644 index 0000000..50ed480 --- /dev/null +++ b/iree/compiler/Dialect/VM/Target/Bytecode/DebugDatabaseBuilder.h
@@ -0,0 +1,49 @@ +// Copyright 2021 The IREE Authors +// +// Licensed under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + +#ifndef IREE_COMPILER_DIALECT_VM_TARGET_BYTECODE_DEBUGDATABASEBUILDER_H_ +#define IREE_COMPILER_DIALECT_VM_TARGET_BYTECODE_DEBUGDATABASEBUILDER_H_ + +#include "iree/compiler/Dialect/VM/IR/VMOps.h" +#include "iree/compiler/Utils/FlatbufferUtils.h" +#include "iree/schemas/bytecode_module_def_builder.h" +#include "mlir/IR/Attributes.h" +#include "mlir/IR/Location.h" + +namespace mlir { +namespace iree_compiler { +namespace IREE { +namespace VM { + +struct BytecodeLocation { + int32_t bytecodeOffset; + Location location; +}; + +struct FunctionSourceMap { + SmallVector<BytecodeLocation> locations; +}; + +class DebugDatabaseBuilder { + public: + // Appends a function source map entry to the debug database. + void addFunctionSourceMap(IREE::VM::FuncOp funcOp, + FunctionSourceMap sourceMap); + + // Finishes construction of the debug database and emits it to the flatbuffer. + iree_vm_DebugDatabaseDef_ref_t build(FlatbufferBuilder &fbb); + + private: + // Function source maps ordered by function ordinal. + SmallVector<FunctionSourceMap> functionSourceMaps; +}; + +} // namespace VM +} // namespace IREE +} // namespace iree_compiler +} // namespace mlir + +#endif // IREE_COMPILER_DIALECT_VM_TARGET_BYTECODE_DEBUGDATABASEBUILDER_H_
diff --git a/iree/compiler/Dialect/VM/Target/Bytecode/TranslationFlags.cpp b/iree/compiler/Dialect/VM/Target/Bytecode/TranslationFlags.cpp index 0400f32..a4764c9 100644 --- a/iree/compiler/Dialect/VM/Target/Bytecode/TranslationFlags.cpp +++ b/iree/compiler/Dialect/VM/Target/Bytecode/TranslationFlags.cpp
@@ -40,6 +40,12 @@ llvm::cl::init(true), }; +static llvm::cl::opt<std::string> sourceListingFlag{ + "iree-vm-bytecode-source-listing", + llvm::cl::desc("Dump a VM MLIR file and annotate source locations with it"), + llvm::cl::init(""), +}; + static llvm::cl::opt<bool> stripSymbolsFlag{ "iree-vm-bytecode-module-strip-symbols", llvm::cl::desc("Strips all internal symbol names from the module"), @@ -69,6 +75,7 @@ BytecodeTargetOptions targetOptions; targetOptions.outputFormat = outputFormatFlag; targetOptions.optimize = optimizeFlag; + targetOptions.sourceListing = sourceListingFlag; targetOptions.stripSymbols = stripSymbolsFlag; targetOptions.stripSourceMap = stripSourceMapFlag; targetOptions.stripDebugOps = stripDebugOpsFlag;
diff --git a/iree/compiler/Dialect/VM/Target/C/CModuleTarget.cpp b/iree/compiler/Dialect/VM/Target/C/CModuleTarget.cpp index f5d3deb..a0c962e 100644 --- a/iree/compiler/Dialect/VM/Target/C/CModuleTarget.cpp +++ b/iree/compiler/Dialect/VM/Target/C/CModuleTarget.cpp
@@ -7,8 +7,8 @@ #include "iree/compiler/Dialect/VM/Target/C/CModuleTarget.h" #include "emitc/Target/Cpp/CppEmitter.h" -#include "iree/compiler/Dialect/IREE/IR/IREEOps.h" -#include "iree/compiler/Dialect/IREE/Transforms/Passes.h" +#include "iree/compiler/Dialect/Util/IR/UtilOps.h" +#include "iree/compiler/Dialect/Util/Transforms/Passes.h" #include "iree/compiler/Dialect/VM/Analysis/RegisterAllocation.h" #include "iree/compiler/Dialect/VM/Conversion/VMToEmitC/ConvertVMToEmitC.h" #include "iree/compiler/Dialect/VM/Conversion/VMToEmitC/DropExcludedExports.h" @@ -309,7 +309,7 @@ OwningRewritePatternList patterns(moduleOp.getContext()); ConversionTarget target(*moduleOp.getContext()); target.addLegalDialect<IREE::VM::VMDialect>(); - target.addLegalOp<IREE::DoNotOptimizeOp>(); + target.addLegalOp<IREE::Util::DoNotOptimizeOp>(); // Add all VM canonicalization patterns and mark pseudo-ops illegal. auto *context = moduleOp.getContext(); @@ -371,7 +371,7 @@ // C target specific pass modulePasses.addPass(createConvertVMToEmitCPass()); - modulePasses.addPass(createDropCompilerHintsPass()); + modulePasses.addPass(IREE::Util::createDropCompilerHintsPass()); if (failed(passManager.run(moduleOp->getParentOfType<mlir::ModuleOp>()))) { return moduleOp.emitError() << "failed during transform passes";
diff --git a/iree/compiler/Dialect/VM/Transforms/BUILD b/iree/compiler/Dialect/VM/Transforms/BUILD index 07900e4..4a9067f 100644 --- a/iree/compiler/Dialect/VM/Transforms/BUILD +++ b/iree/compiler/Dialect/VM/Transforms/BUILD
@@ -25,9 +25,9 @@ "Passes.h", ], deps = [ - "//iree/compiler/Dialect/IREE/Conversion:PreserveCompilerHints", - "//iree/compiler/Dialect/IREE/IR", "//iree/compiler/Dialect/Shape/IR", + "//iree/compiler/Dialect/Util/Conversion:PreserveCompilerHints", + "//iree/compiler/Dialect/Util/IR", "//iree/compiler/Dialect/VM/Conversion", "//iree/compiler/Dialect/VM/Conversion/IREEToVM", "//iree/compiler/Dialect/VM/Conversion/MathToVM",
diff --git a/iree/compiler/Dialect/VM/Transforms/CMakeLists.txt b/iree/compiler/Dialect/VM/Transforms/CMakeLists.txt index 9facad1..971b3c4 100644 --- a/iree/compiler/Dialect/VM/Transforms/CMakeLists.txt +++ b/iree/compiler/Dialect/VM/Transforms/CMakeLists.txt
@@ -36,9 +36,9 @@ MLIRSupport MLIRTransformUtils MLIRTransforms - iree::compiler::Dialect::IREE::Conversion::PreserveCompilerHints - iree::compiler::Dialect::IREE::IR iree::compiler::Dialect::Shape::IR + iree::compiler::Dialect::Util::Conversion::PreserveCompilerHints + iree::compiler::Dialect::Util::IR iree::compiler::Dialect::VM::Conversion iree::compiler::Dialect::VM::Conversion::IREEToVM iree::compiler::Dialect::VM::Conversion::MathToVM
diff --git a/iree/compiler/Dialect/VM/Transforms/Conversion.cpp b/iree/compiler/Dialect/VM/Transforms/Conversion.cpp index fec73c5..7dc52c3 100644 --- a/iree/compiler/Dialect/VM/Transforms/Conversion.cpp +++ b/iree/compiler/Dialect/VM/Transforms/Conversion.cpp
@@ -7,9 +7,9 @@ #include <memory> #include <tuple> -#include "iree/compiler/Dialect/IREE/Conversion/PreserveCompilerHints.h" -#include "iree/compiler/Dialect/IREE/IR/IREEDialect.h" #include "iree/compiler/Dialect/Shape/IR/ShapeOps.h" +#include "iree/compiler/Dialect/Util/Conversion/PreserveCompilerHints.h" +#include "iree/compiler/Dialect/Util/IR/UtilDialect.h" #include "iree/compiler/Dialect/VM/Conversion/ConversionDialectInterface.h" #include "iree/compiler/Dialect/VM/Conversion/ConversionTarget.h" #include "iree/compiler/Dialect/VM/Conversion/IREEToVM/ConvertIREEToVM.h" @@ -82,8 +82,9 @@ : targetOptions_(targetOptions) {} void getDependentDialects(DialectRegistry ®istry) const override { - registry.insert<IREEDialect, IREE::VM::VMDialect, StandardOpsDialect, - math::MathDialect, AffineDialect, memref::MemRefDialect>(); + registry.insert<IREE::Util::UtilDialect, IREE::VM::VMDialect, + StandardOpsDialect, math::MathDialect, AffineDialect, + memref::MemRefDialect>(); } StringRef getArgument() const override { return "iree-vm-conversion"; } @@ -148,8 +149,10 @@ importSymbols, conversionPatterns, typeConverter); } Shape::populateFoldConversionPatterns(context, conversionPatterns); - populatePreserveCompilerHintsPatterns(context, conversionPatterns); - setupCompilerHintsLegality(context, conversionTarget, typeConverter); + IREE::Util::populatePreserveCompilerHintsPatterns(context, + conversionPatterns); + IREE::Util::setupCompilerHintsLegality(context, conversionTarget, + typeConverter); if (failed(applyPartialConversion(outerModuleOp, conversionTarget, std::move(conversionPatterns)))) {
diff --git a/iree/compiler/Dialect/VM/Transforms/GlobalInitialization.cpp b/iree/compiler/Dialect/VM/Transforms/GlobalInitialization.cpp index 2e7ac8b..0757d9b 100644 --- a/iree/compiler/Dialect/VM/Transforms/GlobalInitialization.cpp +++ b/iree/compiler/Dialect/VM/Transforms/GlobalInitialization.cpp
@@ -83,11 +83,13 @@ // If we didn't need to initialize anything then we can elide the functions. if (initFuncOp.getBlocks().front().getOperations().size() > 1) { + initFuncOp.setPrivate(); moduleBuilder.create<ExportOp>(moduleBuilder.getUnknownLoc(), initFuncOp); } else { initFuncOp.erase(); } if (deinitFuncOp.getBlocks().front().getOperations().size() > 1) { + deinitFuncOp.setPrivate(); moduleBuilder.create<ExportOp>(moduleBuilder.getUnknownLoc(), deinitFuncOp); } else {
diff --git a/iree/compiler/Dialect/VM/Transforms/test/deduplicate_rodata.mlir b/iree/compiler/Dialect/VM/Transforms/test/deduplicate_rodata.mlir index 0ee8a42..aab2db6 100644 --- a/iree/compiler/Dialect/VM/Transforms/test/deduplicate_rodata.mlir +++ b/iree/compiler/Dialect/VM/Transforms/test/deduplicate_rodata.mlir
@@ -1,6 +1,6 @@ // RUN: iree-opt -split-input-file -iree-vm-deduplicate-rodata %s | IreeFileCheck %s -// CHECK-LABEL: vm.module @basic +// CHECK-LABEL: vm.module public @basic vm.module @basic { // CHECK: vm.rodata private @const0 dense<0> vm.rodata private @const0 dense<0> : vector<1xi8> @@ -25,7 +25,7 @@ // ----- -// CHECK-LABEL: vm.module @unique_mime_types +// CHECK-LABEL: vm.module public @unique_mime_types vm.module @unique_mime_types { // CHECK: vm.rodata private @const1a {mime_type = "aaa"} dense<1> vm.rodata private @const1a {mime_type = "aaa"} dense<1> : vector<1xi8> @@ -44,7 +44,7 @@ // ----- // {alignment = 16 : i64, mime_type = "application/x-elf"} -// CHECK-LABEL: vm.module @widen_alignment +// CHECK-LABEL: vm.module public @widen_alignment vm.module @widen_alignment { // CHECK: vm.rodata private @const1a {alignment = 16 : i64} dense<1> vm.rodata private @const1a {alignment = 1 : i64} dense<1> : vector<1xi8>
diff --git a/iree/compiler/Dialect/VM/Transforms/test/global_initialization.mlir b/iree/compiler/Dialect/VM/Transforms/test/global_initialization.mlir index cc2c70e..b652d78 100644 --- a/iree/compiler/Dialect/VM/Transforms/test/global_initialization.mlir +++ b/iree/compiler/Dialect/VM/Transforms/test/global_initialization.mlir
@@ -1,6 +1,6 @@ // RUN: iree-opt -split-input-file -pass-pipeline='vm.module(iree-vm-global-initialization)' %s | IreeFileCheck %s -// CHECK: vm.module @initEmpty { +// CHECK: vm.module public @initEmpty { // CHECK: } vm.module @initEmpty { } @@ -9,20 +9,20 @@ // CHECK-LABEL: @initI32 vm.module @initI32 { - // CHECK: vm.global.i32 mutable @g0 : i32 + // CHECK: vm.global.i32 public mutable @g0 : i32 vm.global.i32 mutable @g0 initializer(@g0init) : i32 vm.func @g0init() -> i32 { %c123 = vm.const.i32 123 : i32 vm.return %c123 : i32 } - // CHECK: vm.global.i32 mutable @g1 : i32 + // CHECK: vm.global.i32 public mutable @g1 : i32 vm.global.i32 mutable @g1 = 123 : i32 - // CHECK: vm.global.i32 mutable @g2 : i32 + // CHECK: vm.global.i32 public mutable @g2 : i32 vm.global.i32 @g2 = 123 : i32 - // CHECK: vm.func @__init() { + // CHECK: vm.func private @__init() { // CHECK-NEXT: %0 = vm.call @g0init() // CHECK-NEXT: vm.global.store.i32 %0, @g0 // CHECK-NEXT: %c123 = vm.const.i32 123 : i32 @@ -37,20 +37,20 @@ // CHECK-LABEL: @initRef vm.module @initRef { - // CHECK: vm.global.ref mutable @g0 : !vm.ref<?> + // CHECK: vm.global.ref public mutable @g0 : !vm.ref<?> vm.global.ref mutable @g0 initializer(@g0init) : !vm.ref<?> vm.func @g0init() -> !vm.ref<?> { %null = vm.const.ref.zero : !vm.ref<?> vm.return %null : !vm.ref<?> } - // CHECK: vm.global.ref mutable @g1 : !vm.ref<?> + // CHECK: vm.global.ref public mutable @g1 : !vm.ref<?> vm.global.ref mutable @g1 : !vm.ref<?> - // CHECK: vm.global.ref @g2 : !vm.ref<?> + // CHECK: vm.global.ref public @g2 : !vm.ref<?> vm.global.ref @g2 : !vm.ref<?> - // CHECK: vm.func @__init() { + // CHECK: vm.func private @__init() { // CHECK-NEXT: %ref = vm.call @g0init() // CHECK-NEXT: vm.global.store.ref %ref, @g0 // CHECK-NEXT: vm.return
diff --git a/iree/compiler/Dialect/VM/Transforms/test/ordinal_allocation.mlir b/iree/compiler/Dialect/VM/Transforms/test/ordinal_allocation.mlir index dbc8278..ae2856d 100644 --- a/iree/compiler/Dialect/VM/Transforms/test/ordinal_allocation.mlir +++ b/iree/compiler/Dialect/VM/Transforms/test/ordinal_allocation.mlir
@@ -13,21 +13,21 @@ // CHECK-SAME: rwdatas = 0 // CHECK-SAME: >} vm.module @global_address_propagation { - // CHECK-DAG: vm.global.i32 mutable @g0 {ordinal = 0 : i32} : i32 + // CHECK-DAG: vm.global.i32 public mutable @g0 {ordinal = 0 : i32} : i32 vm.global.i32 mutable @g0 : i32 - // CHECK-DAG: vm.global.i32 mutable @g1 {ordinal = 4 : i32} : i32 + // CHECK-DAG: vm.global.i32 public mutable @g1 {ordinal = 4 : i32} : i32 vm.global.i32 mutable @g1 : i32 // CHECK-NEXT: @main vm.func @main() -> i32 { // CHECK-NEXT: %[[G0_ADDR:.+]] = vm.const.i32 0 - %0 = vm.global.address @g0 : !iree.ptr<i32> + %0 = vm.global.address @g0 : !util.ptr<i32> // CHECK-NEXT: vm.global.load.indirect.i32 %[[G0_ADDR]] - %1 = vm.global.load.indirect.i32 %0 : !iree.ptr<i32> -> i32 + %1 = vm.global.load.indirect.i32 %0 : !util.ptr<i32> -> i32 // CHECK-NEXT: %[[G1_ADDR:.+]] = vm.const.i32 4 - %2 = vm.global.address @g1 : !iree.ptr<i32> + %2 = vm.global.address @g1 : !util.ptr<i32> // CHECK-NEXT: vm.global.load.indirect.i32 %[[G1_ADDR]] - %3 = vm.global.load.indirect.i32 %2 : !iree.ptr<i32> -> i32 + %3 = vm.global.load.indirect.i32 %2 : !util.ptr<i32> -> i32 vm.return %1, %3 : i32, i32 } }
diff --git a/iree/compiler/Dialect/Vulkan/IR/BUILD b/iree/compiler/Dialect/Vulkan/IR/BUILD index de94c04..94ff990 100644 --- a/iree/compiler/Dialect/Vulkan/IR/BUILD +++ b/iree/compiler/Dialect/Vulkan/IR/BUILD
@@ -44,7 +44,7 @@ deps = [ ":VulkanAttrsGen", ":VulkanEnumsGen", - "//iree/compiler/Dialect/IREE/IR", + "//iree/compiler/Dialect/Util/IR", "@llvm-project//llvm:Support", "@llvm-project//mlir:IR", "@llvm-project//mlir:SPIRVDialect",
diff --git a/iree/compiler/Dialect/Vulkan/IR/CMakeLists.txt b/iree/compiler/Dialect/Vulkan/IR/CMakeLists.txt index f7b76d0..00108a7 100644 --- a/iree/compiler/Dialect/Vulkan/IR/CMakeLists.txt +++ b/iree/compiler/Dialect/Vulkan/IR/CMakeLists.txt
@@ -32,7 +32,7 @@ MLIRIR MLIRSPIRV MLIRSupport - iree::compiler::Dialect::IREE::IR + iree::compiler::Dialect::Util::IR PUBLIC )
diff --git a/iree/compiler/InputConversion/Common/BUILD b/iree/compiler/InputConversion/Common/BUILD index beceecb..7e22c00 100644 --- a/iree/compiler/InputConversion/Common/BUILD +++ b/iree/compiler/InputConversion/Common/BUILD
@@ -53,14 +53,11 @@ deps = [ ":PassHeaders", ":PassesIncGen", - "//iree/compiler/Dialect/Flow/IR", - "//iree/compiler/Dialect/Flow/Transforms", "@llvm-project//mlir:LinalgOps", "@llvm-project//mlir:Pass", "@llvm-project//mlir:SCFDialect", "@llvm-project//mlir:SCFToStandard", "@llvm-project//mlir:StandardOps", - "@llvm-project//mlir:TensorDialect", "@llvm-project//mlir:Transforms", ], )
diff --git a/iree/compiler/InputConversion/Common/CMakeLists.txt b/iree/compiler/InputConversion/Common/CMakeLists.txt index 0adcc40..7c2e3a8 100644 --- a/iree/compiler/InputConversion/Common/CMakeLists.txt +++ b/iree/compiler/InputConversion/Common/CMakeLists.txt
@@ -49,10 +49,7 @@ MLIRSCF MLIRSCFToStandard MLIRStandard - MLIRTensor MLIRTransforms - iree::compiler::Dialect::Flow::IR - iree::compiler::Dialect::Flow::Transforms PUBLIC )
diff --git a/iree/compiler/InputConversion/Common/Passes.cpp b/iree/compiler/InputConversion/Common/Passes.cpp index ae3ef3e..84332fd 100644 --- a/iree/compiler/InputConversion/Common/Passes.cpp +++ b/iree/compiler/InputConversion/Common/Passes.cpp
@@ -6,42 +6,17 @@ #include "iree/compiler/InputConversion/Common/Passes.h" -#include "iree/compiler/Dialect/Flow/Transforms/Passes.h" -#include "mlir/Pass/PassManager.h" -#include "mlir/Pass/PassOptions.h" -#include "mlir/Pass/PassRegistry.h" -#include "mlir/Transforms/Passes.h" - namespace mlir { namespace iree_compiler { -void registerCommonConversionPassPipelines() { - PassPipelineRegistration<> common( - "iree-common-input-transformation-pipeline", - "Runs the common input transformation pipeline", - [](OpPassManager &passManager) { - buildCommonInputConversionPassPipeline(passManager); - }); -} - -// Common transformations to prepare input dialects for IREE. -void buildCommonInputConversionPassPipeline(OpPassManager &passManager) { - passManager.addNestedPass<FuncOp>( - IREE::Flow::createConvertToFlowTensorOpsPass( - /*runBeforeDispatchRegionFormation=*/true)); -} - namespace { #define GEN_PASS_REGISTRATION #include "iree/compiler/InputConversion/Common/Passes.h.inc" // IWYU pragma: export } // namespace void registerCommonInputConversionPasses() { - // Generated. + // Generated passes. registerPasses(); - - // Pipelines. - registerCommonConversionPassPipelines(); } } // namespace iree_compiler
diff --git a/iree/compiler/InputConversion/Common/Passes.h b/iree/compiler/InputConversion/Common/Passes.h index 2691031..f67587a 100644 --- a/iree/compiler/InputConversion/Common/Passes.h +++ b/iree/compiler/InputConversion/Common/Passes.h
@@ -14,15 +14,6 @@ namespace iree_compiler { //===----------------------------------------------------------------------===// -// Pipelines -//===----------------------------------------------------------------------===// - -// Performs input legalization for specific combination of input dialects. -void buildCommonInputConversionPassPipeline(OpPassManager &passManager); - -void registerCommonConversionPassPipelines(); - -//===----------------------------------------------------------------------===// // Passes //===----------------------------------------------------------------------===//
diff --git a/iree/compiler/InputConversion/MHLO/BUILD b/iree/compiler/InputConversion/MHLO/BUILD index aaf1475..ade0a0b 100644 --- a/iree/compiler/InputConversion/MHLO/BUILD +++ b/iree/compiler/InputConversion/MHLO/BUILD
@@ -64,9 +64,9 @@ ":PassesIncGen", "//iree/compiler/Dialect/Flow/IR", "//iree/compiler/Dialect/Flow/Transforms", - "//iree/compiler/Dialect/IREE/IR", "//iree/compiler/Dialect/LinalgExt/IR", "//iree/compiler/Dialect/Shape/IR", + "//iree/compiler/Dialect/Util/IR", "//iree/compiler/InputConversion/Common", "@llvm-project//llvm:Support", "@llvm-project//mlir:Affine", @@ -88,6 +88,7 @@ "@llvm-project//mlir:Transforms", "@mlir-hlo//:chlo_legalize_to_hlo", "@mlir-hlo//:hlo", + "@mlir-hlo//:legalize_einsum_to_dot_general", "@mlir-hlo//:legalize_gather_to_torch_index_select", "@mlir-hlo//:legalize_to_linalg", "@mlir-hlo//:map_lmhlo_to_scalar_op",
diff --git a/iree/compiler/InputConversion/MHLO/CMakeLists.txt b/iree/compiler/InputConversion/MHLO/CMakeLists.txt index 66276fc..eb1d18d 100644 --- a/iree/compiler/InputConversion/MHLO/CMakeLists.txt +++ b/iree/compiler/InputConversion/MHLO/CMakeLists.txt
@@ -72,9 +72,9 @@ MLIRTransforms iree::compiler::Dialect::Flow::IR iree::compiler::Dialect::Flow::Transforms - iree::compiler::Dialect::IREE::IR iree::compiler::Dialect::LinalgExt::IR iree::compiler::Dialect::Shape::IR + iree::compiler::Dialect::Util::IR iree::compiler::InputConversion::Common tensorflow::mlir_hlo PUBLIC
diff --git a/iree/compiler/InputConversion/MHLO/ConvertMHLOToLinalgExt.cpp b/iree/compiler/InputConversion/MHLO/ConvertMHLOToLinalgExt.cpp index cd94e5e..6fd50a6 100644 --- a/iree/compiler/InputConversion/MHLO/ConvertMHLOToLinalgExt.cpp +++ b/iree/compiler/InputConversion/MHLO/ConvertMHLOToLinalgExt.cpp
@@ -9,9 +9,9 @@ #include "iree/compiler/Dialect/Flow/IR/FlowDialect.h" #include "iree/compiler/Dialect/Flow/IR/FlowOps.h" -#include "iree/compiler/Dialect/IREE/IR/IREEOps.h" #include "iree/compiler/Dialect/LinalgExt/IR/LinalgExtDialect.h" #include "iree/compiler/Dialect/LinalgExt/IR/LinalgExtOps.h" +#include "iree/compiler/Dialect/Util/IR/UtilOps.h" #include "iree/compiler/InputConversion/MHLO/PassDetail.h" #include "iree/compiler/InputConversion/MHLO/Passes.h" #include "iree/compiler/InputConversion/MHLO/Rewriters.h"
diff --git a/iree/compiler/InputConversion/MHLO/MHLOToMHLOPreprocessing.cpp b/iree/compiler/InputConversion/MHLO/MHLOToMHLOPreprocessing.cpp index 5e3655d..72a209d 100644 --- a/iree/compiler/InputConversion/MHLO/MHLOToMHLOPreprocessing.cpp +++ b/iree/compiler/InputConversion/MHLO/MHLOToMHLOPreprocessing.cpp
@@ -842,6 +842,8 @@ } OwningRewritePatternList patterns(&getContext()); + // TODO: Remove once we have a general contraction to matmul pass. + mhlo::PopulateEinsumToDotGeneralPatterns(context, &patterns); mhlo::PopulateUnfuseBatchNormPatterns(context, &patterns); mhlo::PopulateComplexLoweringPatterns(context, &patterns); mhlo::PopulateGatherToTorchIndexSelectPatterns(context, &patterns);
diff --git a/iree/compiler/Translation/BUILD b/iree/compiler/Translation/BUILD index 044807c..4d4bcd1 100644 --- a/iree/compiler/Translation/BUILD +++ b/iree/compiler/Translation/BUILD
@@ -24,12 +24,11 @@ "//iree/compiler/Dialect/HAL/Conversion/HALToVM", "//iree/compiler/Dialect/HAL/Target", "//iree/compiler/Dialect/HAL/Transforms", - "//iree/compiler/Dialect/IREE/Transforms", + "//iree/compiler/Dialect/Util/Transforms", "//iree/compiler/Dialect/VM/Conversion", "//iree/compiler/Dialect/VM/Conversion/StandardToVM", "//iree/compiler/Dialect/VM/Target/Bytecode", "//iree/compiler/Dialect/VM/Transforms", - "//iree/compiler/InputConversion/Common", "//iree/compiler/InputConversion/MHLO", "//iree/compiler/InputConversion/TOSA", "//iree/compiler/Utils",
diff --git a/iree/compiler/Translation/CMakeLists.txt b/iree/compiler/Translation/CMakeLists.txt index 79e486f..823575e 100644 --- a/iree/compiler/Translation/CMakeLists.txt +++ b/iree/compiler/Translation/CMakeLists.txt
@@ -32,7 +32,7 @@ iree::compiler::Dialect::HAL::Conversion::HALToVM iree::compiler::Dialect::HAL::Target iree::compiler::Dialect::HAL::Transforms - iree::compiler::Dialect::IREE::Transforms + iree::compiler::Dialect::Util::Transforms iree::compiler::Dialect::VM::Conversion iree::compiler::Dialect::VM::Conversion::StandardToVM iree::compiler::Dialect::VM::Target::Bytecode
diff --git a/iree/compiler/Translation/IREEVM.cpp b/iree/compiler/Translation/IREEVM.cpp index 6b89909..4a6ef7b 100644 --- a/iree/compiler/Translation/IREEVM.cpp +++ b/iree/compiler/Translation/IREEVM.cpp
@@ -10,10 +10,9 @@ #include "iree/compiler/Bindings/TFLite/Transforms/Passes.h" #include "iree/compiler/Dialect/Flow/Transforms/Passes.h" #include "iree/compiler/Dialect/HAL/Transforms/Passes.h" -#include "iree/compiler/Dialect/IREE/Transforms/Passes.h" +#include "iree/compiler/Dialect/Util/Transforms/Passes.h" #include "iree/compiler/Dialect/VM/Target/Bytecode/TranslationFlags.h" #include "iree/compiler/Dialect/VM/Transforms/Passes.h" -#include "iree/compiler/InputConversion/Common/Passes.h" #include "iree/compiler/InputConversion/MHLO/Passes.h" #include "iree/compiler/InputConversion/TOSA/Passes.h" #include "iree/compiler/Utils/TracingUtils.h" @@ -29,19 +28,6 @@ namespace mlir { namespace iree_compiler { -// TODO(#3817): move all of this code to the iree-compile driver/API. -// Breaking this up such that for development iree-opt runs all passes/pipelines -// and iree-translate strictly does the VM dialect to bytecode/emitc files will -// match upstream better, and then our own iree-compile C API/binary will do the -// whole end-to-end with options for bindings/targets/etc. -struct BindingOptions { - // Whether to include runtime support functions for the IREE native ABI. - bool native = true; - // Whether to include runtime support functions required for the IREE TFLite - // API compatibility bindings. - bool tflite = false; -}; - static BindingOptions getBindingOptionsFromFlags() { static llvm::cl::OptionCategory bindingOptionsCategory( "IREE translation binding support options"); @@ -64,27 +50,6 @@ return bindingOptions; } -// The transformation to apply to the input prior to main compiler execution. -// These input pipelines are purposefully primitive and mainly focused on -// test case/reproducers as opposed to anything that should be coming from -// a user. For user/framework level interfacing, a dedicated importer likely -// needs to be created in order to represent whole-module level framework -// quirks. These are just about the ops in the functions. 
-struct InputDialectOptions { - enum class Type { - // Applies no input transformation. Only supported core and extension ops - // are supported. - none, - - // Legalizes input defined over TOSA ops. - tosa, - - // Legalizes input defined over MHLO ops. - mhlo, - }; - Type type; -}; - static InputDialectOptions getInputDialectOptionsFromFlags() { static llvm::cl::OptionCategory inputDialectOptions( "IREE options for controlling the input transformations to apply"); @@ -158,7 +123,7 @@ return success(); } -static void buildIREEVMTransformPassPipeline( +void buildIREEVMTransformPassPipeline( BindingOptions bindingOptions, InputDialectOptions inputOptions, IREE::HAL::TargetOptions executableOptions, IREE::VM::TargetOptions targetOptions, OpPassManager &passManager) { @@ -180,11 +145,10 @@ break; } - buildCommonInputConversionPassPipeline(passManager); IREE::Flow::buildFlowTransformPassPipeline(passManager); IREE::HAL::buildHALTransformPassPipeline(passManager, executableOptions); IREE::VM::buildVMTransformPassPipeline(passManager, targetOptions); - passManager.addPass(mlir::iree_compiler::IREE::createDropCompilerHintsPass()); + passManager.addPass(IREE::Util::createDropCompilerHintsPass()); } void buildDefaultIREEVMTransformPassPipeline(OpPassManager &passManager) {
diff --git a/iree/compiler/Translation/IREEVM.h b/iree/compiler/Translation/IREEVM.h index acaaed1..51edc6b 100644 --- a/iree/compiler/Translation/IREEVM.h +++ b/iree/compiler/Translation/IREEVM.h
@@ -18,12 +18,54 @@ namespace mlir { namespace iree_compiler { +// TODO(#3817): move all of this code to the iree-compile driver/API. +// Breaking this up such that for development iree-opt runs all passes/pipelines +// and iree-translate strictly does the VM dialect to bytecode/emitc files will +// match upstream better, and then our own iree-compile C API/binary will do the +// whole end-to-end with options for bindings/targets/etc. +struct BindingOptions { + // Whether to include runtime support functions for the IREE native ABI. + bool native = true; + // Whether to include runtime support functions required for the IREE TFLite + // API compatibility bindings. + bool tflite = false; +}; + +// The transformation to apply to the input prior to main compiler execution. +// These input pipelines are purposefully primitive and mainly focused on +// test case/reproducers as opposed to anything that should be coming from +// a user. For user/framework level interfacing, a dedicated importer likely +// needs to be created in order to represent whole-module level framework +// quirks. These are just about the ops in the functions. +struct InputDialectOptions { + enum class Type { + // Applies no input transformation. Only supported core and extension ops + // are supported. + none, + + // Legalizes input defined over TOSA ops. + tosa, + + // Legalizes input defined over MHLO ops. + mhlo, + }; + Type type = Type::none; +}; + +// Builds the translation pipeline with defaults. +void buildDefaultIREEVMTransformPassPipeline(OpPassManager &passManager); + +// Builds the translation pipeline with explicit options. +void buildIREEVMTransformPassPipeline( + BindingOptions bindingOptions, InputDialectOptions inputOptions, + IREE::HAL::TargetOptions executableOptions, + IREE::VM::TargetOptions targetOptions, OpPassManager &passManager); + +// Registration hooks. 
void registerIREEVMTransformPassPipeline(); void registerIREEVMTranslation(); void registerIREEVMTranslationFlags(); -void buildDefaultIREEVMTransformPassPipeline(OpPassManager &passManager); - } // namespace iree_compiler } // namespace mlir
diff --git a/iree/compiler/Translation/test/do_not_optimize.mlir b/iree/compiler/Translation/test/do_not_optimize.mlir index 4ce1b3c..b7037fe 100644 --- a/iree/compiler/Translation/test/do_not_optimize.mlir +++ b/iree/compiler/Translation/test/do_not_optimize.mlir
@@ -3,7 +3,7 @@ // CHECK-LABEL: @add func @add() -> i32 { %input = constant 1 : i32 - %unf = iree.do_not_optimize(%input) : i32 + %unf = util.do_not_optimize(%input) : i32 // CHECK: vm.add.i32 %result = addi %unf, %unf : i32 return %result : i32 @@ -25,7 +25,7 @@ // CHECK-LABEL: @chained_add func @chained_add() -> i32 { %input = constant 1 : i32 - %unf = iree.do_not_optimize(%input) : i32 + %unf = util.do_not_optimize(%input) : i32 // CHECK: vm.add.i32 %int = addi %unf, %unf : i32 // CHECK: vm.add.i32 @@ -37,7 +37,7 @@ // CHECK-LABEL: @unfoldable_constant func @unfoldable_constant() -> i32 { - %input = iree.unfoldable_constant 1 : i32 + %input = util.unfoldable_constant 1 : i32 // CHECK: vm.add.i32 %result = addi %input, %input : i32 return %result : i32 @@ -50,7 +50,7 @@ // X-CHECK: vm.func @dynamic_constant // func @dynamic_constant() -> tensor<?x?xf32> { // // X-CHECK: vm.call @hal.buffer_view.dim -// %input = iree.dynamic_shape_constant dense<3.0> : tensor<2x3xf32> -> tensor<?x?xf32> +// %input = util.dynamic_shape_constant dense<3.0> : tensor<2x3xf32> -> tensor<?x?xf32> // %res = "mhlo.abs"(%input) : (tensor<?x?xf32>) -> tensor<?x?xf32> // return %res : tensor<?x?xf32> // }
diff --git a/iree/compiler/Translation/test/smoketest.mlir b/iree/compiler/Translation/test/smoketest.mlir index 26306f2..8718278 100644 --- a/iree/compiler/Translation/test/smoketest.mlir +++ b/iree/compiler/Translation/test/smoketest.mlir
@@ -34,8 +34,8 @@ // CHECK: "local_name": "add" func @add() -> i32 { %c1 = constant 1 : i32 - %unf_c1 = iree.do_not_optimize(%c1) : i32 - %unf_c2 = iree.unfoldable_constant 2 : i32 + %unf_c1 = util.do_not_optimize(%c1) : i32 + %unf_c2 = util.unfoldable_constant 2 : i32 %result = addi %unf_c1, %unf_c2 : i32 return %result : i32 }
diff --git a/iree/compiler/Utils/BUILD b/iree/compiler/Utils/BUILD index c92343c..cbec018 100644 --- a/iree/compiler/Utils/BUILD +++ b/iree/compiler/Utils/BUILD
@@ -28,7 +28,7 @@ deps = [ "//iree/base:tracing", "//iree/base/internal:flatcc", - "//iree/compiler/Dialect/IREE/IR", + "//iree/compiler/Dialect/Util/IR", "@llvm-project//llvm:Support", "@llvm-project//mlir:IR", "@llvm-project//mlir:Pass",
diff --git a/iree/compiler/Utils/CMakeLists.txt b/iree/compiler/Utils/CMakeLists.txt index 9d8d04a..bf3c510 100644 --- a/iree/compiler/Utils/CMakeLists.txt +++ b/iree/compiler/Utils/CMakeLists.txt
@@ -32,7 +32,7 @@ MLIRTransforms iree::base::internal::flatcc iree::base::tracing - iree::compiler::Dialect::IREE::IR + iree::compiler::Dialect::Util::IR PUBLIC )
diff --git a/iree/hal/local/loaders/vmvx_module_loader.c b/iree/hal/local/loaders/vmvx_module_loader.c index 7cb0536..85aebda 100644 --- a/iree/hal/local/loaders/vmvx_module_loader.c +++ b/iree/hal/local/loaders/vmvx_module_loader.c
@@ -240,7 +240,7 @@ // func @entry( // %local_memory: !vmvx.buffer, // %constants: !vmvx.buffer, - // %bindings: !iree.list<!vmvx.buffer>, + // %bindings: !util.list<!vmvx.buffer>, // %workgroup_x: index, // %workgroup_y: index, // %workgroup_z: index,
diff --git a/iree/hal/string_util.c b/iree/hal/string_util.c index f495a7a..0c761d8 100644 --- a/iree/hal/string_util.c +++ b/iree/hal/string_util.c
@@ -81,7 +81,7 @@ int n = snprintf(buffer ? buffer + buffer_length : NULL, buffer ? buffer_capacity - buffer_length : 0, (i < shape_rank - 1) ? "%dx" : "%d", shape[i]); - if (n < 0) { + if (IREE_UNLIKELY(n < 0)) { return iree_make_status(IREE_STATUS_FAILED_PRECONDITION, "snprintf failed to write dimension %zu", i); } else if (buffer && n >= buffer_capacity - buffer_length) {
diff --git a/iree/modules/check/test/failure.mlir b/iree/modules/check/test/failure.mlir index f138dc9..82d3df3 100644 --- a/iree/modules/check/test/failure.mlir +++ b/iree/modules/check/test/failure.mlir
@@ -6,7 +6,7 @@ // CHECK: Test failed as expected module @expect_failure { func @expect_true_of_false() { - %false = iree.unfoldable_constant 0 : i32 + %false = util.unfoldable_constant 0 : i32 check.expect_true(%false) : i32 return }
diff --git a/iree/modules/check/test/success.mlir b/iree/modules/check/test/success.mlir index 19f9035..d0125d6 100644 --- a/iree/modules/check/test/success.mlir +++ b/iree/modules/check/test/success.mlir
@@ -2,13 +2,13 @@ // RUN: [[ $IREE_VULKAN_DISABLE == 1 ]] || (iree-translate --iree-input-type=mhlo --iree-hal-target-backends=vulkan-spirv -iree-mlir-to-vm-bytecode-module %s | iree-check-module --driver=vulkan -) func @expect_true() { - %true = iree.unfoldable_constant 1 : i32 + %true = util.unfoldable_constant 1 : i32 check.expect_true(%true) : i32 return } func @expect_false() { - %false = iree.unfoldable_constant 0 : i32 + %false = util.unfoldable_constant 0 : i32 check.expect_false(%false) : i32 return } @@ -24,48 +24,48 @@ } func @expect_all_true_tensor() { - %all_true = iree.unfoldable_constant dense<1> : tensor<2x2xi32> + %all_true = util.unfoldable_constant dense<1> : tensor<2x2xi32> check.expect_all_true(%all_true) : tensor<2x2xi32> return } func @expect_eq() { - %const0 = iree.unfoldable_constant dense<[1, 2, 3, 4, 5]> : tensor<5xi32> - %const1 = iree.unfoldable_constant dense<[1, 2, 3, 4, 5]> : tensor<5xi32> + %const0 = util.unfoldable_constant dense<[1, 2, 3, 4, 5]> : tensor<5xi32> + %const1 = util.unfoldable_constant dense<[1, 2, 3, 4, 5]> : tensor<5xi32> check.expect_eq(%const0, %const1) : tensor<5xi32> return } func @expect_eq_const() { - %const0 = iree.unfoldable_constant dense<[1, 2, 3, 4, 5]> : tensor<5xi32> + %const0 = util.unfoldable_constant dense<[1, 2, 3, 4, 5]> : tensor<5xi32> check.expect_eq_const(%const0, dense<[1, 2, 3, 4, 5]> : tensor<5xi32>) : tensor<5xi32> return } func @expect_almost_eq() { - %const0 = iree.unfoldable_constant dense<[1.0, 2.0, 3.0, 4.0, 5.0]> : tensor<5xf32> - %const1 = iree.unfoldable_constant dense<[0.999999, 2.0, 3.0, 4.0, 5.0]> : tensor<5xf32> + %const0 = util.unfoldable_constant dense<[1.0, 2.0, 3.0, 4.0, 5.0]> : tensor<5xf32> + %const1 = util.unfoldable_constant dense<[0.999999, 2.0, 3.0, 4.0, 5.0]> : tensor<5xf32> check.expect_almost_eq(%const0, %const1) : tensor<5xf32> return } func @expect_almost_eq_const() { - %const0 = iree.unfoldable_constant dense<[1.0, 2.0, 3.0, 4.0, 5.0]> : tensor<5xf32> + %const0 = 
util.unfoldable_constant dense<[1.0, 2.0, 3.0, 4.0, 5.0]> : tensor<5xf32> check.expect_almost_eq_const(%const0, dense<[0.999999, 2.0, 3.0, 4.0, 5.0]> : tensor<5xf32>) : tensor<5xf32> return } func @add() { - %c5 = iree.unfoldable_constant dense<5> : tensor<i32> + %c5 = util.unfoldable_constant dense<5> : tensor<i32> %result = "mhlo.add"(%c5, %c5) : (tensor<i32>, tensor<i32>) -> tensor<i32> - %c10 = iree.unfoldable_constant dense<10> : tensor<i32> + %c10 = util.unfoldable_constant dense<10> : tensor<i32> check.expect_eq(%result, %c10) : tensor<i32> return } func @floats() { - %cp1 = iree.unfoldable_constant dense<0.1> : tensor<f32> - %c1 = iree.unfoldable_constant dense<1.0> : tensor<f32> + %cp1 = util.unfoldable_constant dense<0.1> : tensor<f32> + %c1 = util.unfoldable_constant dense<1.0> : tensor<f32> %p2 = "mhlo.add"(%cp1, %cp1) : (tensor<f32>, tensor<f32>) -> tensor<f32> %p3 = "mhlo.add"(%p2, %cp1) : (tensor<f32>, tensor<f32>) -> tensor<f32> %p4 = "mhlo.add"(%p3, %cp1) : (tensor<f32>, tensor<f32>) -> tensor<f32>
diff --git a/iree/samples/custom_modules/dialect/BUILD b/iree/samples/custom_modules/dialect/BUILD index 43106ac..86c88be 100644 --- a/iree/samples/custom_modules/dialect/BUILD +++ b/iree/samples/custom_modules/dialect/BUILD
@@ -22,7 +22,7 @@ ), deps = [ "//iree/compiler/Dialect/HAL/IR:td_files", - "//iree/compiler/Dialect/IREE/IR:td_files", + "//iree/compiler/Dialect/Util/IR:td_files", "@llvm-project//mlir:StdOpsTdFiles", ], ) @@ -44,7 +44,7 @@ ":custom_ops_gen", "//iree/compiler/Dialect/HAL/Conversion", "//iree/compiler/Dialect/HAL/IR", - "//iree/compiler/Dialect/IREE/IR", + "//iree/compiler/Dialect/Util/IR", "//iree/compiler/Dialect/VM/Conversion", "@llvm-project//llvm:Support", "@llvm-project//mlir:IR",
diff --git a/iree/samples/custom_modules/dialect/CMakeLists.txt b/iree/samples/custom_modules/dialect/CMakeLists.txt index 71cfc8c..161fd2f 100644 --- a/iree/samples/custom_modules/dialect/CMakeLists.txt +++ b/iree/samples/custom_modules/dialect/CMakeLists.txt
@@ -32,7 +32,7 @@ MLIRTransforms iree::compiler::Dialect::HAL::Conversion iree::compiler::Dialect::HAL::IR - iree::compiler::Dialect::IREE::IR + iree::compiler::Dialect::Util::IR iree::compiler::Dialect::VM::Conversion PUBLIC )
diff --git a/iree/samples/custom_modules/dialect/custom_dialect.h b/iree/samples/custom_modules/dialect/custom_dialect.h index 7300b8e..8ad6d1d 100644 --- a/iree/samples/custom_modules/dialect/custom_dialect.h +++ b/iree/samples/custom_modules/dialect/custom_dialect.h
@@ -8,7 +8,7 @@ #define IREE_SAMPLES_CUSTOM_MODULES_DIALECT_CUSTOM_DIALECT_H_ #include "iree/compiler/Dialect/HAL/IR/HALTypes.h" -#include "iree/compiler/Dialect/IREE/IR/IREETypes.h" +#include "iree/compiler/Dialect/Util/IR/UtilTypes.h" #include "mlir/IR/Dialect.h" #include "mlir/IR/OpDefinition.h" #include "mlir/Interfaces/SideEffectInterfaces.h"
diff --git a/iree/samples/custom_modules/dialect/custom_ops.td b/iree/samples/custom_modules/dialect/custom_ops.td index d9d0b65..1b633e3 100644 --- a/iree/samples/custom_modules/dialect/custom_ops.td +++ b/iree/samples/custom_modules/dialect/custom_ops.td
@@ -8,7 +8,7 @@ #define IREE_SAMPLES_CUSTOM_MODULES_DIALECT_CUSTOM_OPS include "iree/compiler/Dialect/HAL/IR/HALBase.td" -include "iree/compiler/Dialect/IREE/IR/IREEBase.td" +include "iree/compiler/Dialect/Util/IR/UtilBase.td" include "mlir/Interfaces/SideEffectInterfaces.td" def CUSTOM_Dialect : Dialect {
diff --git a/iree/samples/custom_modules/dialect/test/conversion.mlir b/iree/samples/custom_modules/dialect/test/conversion.mlir index 07d2892..d7bf428 100644 --- a/iree/samples/custom_modules/dialect/test/conversion.mlir +++ b/iree/samples/custom_modules/dialect/test/conversion.mlir
@@ -128,7 +128,7 @@ // ----- // CHECK: vm.import @custom.get_unique_message -// CHECK-LABEL: func @getUniqueMessageOp +// CHECK-LABEL: @getUniqueMessageOp func @getUniqueMessageOp() -> !custom.message { // CHECK: %ref = vm.call @custom.get_unique_message() : () -> !vm.ref<!custom.message> %0 = "custom.get_unique_message"() : () -> !custom.message
diff --git a/iree/samples/models/mnist.mlir b/iree/samples/models/mnist.mlir index 03fd6e1..f408e3a 100644 --- a/iree/samples/models/mnist.mlir +++ b/iree/samples/models/mnist.mlir
@@ -17,17 +17,17 @@ flow.variable @"__iree_flow___sm_node24__model.layer-2.kernel" dense<"0x695C23BEBF4FA5BDFED4903E7B2AD2BC6EEAC6BED4DDF83DAEB7DCBDF725B9BD9065993DB077BE3D69CB79BEDADA32BEF5A00EBECB63B03EAB2238BD619285BEDA7BF5BDC1BAA0BE9CA5CB3E720E5DBE9A4EA6BE6AF28A3E279C63BD5A72C23D0985F23D42E1543ED355B5BEB359F83D16FEC13DC297423D1EA3583EC2CD46BE59B5C2BD73893C3D385F573D890D2CBE703FF5BD62934D3E9065153EA29407BE77AD883E740E5BBEAAF08A3E206042BECD0D003E7380843E1BE2AA3E9FE8A0BEFE6F6CBDF95D22BEBFD29ABC77BB4CBE3C97003E5F3B33BB39FA14BEF25A1ABED9A7F43E17CC4A3D078096BC771219BE24C0F4BDB48E983E327F393EA9041E3C6FC136BE62C1D4BE4219853E84D7D23EC43745BE739280BEBED991BED58E073DB9021BBECDB88B3E63FB4F3ED3A566BDB54202BE1D15B3BE945D8A3DC389FB3C7C8F9EBDD8C0F33D1BDF4B3E4BD3F8BB89C6F1BD25C5A2BDE30D15BE756EDD3D19E7CBBD47A684BC88B28EBD0C13963C714F253EE9B2DA3E0DF206BE5F193BBEF81F8BBE22D72E3D0AEB4ABBBCA4AA3D914C21BE19B255BE6DBC573E1A81463E33D30BBEAFABC7BD1B72A0BEA726D53E8133003E7367213E0D2F8BBEC1B746BEABCBA23D2ACAA33E96FF91BDC63FA7BC36790C3C9ECD1D3E983A7A3E0AFDEDBD17FE2A3ECEE74A3EC3C46ABE99702C3D5F2C12BEA29371BDBDEB923DBCDEDA39D8FAD836C50405BD150A14BE5FE854BCC621723C9C68263EBB7AC73EFF9281BEEFA56D3E137931BE6405BEBC5E1D103E43F425BE348150BEE8EB9CBB65FC223EF106B2BE1FE9E83D3CEC743E2D40ACBD856F0F3EEBB8273DBCD34A3E183206BEEEE5D33D05DC203EA1E2EA3D4D9087BC224335BDA2AC53BEB208DCBD54F3B43D3FD38CBD00D7263E911E9BBE9395AABE974F04BE5181003F1CD2753EB0DE50BED1CCF2BD97A879BC0B01BDBEF0F6813EDC4AC93EDC7821BEFA6293BE99F2963E782FAB3D25182E3E4BD275BAE91A90BE968065BEAE1398BDFE1002BEDACE05BE37E81DBCA3FE673E4CFC75BEB85C8EBD6BC376BEBD0B12BC58D021BD76F781BDEFCD933E8B65483D0FC4113E3757623C81869C3C802FCD3D82CE22BE9C59E8BADC2F263D5C140F3E01519E3EEBAD893D6DD9993DA767913D436608BE2147C13E918A223DE399173EEBB115BE926C803D44BC11BE83F52EBE14F1AD3C92E925BE67AA38BEBED1013ECC20893E86A8F5BDCD32553E173A683EA536013E6B2140BEAE1774BDB51D9DBEED7E103EBEAD463E914F8DBE52399F3D0F033B3EF5B7333E2D342CBED14B0EBE28EA7A3EA9C2983DFD75793E31844EBE0D70
50BEF37FBEBE498D22BDEAC6D7BC4AD6B93DDE3949BEA7E6B8BDE5E1553ECC6E8DBD236ACBBD58D095BEAA88CCBD44352FBD952A6ABE240FC43ED9724F3C9D3668BE448D203F041E65BE0FB227BE3025D4BED1035DBE6FDDA13ED8AF90BCD4EAFE3DAD3F47BE137CD2BE068A7F3B7E9D203F05FFEFBE4ACB983EEF30A7BE238B0ABE0A287FBEF9956C3E47D5B5BC687B823DACAAC33CCEE10A3EA6C249BD056800BDF919A93EA852483DC204BBBD9840F03DF8BA0ABE83A9D13EB0EFFBBE511AE6BBE0DCBFBB1F4267BD5D9C343EE37B853D633FB4BDE2ACF6BC66D65EBEDA08BA3D7F7B1A3E1CA0A53D0EFDE5BA6638563E0B3E1CBEDA32ABBDCE70993C22BCB73D13F323BEA56A7B3E0E7C09BEBE4A65BDAC9FFA3ED6BCF0BEDA41DD3D79DBF33D37B7A2BDFDA448BE81CEC63D30C86BBE25CF93BE6D519DBE6728C33DF26B233EA3F6D63E5B9E0B3E6C31C9BE4F573CBED7944A3E5AE8AABE463586BD0AF5C2BE90C6DA3EE9A1803EB744C13EFE29133E458F7BBEB28C893EF14DBD3E3F59413E6C12333E67F689BE9D64AEBDB534153E5512393EB5827FBD28AA9E3D8C6B2CBE9ACCCE3D17CDB73D1565783ED86C57BD8CF3933EB7B91A3E007E3A3D003A6D3D1E3D463E0687853C8F36173EF4FB0A3E6446813E36411BBE6D16C7BDEAC31BBE47A73CBDD8500EBEBB019E3EB04A3BBA50251BBE445DB7BD8AEE9A3D02C8813AFAB29B3D8386C63DCBA0DBBCF6147C3CAED4243E4D93EA3E2E61ADBE87392EBE79F8C43D8E80BA3ED3AC37BE48FE383EE52095BE77FF7E3EDCB04B3EED0120BEA1D3A0BEAAF6973EA55C9DBE493C9EBDC745DB3ED3A585BE431435BCF414C5BC99FD4DBDE4AD3B3E7D5EC53D6ABC8ABE4C4BD9BE48C18CBD66F147BD077F093EF99FAFBD8F7C7E3EAFE4143DD405013FC3810A3F5365B1BE9B191ABD18E7B5BE8875D93EA6CFACBED292C2BE405AF5BD88F5C43D33FFA73EA8B20A3D9720023E5DDAE53C626681BE35BF88BEA6DD50BEF8A3623EFDC9903ECFCB823EDFEEA13E2D5058BD714A74BD7BB2FBBD92B4DE3D89560A3E5EC483BE455FCD3D8D9627BEF52431BDBE29FEBD177AAD3EC03F993EA34AC0BD201DEDBC277764BD763D92BE4240D6BDFD7C9D3DB589083DAC7FAABD325451BECED065BD4B05363EC207003EE74F843E931C60BE743450BE4AE6993E4E8B203E90E1963E5392BA3DB4D8F4BD9BF2933E7BB0863CBC9945BEEB3CF0BE11CF033E2C3AD2BE50499B3EF5968CBDFF2B3D3E6C8CB23C9045C03EC2CDE5BD8B71D3BE3E539B3E6A37C0BE3E8C203DFDE29A3E5E6B40BD1C38B8BDBFAF8BBDE934723EE11396BDBDF9EE3D8932BBBE53FBAABDFD69FD3CA722D5BD569C353DCA5DE43D1CF8B43D613D8A3E8855FABD78BF32BDAB8A
C23D690B8DBE4A579A3E2288843ECE44C3BE79DD6BBEE44A39BE7E1B1A3DA120E73C16A8753D60209FBE35C3EA3E2621FF3D5CD9E7BB3F6A9C3E1EC8F5BDB1F38C3EF06E1D3EBEFCA5BD1A73373EFE13373ED99C58BDD3EB753E07EDE23C0045653E64C9ABBD2258C4BDBF18263D54C85FBE11A19B3E1ACF94BE2280BDBE2A24693E3A1B513D42355B3EE29876BD7C7DF1BEB1E9A63D61CA9CBEF609503E9AA4353EC3F5A03E5854AA3DA32D95BDC26C3A3E7DD85B3D59CAF33C0CC2173D27EE74BDB406313ECC9D83BEEB4476BEA1CE963C92D5B5BD707119BE4F45C33E096D48BE1020AF3EAEB9C8BD916CAF3EA71ECFBE8319B5BE36B30F3E9815463DDE8C8E3EE1FFB63E343B2C3E34FB483C58AAD0BC42AABDBD07C9963BA789FE3D9AAFD83DB491A13E93444B3E3CD8BF3E0476AFBE5B13263E04D768BD73087BBE72A8D83DF1E9313EBFFE283E92A7EA3E5104D63DE2FB483E55DD5EBEE606EC3CF66246BDB3F9BFBE83E6AE3EAD9B9ABE8DB5C0BC7484303E4AADC03EB09AE73D462380BEF1678B3DD26DA53CDC81B3BDBAFDB63DC93C5F3E67C5C4BE0A9A8D3E3F05493E2C1868BEBADE163EC492423E5F3173BE313266BE31B0343D52AED23D558BFFBC238AF4BD7CD1EEBDA21C883E0256BEBEC784C23E443D7FBE8EE6483E3B05E6BDC02B01BEC5402B3E448758BE7207583E9CFDACBE88821EBEA569BF3E25E841BE9D608B3E5CDC1B3E6E2EDBBE2EBD353CF5147E3D3E86D2BD67483A3E5AF14FBE824036BD273497BEB5946E3D84CA4FBE2F4D03BE7812433E64A4473E1783813E7F50C93C16FC703DD1503B3E1E28553EE062813EA32584BE0FA0D5BD397E423DEAD290BDEDA4373DA75D46BEB6C820BD4C55D13D15CD22BE5FF6803D44ED3D3E288B3EBE6D6B6CBE640880BDA0C2383E972C1B3E5D5B1A3ED054703E36BE8C3D43B01A3EDF045BBDA851F0BE67F26D3D0892583E578BBABE5A6C2E3EFD3B5B3D7197903E195C4B3E7F696CBE7D24D5BE3621883EB5FB833ECD44753E99CD34BE02FA88BEEEA10CBEB5B846BE1FA8E13D8F76E73CB307A5BE0A0E19BE2C6A413E3A9DF6BE2146BFBE2F008FBE52ADE33E9464B23DD1549F3E32F0ADBD8CD2EDBD2E3B993E358C633E817034BE83E035BE4CB330BE2827753DE043D0BEC5725EBEDA5F883C89E82FBE834DFF3DEE61DF3D90AFA53DC28F11BE03CB70BE809C8E3C01D481BE7EB822BE0669553E1D0EAABC3CDF04BE30FA86BE6E58B63E88BFBBBD5BB3383E67808EBE5A35F13D8D69253E57EE893DD9C2D43C2E37823E1958ABBE90F900BEB274C93DBE0BAC3D44486D3E4E96B1BEBB60463ED5A3583E6C59D7BDF4D9C73EAA4199BE55FA963EA823B2BDD6165BBE5326A4BE8349A93ECF6A62BDD802CD3EE159
91BE8E2476BE800681BE1807DD3C044A68BEF3EE963EC9853FBDBA2AA23E70F6B5BE515789BC411E95BE7DE7BD3EC2100EBEF2A3E6BC65BD46BE9E3664BED10F293E420F103E062DDABC659C86BE5C0A7A3E598D1F3E375886BE9E45A33E482D9BBE116985BE78C0923E13ED463E02FB48BC4EDE833DE00CC53D586522BCE9AC54BDE1A86DBECBEA8BBE57C21ABECB20C53D5A64A53E82865BBE20ABCEBD76B5603E07AA233E23DD91BE60028BBD64B088BDECCFDFBE1DDEE13EC1C63F3E0814543E0F073F3EBC04D23E1C6F8E3E92EA83BC99E8373E6A0BA0BD56B584BC058B6E3D511445BE963B1BBCA0129A3D16E9EF3D42A86DBED763153EAF6480BC862C8CBEDCBB31BE8860EDBBB529013FD86E0CBE217C1BBE0EEA48BE63FAAF3D0838DBBD392D9E3ED42BDDBE18A98EBE191F33BD8032743ECA60933D00AB9CBBB4E95ABDAB9BBE3ED253D0BB8317B13CA555F9BD16DE59BC6F47153E94D67F3EF0C1403E2CB08BBDB379A53D2D182A3E48CF913EAB37573E19BED4BDA8E822BE38E65C3EC038433D36AB67BD8D586D3D1612E63DB20337BE212B823DF5BE0D3E03A70C3EC91752BE456E363DDF5E193C5504E13D964C573DEA1E143ED3C1433E9D2A813E2189ABBEDC6E6B3EF48E61BEC1DD75BEF2D9C5BC5FE3B63D7E3C34BE7A5BD1BD09B3F43D757BA6BE5CF6AEBE9BC6A63C77F48C3C67B2493E2AE7843E799C8FBC0D40A73E6B855A3ED0B9793D989390BE1B810ABDDF21B6BEB7DA0DBE01C4353E3C009A3DEA1395BE5F0244BED4F68E3E8AE0A93C5236843DE16A503ED4468FBE6740913E886F30BED70BF83D1F8B693EEA87383EA957E33D9E2B623B73A2833D9D28D23A101716BE814FB83D04935D3D5228853E40CF693E88461EBE97D48FBECE031F3EE2253B3E6E4E88BE1A81863E86502ABE7A808D3E437468BEF974323E43D3BEBE90086D3EB872183D6A9EC53DBCA6673EE40E8C3EFEBE5BBEDAFD083D299687BE92E91BBE6C5272BDF9B49DBD8EF8D3BD6F45863E389F12BCE322AA3D13A29D3EA6EF9DBE1208923EAD459CBE1FBF3B3EB4EBCFBEED7D013D1583373E0EC703BE56F902BC6E541B3E150E6EBEF7D0AF3EFDE92EBC8C6611BDA825223D895BAA3D1109D0BEA13E113D410210BEA699163E700AF83EE0EAE2BD9172233ED8DA77BE582F33BEE0DD663CA41E36BEC22F15BE084390BDD11C63BB44333B3E0D7B89BD543073BDDFCD113D07366ABD3D484E3E9595E43D2C20FD3DAACE8B3E419D86BE4FF4473EE2D9943DDA2C0A3EA60A573EBED1D13DD635B6BCE5AFDC3B913ED5BD5F950FBE5826D0BCA45A623E5F09CDBD22382BBE10D8253E60D3843E5BE9C4BD98AD25BE1D1E53BE27C3F0BC72E971BE37F5A8BDD04F6D3E1562593E401BAD3D585D
D9BD0300B3BEA2EA43BE35CBBABC96D5683EAF436FBEB9AA933E9688C2BD29586BBEC29CD53CEE7990BEBA29ACBEF2311DBE7D1C1EBE2428E03DF548F7BBE62C5EBEC668D03D39F98FBDF29151BE691579BB973CAA3E3BB3C43D11670EBE74FC7BBE5BCC0E3E34ED1D3D25408A3E7E94613EE607913D9E2481BE5C890E3E87E1A13E9A048EBE31A48C3E96A417BDF3AD833E644E643E85E8B4BECFD8213E72094EBE734170BD1B12553EEF747CBE241F96BDB0C7B9BA043C943EF16C12BDE5F8383E298633BCD48AA1BE3C66F2BDB9C84BBE36236B3E02574EBC334F19BEAE515C3EBFADFD3C47DD83BE59C80B3EA3B44FBE749E833E6718163E112051BD1849AB3E014288BD24A10E3E6EE1AB3CE0A752BE536F833C3D604E3D3C0CF4BD4745C93E437AACBE6F9F17BE6D9376BE8EA841BEDE26DC3D798200BEA18814BE0B2B923E7B8C32BE2E683FBE84CE943DE9B310BE509EA8BDDBB1E73DA4A3A33E5B861E3EFDAE1ABE2417A03D48C258BE077CC63E929401BF577EF3BD6A19AF3CA26FD23C13F6203E6BC8D2BE914C4F3C8389CD3E1132593D7FA3EFBDB7414A3E42A89EBE31CD1CBEC54908BDE2C423BDE4D6ADBEABCBAC3E6CCB4ABD78BDB63EE6D998BDAE4144BE9FE238BE6C7F3C3EBB324A3E2E1CEEBDD10358BEC63704BEA6BD4ABC1C8FC93BEA2040BE4774D83D1119053DE7B0E03D4251DF3D715A8F3E185CA3BC99BE813DC165513B0763AABEA0D696BEEEAB5CBE3D576C3E9CDD9B3EB879B1BD1DADAEBEBC36EDBC00A160BDD4FC313CE62B9BBE9E83A7BE2D1FBD3D57AD5D3E0B510A3B4F06423C457EC2BE655D19BE1DAE443E2C8F143ED4FA8D3DCCA964BEBA8276BE2E89E23EE8B13E3E6368C9BE7049DDBDB8F481BEC3A9143D25695DBEDA6B74BD837A903D6F491ABEED3A0C3D197F07BECB2DE0BA610340BD29F87B3E5C97C1BB676C123E2797703D572F303E0134E5BDB1EC26BEC9A6B63E147BACBD6CB5503D198E37BE3E37D7BD616C22BE76C78CBDB84751BE72CA863E200336BE4E54F13D378573BEB6AA61BBC1D8863D4AD897BEF792903EAC18BCBC561F5BBDA07E7DBCBA1EA83D20C910BE459837BE3F67CF3D6497FC3D511531BE7B2344BE47A6303E01DA7A3E73B132BE00F0D03DD4A38CBC3EC48ABED9230F3EF71A68BEE8428DBD5E65BB3D2042B03ECFB7BC3E143F50BE4A37D73D54370C3DAAC138BE4C00873EFD6FA43DBBFD183E00DE3DBC995DBEBEFCE0C53C5500F23E9AC7E03D0360963D696ACF3EFF0B73BE31803B3E9D1B103E6513C6BE486770BE5A9DC6BC4E136BBD3C07223E83F3A73E33844F3C81269CBE8E2D853E522142BE998126BE669417BDE9EB90BE735FB3BD221796BE54074F3DCC278C3D665E09BD243D78BD82147B3D3C3F
7D3EA26F9B3EAA743CBEA87F5CBE623D2BBEF91618BEF019DEBD682C8E3E4418893EB0C323BD737CB43D32225F3E4B7931BEF39D8D3EF07E99BE35A2803DD078263E201D7A3EFAEFBDBD82CE7EBE5A73623DC5EF463C54678DBE9C7970BD7B558CBD657C7E3D5DE0DD3C47806DBEC13CAC3E1A4CD53E27EBA3BE604B813DF38B8A3C834553BD76FA073EDA840EBEE9E5CABC98F04EBD8F4BF1BD50B7913EBC48ED3CDD1A4B3D"> : tensor<128x10xf32> attributes {sym_visibility = "private"} flow.variable @"__iree_flow___sm_node25__model.layer-2.bias" dense<[-0.11818973, 0.115988247, 0.0210834835, -0.0308276638, 0.0101165017, 0.119799189, 0.00523598073, 0.117924452, -0.217200637, -0.0239296928]> : tensor<10xf32> attributes {sym_visibility = "private"} func @predict(%arg0: tensor<1x28x28x1xf32>) -> tensor<1x10xf32> attributes {iree.module.export, iree.reflection = {abi = "sip", abiv = 1 : i32, sip = "I8!S5!k0_0R3!_0"}} { - %0 = flow.variable.address @"__iree_flow___sm_node17__model.layer-1.kernel" : !iree.ptr<tensor<784x128xf32>> - %1 = flow.variable.address @"__iree_flow___sm_node18__model.layer-1.bias" : !iree.ptr<tensor<128xf32>> - %2 = flow.variable.address @"__iree_flow___sm_node24__model.layer-2.kernel" : !iree.ptr<tensor<128x10xf32>> - %3 = flow.variable.address @"__iree_flow___sm_node25__model.layer-2.bias" : !iree.ptr<tensor<10xf32>> + %0 = flow.variable.address @"__iree_flow___sm_node17__model.layer-1.kernel" : !util.ptr<tensor<784x128xf32>> + %1 = flow.variable.address @"__iree_flow___sm_node18__model.layer-1.bias" : !util.ptr<tensor<128xf32>> + %2 = flow.variable.address @"__iree_flow___sm_node24__model.layer-2.kernel" : !util.ptr<tensor<128x10xf32>> + %3 = flow.variable.address @"__iree_flow___sm_node25__model.layer-2.bias" : !util.ptr<tensor<10xf32>> %4 = mhlo.constant dense<0.000000e+00> : tensor<1x128xf32> %5 = mhlo.constant dense<0xFF800000> : tensor<f32> %6 = mhlo.constant dense<0.000000e+00> : tensor<f32> - %7 = flow.variable.load.indirect %3 : !iree.ptr<tensor<10xf32>> -> tensor<10xf32> - %8 = flow.variable.load.indirect %2 : 
!iree.ptr<tensor<128x10xf32>> -> tensor<128x10xf32> - %9 = flow.variable.load.indirect %1 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %10 = flow.variable.load.indirect %0 : !iree.ptr<tensor<784x128xf32>> -> tensor<784x128xf32> + %7 = flow.variable.load.indirect %3 : !util.ptr<tensor<10xf32>> -> tensor<10xf32> + %8 = flow.variable.load.indirect %2 : !util.ptr<tensor<128x10xf32>> -> tensor<128x10xf32> + %9 = flow.variable.load.indirect %1 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %10 = flow.variable.load.indirect %0 : !util.ptr<tensor<784x128xf32>> -> tensor<784x128xf32> %11 = "mhlo.reshape"(%arg0) : (tensor<1x28x28x1xf32>) -> tensor<1x784xf32> %12 = "mhlo.dot"(%11, %10) : (tensor<1x784xf32>, tensor<784x128xf32>) -> tensor<1x128xf32> %13 = "mhlo.broadcast_in_dim"(%9) {broadcast_dimensions = dense<1> : tensor<1xi64>} : (tensor<128xf32>) -> tensor<1x128xf32>
diff --git a/iree/samples/static_library/static_library_demo.c b/iree/samples/static_library/static_library_demo.c index 22e8127..47b32c2 100644 --- a/iree/samples/static_library/static_library_demo.c +++ b/iree/samples/static_library/static_library_demo.c
@@ -175,7 +175,8 @@ } // Read back the results and ensure we got the right values. - iree_hal_buffer_mapping_t mapped_memory = {0}; + iree_hal_buffer_mapping_t mapped_memory; + memset(&mapped_memory, 0, sizeof(mapped_memory)); if (iree_status_is_ok(status)) { status = iree_hal_buffer_map_range( iree_hal_buffer_view_buffer(ret_buffer_view),
diff --git a/iree/samples/vision/mnist.mlir b/iree/samples/vision/mnist.mlir index 03fd6e1..f408e3a 100644 --- a/iree/samples/vision/mnist.mlir +++ b/iree/samples/vision/mnist.mlir
@@ -17,17 +17,17 @@ flow.variable @"__iree_flow___sm_node24__model.layer-2.kernel" dense<"0x695C23BEBF4FA5BDFED4903E7B2AD2BC6EEAC6BED4DDF83DAEB7DCBDF725B9BD9065993DB077BE3D69CB79BEDADA32BEF5A00EBECB63B03EAB2238BD619285BEDA7BF5BDC1BAA0BE9CA5CB3E720E5DBE9A4EA6BE6AF28A3E279C63BD5A72C23D0985F23D42E1543ED355B5BEB359F83D16FEC13DC297423D1EA3583EC2CD46BE59B5C2BD73893C3D385F573D890D2CBE703FF5BD62934D3E9065153EA29407BE77AD883E740E5BBEAAF08A3E206042BECD0D003E7380843E1BE2AA3E9FE8A0BEFE6F6CBDF95D22BEBFD29ABC77BB4CBE3C97003E5F3B33BB39FA14BEF25A1ABED9A7F43E17CC4A3D078096BC771219BE24C0F4BDB48E983E327F393EA9041E3C6FC136BE62C1D4BE4219853E84D7D23EC43745BE739280BEBED991BED58E073DB9021BBECDB88B3E63FB4F3ED3A566BDB54202BE1D15B3BE945D8A3DC389FB3C7C8F9EBDD8C0F33D1BDF4B3E4BD3F8BB89C6F1BD25C5A2BDE30D15BE756EDD3D19E7CBBD47A684BC88B28EBD0C13963C714F253EE9B2DA3E0DF206BE5F193BBEF81F8BBE22D72E3D0AEB4ABBBCA4AA3D914C21BE19B255BE6DBC573E1A81463E33D30BBEAFABC7BD1B72A0BEA726D53E8133003E7367213E0D2F8BBEC1B746BEABCBA23D2ACAA33E96FF91BDC63FA7BC36790C3C9ECD1D3E983A7A3E0AFDEDBD17FE2A3ECEE74A3EC3C46ABE99702C3D5F2C12BEA29371BDBDEB923DBCDEDA39D8FAD836C50405BD150A14BE5FE854BCC621723C9C68263EBB7AC73EFF9281BEEFA56D3E137931BE6405BEBC5E1D103E43F425BE348150BEE8EB9CBB65FC223EF106B2BE1FE9E83D3CEC743E2D40ACBD856F0F3EEBB8273DBCD34A3E183206BEEEE5D33D05DC203EA1E2EA3D4D9087BC224335BDA2AC53BEB208DCBD54F3B43D3FD38CBD00D7263E911E9BBE9395AABE974F04BE5181003F1CD2753EB0DE50BED1CCF2BD97A879BC0B01BDBEF0F6813EDC4AC93EDC7821BEFA6293BE99F2963E782FAB3D25182E3E4BD275BAE91A90BE968065BEAE1398BDFE1002BEDACE05BE37E81DBCA3FE673E4CFC75BEB85C8EBD6BC376BEBD0B12BC58D021BD76F781BDEFCD933E8B65483D0FC4113E3757623C81869C3C802FCD3D82CE22BE9C59E8BADC2F263D5C140F3E01519E3EEBAD893D6DD9993DA767913D436608BE2147C13E918A223DE399173EEBB115BE926C803D44BC11BE83F52EBE14F1AD3C92E925BE67AA38BEBED1013ECC20893E86A8F5BDCD32553E173A683EA536013E6B2140BEAE1774BDB51D9DBEED7E103EBEAD463E914F8DBE52399F3D0F033B3EF5B7333E2D342CBED14B0EBE28EA7A3EA9C2983DFD75793E31844EBE0D70
50BEF37FBEBE498D22BDEAC6D7BC4AD6B93DDE3949BEA7E6B8BDE5E1553ECC6E8DBD236ACBBD58D095BEAA88CCBD44352FBD952A6ABE240FC43ED9724F3C9D3668BE448D203F041E65BE0FB227BE3025D4BED1035DBE6FDDA13ED8AF90BCD4EAFE3DAD3F47BE137CD2BE068A7F3B7E9D203F05FFEFBE4ACB983EEF30A7BE238B0ABE0A287FBEF9956C3E47D5B5BC687B823DACAAC33CCEE10A3EA6C249BD056800BDF919A93EA852483DC204BBBD9840F03DF8BA0ABE83A9D13EB0EFFBBE511AE6BBE0DCBFBB1F4267BD5D9C343EE37B853D633FB4BDE2ACF6BC66D65EBEDA08BA3D7F7B1A3E1CA0A53D0EFDE5BA6638563E0B3E1CBEDA32ABBDCE70993C22BCB73D13F323BEA56A7B3E0E7C09BEBE4A65BDAC9FFA3ED6BCF0BEDA41DD3D79DBF33D37B7A2BDFDA448BE81CEC63D30C86BBE25CF93BE6D519DBE6728C33DF26B233EA3F6D63E5B9E0B3E6C31C9BE4F573CBED7944A3E5AE8AABE463586BD0AF5C2BE90C6DA3EE9A1803EB744C13EFE29133E458F7BBEB28C893EF14DBD3E3F59413E6C12333E67F689BE9D64AEBDB534153E5512393EB5827FBD28AA9E3D8C6B2CBE9ACCCE3D17CDB73D1565783ED86C57BD8CF3933EB7B91A3E007E3A3D003A6D3D1E3D463E0687853C8F36173EF4FB0A3E6446813E36411BBE6D16C7BDEAC31BBE47A73CBDD8500EBEBB019E3EB04A3BBA50251BBE445DB7BD8AEE9A3D02C8813AFAB29B3D8386C63DCBA0DBBCF6147C3CAED4243E4D93EA3E2E61ADBE87392EBE79F8C43D8E80BA3ED3AC37BE48FE383EE52095BE77FF7E3EDCB04B3EED0120BEA1D3A0BEAAF6973EA55C9DBE493C9EBDC745DB3ED3A585BE431435BCF414C5BC99FD4DBDE4AD3B3E7D5EC53D6ABC8ABE4C4BD9BE48C18CBD66F147BD077F093EF99FAFBD8F7C7E3EAFE4143DD405013FC3810A3F5365B1BE9B191ABD18E7B5BE8875D93EA6CFACBED292C2BE405AF5BD88F5C43D33FFA73EA8B20A3D9720023E5DDAE53C626681BE35BF88BEA6DD50BEF8A3623EFDC9903ECFCB823EDFEEA13E2D5058BD714A74BD7BB2FBBD92B4DE3D89560A3E5EC483BE455FCD3D8D9627BEF52431BDBE29FEBD177AAD3EC03F993EA34AC0BD201DEDBC277764BD763D92BE4240D6BDFD7C9D3DB589083DAC7FAABD325451BECED065BD4B05363EC207003EE74F843E931C60BE743450BE4AE6993E4E8B203E90E1963E5392BA3DB4D8F4BD9BF2933E7BB0863CBC9945BEEB3CF0BE11CF033E2C3AD2BE50499B3EF5968CBDFF2B3D3E6C8CB23C9045C03EC2CDE5BD8B71D3BE3E539B3E6A37C0BE3E8C203DFDE29A3E5E6B40BD1C38B8BDBFAF8BBDE934723EE11396BDBDF9EE3D8932BBBE53FBAABDFD69FD3CA722D5BD569C353DCA5DE43D1CF8B43D613D8A3E8855FABD78BF32BDAB8A
C23D690B8DBE4A579A3E2288843ECE44C3BE79DD6BBEE44A39BE7E1B1A3DA120E73C16A8753D60209FBE35C3EA3E2621FF3D5CD9E7BB3F6A9C3E1EC8F5BDB1F38C3EF06E1D3EBEFCA5BD1A73373EFE13373ED99C58BDD3EB753E07EDE23C0045653E64C9ABBD2258C4BDBF18263D54C85FBE11A19B3E1ACF94BE2280BDBE2A24693E3A1B513D42355B3EE29876BD7C7DF1BEB1E9A63D61CA9CBEF609503E9AA4353EC3F5A03E5854AA3DA32D95BDC26C3A3E7DD85B3D59CAF33C0CC2173D27EE74BDB406313ECC9D83BEEB4476BEA1CE963C92D5B5BD707119BE4F45C33E096D48BE1020AF3EAEB9C8BD916CAF3EA71ECFBE8319B5BE36B30F3E9815463DDE8C8E3EE1FFB63E343B2C3E34FB483C58AAD0BC42AABDBD07C9963BA789FE3D9AAFD83DB491A13E93444B3E3CD8BF3E0476AFBE5B13263E04D768BD73087BBE72A8D83DF1E9313EBFFE283E92A7EA3E5104D63DE2FB483E55DD5EBEE606EC3CF66246BDB3F9BFBE83E6AE3EAD9B9ABE8DB5C0BC7484303E4AADC03EB09AE73D462380BEF1678B3DD26DA53CDC81B3BDBAFDB63DC93C5F3E67C5C4BE0A9A8D3E3F05493E2C1868BEBADE163EC492423E5F3173BE313266BE31B0343D52AED23D558BFFBC238AF4BD7CD1EEBDA21C883E0256BEBEC784C23E443D7FBE8EE6483E3B05E6BDC02B01BEC5402B3E448758BE7207583E9CFDACBE88821EBEA569BF3E25E841BE9D608B3E5CDC1B3E6E2EDBBE2EBD353CF5147E3D3E86D2BD67483A3E5AF14FBE824036BD273497BEB5946E3D84CA4FBE2F4D03BE7812433E64A4473E1783813E7F50C93C16FC703DD1503B3E1E28553EE062813EA32584BE0FA0D5BD397E423DEAD290BDEDA4373DA75D46BEB6C820BD4C55D13D15CD22BE5FF6803D44ED3D3E288B3EBE6D6B6CBE640880BDA0C2383E972C1B3E5D5B1A3ED054703E36BE8C3D43B01A3EDF045BBDA851F0BE67F26D3D0892583E578BBABE5A6C2E3EFD3B5B3D7197903E195C4B3E7F696CBE7D24D5BE3621883EB5FB833ECD44753E99CD34BE02FA88BEEEA10CBEB5B846BE1FA8E13D8F76E73CB307A5BE0A0E19BE2C6A413E3A9DF6BE2146BFBE2F008FBE52ADE33E9464B23DD1549F3E32F0ADBD8CD2EDBD2E3B993E358C633E817034BE83E035BE4CB330BE2827753DE043D0BEC5725EBEDA5F883C89E82FBE834DFF3DEE61DF3D90AFA53DC28F11BE03CB70BE809C8E3C01D481BE7EB822BE0669553E1D0EAABC3CDF04BE30FA86BE6E58B63E88BFBBBD5BB3383E67808EBE5A35F13D8D69253E57EE893DD9C2D43C2E37823E1958ABBE90F900BEB274C93DBE0BAC3D44486D3E4E96B1BEBB60463ED5A3583E6C59D7BDF4D9C73EAA4199BE55FA963EA823B2BDD6165BBE5326A4BE8349A93ECF6A62BDD802CD3EE159
91BE8E2476BE800681BE1807DD3C044A68BEF3EE963EC9853FBDBA2AA23E70F6B5BE515789BC411E95BE7DE7BD3EC2100EBEF2A3E6BC65BD46BE9E3664BED10F293E420F103E062DDABC659C86BE5C0A7A3E598D1F3E375886BE9E45A33E482D9BBE116985BE78C0923E13ED463E02FB48BC4EDE833DE00CC53D586522BCE9AC54BDE1A86DBECBEA8BBE57C21ABECB20C53D5A64A53E82865BBE20ABCEBD76B5603E07AA233E23DD91BE60028BBD64B088BDECCFDFBE1DDEE13EC1C63F3E0814543E0F073F3EBC04D23E1C6F8E3E92EA83BC99E8373E6A0BA0BD56B584BC058B6E3D511445BE963B1BBCA0129A3D16E9EF3D42A86DBED763153EAF6480BC862C8CBEDCBB31BE8860EDBBB529013FD86E0CBE217C1BBE0EEA48BE63FAAF3D0838DBBD392D9E3ED42BDDBE18A98EBE191F33BD8032743ECA60933D00AB9CBBB4E95ABDAB9BBE3ED253D0BB8317B13CA555F9BD16DE59BC6F47153E94D67F3EF0C1403E2CB08BBDB379A53D2D182A3E48CF913EAB37573E19BED4BDA8E822BE38E65C3EC038433D36AB67BD8D586D3D1612E63DB20337BE212B823DF5BE0D3E03A70C3EC91752BE456E363DDF5E193C5504E13D964C573DEA1E143ED3C1433E9D2A813E2189ABBEDC6E6B3EF48E61BEC1DD75BEF2D9C5BC5FE3B63D7E3C34BE7A5BD1BD09B3F43D757BA6BE5CF6AEBE9BC6A63C77F48C3C67B2493E2AE7843E799C8FBC0D40A73E6B855A3ED0B9793D989390BE1B810ABDDF21B6BEB7DA0DBE01C4353E3C009A3DEA1395BE5F0244BED4F68E3E8AE0A93C5236843DE16A503ED4468FBE6740913E886F30BED70BF83D1F8B693EEA87383EA957E33D9E2B623B73A2833D9D28D23A101716BE814FB83D04935D3D5228853E40CF693E88461EBE97D48FBECE031F3EE2253B3E6E4E88BE1A81863E86502ABE7A808D3E437468BEF974323E43D3BEBE90086D3EB872183D6A9EC53DBCA6673EE40E8C3EFEBE5BBEDAFD083D299687BE92E91BBE6C5272BDF9B49DBD8EF8D3BD6F45863E389F12BCE322AA3D13A29D3EA6EF9DBE1208923EAD459CBE1FBF3B3EB4EBCFBEED7D013D1583373E0EC703BE56F902BC6E541B3E150E6EBEF7D0AF3EFDE92EBC8C6611BDA825223D895BAA3D1109D0BEA13E113D410210BEA699163E700AF83EE0EAE2BD9172233ED8DA77BE582F33BEE0DD663CA41E36BEC22F15BE084390BDD11C63BB44333B3E0D7B89BD543073BDDFCD113D07366ABD3D484E3E9595E43D2C20FD3DAACE8B3E419D86BE4FF4473EE2D9943DDA2C0A3EA60A573EBED1D13DD635B6BCE5AFDC3B913ED5BD5F950FBE5826D0BCA45A623E5F09CDBD22382BBE10D8253E60D3843E5BE9C4BD98AD25BE1D1E53BE27C3F0BC72E971BE37F5A8BDD04F6D3E1562593E401BAD3D585D
D9BD0300B3BEA2EA43BE35CBBABC96D5683EAF436FBEB9AA933E9688C2BD29586BBEC29CD53CEE7990BEBA29ACBEF2311DBE7D1C1EBE2428E03DF548F7BBE62C5EBEC668D03D39F98FBDF29151BE691579BB973CAA3E3BB3C43D11670EBE74FC7BBE5BCC0E3E34ED1D3D25408A3E7E94613EE607913D9E2481BE5C890E3E87E1A13E9A048EBE31A48C3E96A417BDF3AD833E644E643E85E8B4BECFD8213E72094EBE734170BD1B12553EEF747CBE241F96BDB0C7B9BA043C943EF16C12BDE5F8383E298633BCD48AA1BE3C66F2BDB9C84BBE36236B3E02574EBC334F19BEAE515C3EBFADFD3C47DD83BE59C80B3EA3B44FBE749E833E6718163E112051BD1849AB3E014288BD24A10E3E6EE1AB3CE0A752BE536F833C3D604E3D3C0CF4BD4745C93E437AACBE6F9F17BE6D9376BE8EA841BEDE26DC3D798200BEA18814BE0B2B923E7B8C32BE2E683FBE84CE943DE9B310BE509EA8BDDBB1E73DA4A3A33E5B861E3EFDAE1ABE2417A03D48C258BE077CC63E929401BF577EF3BD6A19AF3CA26FD23C13F6203E6BC8D2BE914C4F3C8389CD3E1132593D7FA3EFBDB7414A3E42A89EBE31CD1CBEC54908BDE2C423BDE4D6ADBEABCBAC3E6CCB4ABD78BDB63EE6D998BDAE4144BE9FE238BE6C7F3C3EBB324A3E2E1CEEBDD10358BEC63704BEA6BD4ABC1C8FC93BEA2040BE4774D83D1119053DE7B0E03D4251DF3D715A8F3E185CA3BC99BE813DC165513B0763AABEA0D696BEEEAB5CBE3D576C3E9CDD9B3EB879B1BD1DADAEBEBC36EDBC00A160BDD4FC313CE62B9BBE9E83A7BE2D1FBD3D57AD5D3E0B510A3B4F06423C457EC2BE655D19BE1DAE443E2C8F143ED4FA8D3DCCA964BEBA8276BE2E89E23EE8B13E3E6368C9BE7049DDBDB8F481BEC3A9143D25695DBEDA6B74BD837A903D6F491ABEED3A0C3D197F07BECB2DE0BA610340BD29F87B3E5C97C1BB676C123E2797703D572F303E0134E5BDB1EC26BEC9A6B63E147BACBD6CB5503D198E37BE3E37D7BD616C22BE76C78CBDB84751BE72CA863E200336BE4E54F13D378573BEB6AA61BBC1D8863D4AD897BEF792903EAC18BCBC561F5BBDA07E7DBCBA1EA83D20C910BE459837BE3F67CF3D6497FC3D511531BE7B2344BE47A6303E01DA7A3E73B132BE00F0D03DD4A38CBC3EC48ABED9230F3EF71A68BEE8428DBD5E65BB3D2042B03ECFB7BC3E143F50BE4A37D73D54370C3DAAC138BE4C00873EFD6FA43DBBFD183E00DE3DBC995DBEBEFCE0C53C5500F23E9AC7E03D0360963D696ACF3EFF0B73BE31803B3E9D1B103E6513C6BE486770BE5A9DC6BC4E136BBD3C07223E83F3A73E33844F3C81269CBE8E2D853E522142BE998126BE669417BDE9EB90BE735FB3BD221796BE54074F3DCC278C3D665E09BD243D78BD82147B3D3C3F
7D3EA26F9B3EAA743CBEA87F5CBE623D2BBEF91618BEF019DEBD682C8E3E4418893EB0C323BD737CB43D32225F3E4B7931BEF39D8D3EF07E99BE35A2803DD078263E201D7A3EFAEFBDBD82CE7EBE5A73623DC5EF463C54678DBE9C7970BD7B558CBD657C7E3D5DE0DD3C47806DBEC13CAC3E1A4CD53E27EBA3BE604B813DF38B8A3C834553BD76FA073EDA840EBEE9E5CABC98F04EBD8F4BF1BD50B7913EBC48ED3CDD1A4B3D"> : tensor<128x10xf32> attributes {sym_visibility = "private"} flow.variable @"__iree_flow___sm_node25__model.layer-2.bias" dense<[-0.11818973, 0.115988247, 0.0210834835, -0.0308276638, 0.0101165017, 0.119799189, 0.00523598073, 0.117924452, -0.217200637, -0.0239296928]> : tensor<10xf32> attributes {sym_visibility = "private"} func @predict(%arg0: tensor<1x28x28x1xf32>) -> tensor<1x10xf32> attributes {iree.module.export, iree.reflection = {abi = "sip", abiv = 1 : i32, sip = "I8!S5!k0_0R3!_0"}} { - %0 = flow.variable.address @"__iree_flow___sm_node17__model.layer-1.kernel" : !iree.ptr<tensor<784x128xf32>> - %1 = flow.variable.address @"__iree_flow___sm_node18__model.layer-1.bias" : !iree.ptr<tensor<128xf32>> - %2 = flow.variable.address @"__iree_flow___sm_node24__model.layer-2.kernel" : !iree.ptr<tensor<128x10xf32>> - %3 = flow.variable.address @"__iree_flow___sm_node25__model.layer-2.bias" : !iree.ptr<tensor<10xf32>> + %0 = flow.variable.address @"__iree_flow___sm_node17__model.layer-1.kernel" : !util.ptr<tensor<784x128xf32>> + %1 = flow.variable.address @"__iree_flow___sm_node18__model.layer-1.bias" : !util.ptr<tensor<128xf32>> + %2 = flow.variable.address @"__iree_flow___sm_node24__model.layer-2.kernel" : !util.ptr<tensor<128x10xf32>> + %3 = flow.variable.address @"__iree_flow___sm_node25__model.layer-2.bias" : !util.ptr<tensor<10xf32>> %4 = mhlo.constant dense<0.000000e+00> : tensor<1x128xf32> %5 = mhlo.constant dense<0xFF800000> : tensor<f32> %6 = mhlo.constant dense<0.000000e+00> : tensor<f32> - %7 = flow.variable.load.indirect %3 : !iree.ptr<tensor<10xf32>> -> tensor<10xf32> - %8 = flow.variable.load.indirect %2 : 
!iree.ptr<tensor<128x10xf32>> -> tensor<128x10xf32> - %9 = flow.variable.load.indirect %1 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %10 = flow.variable.load.indirect %0 : !iree.ptr<tensor<784x128xf32>> -> tensor<784x128xf32> + %7 = flow.variable.load.indirect %3 : !util.ptr<tensor<10xf32>> -> tensor<10xf32> + %8 = flow.variable.load.indirect %2 : !util.ptr<tensor<128x10xf32>> -> tensor<128x10xf32> + %9 = flow.variable.load.indirect %1 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %10 = flow.variable.load.indirect %0 : !util.ptr<tensor<784x128xf32>> -> tensor<784x128xf32> %11 = "mhlo.reshape"(%arg0) : (tensor<1x28x28x1xf32>) -> tensor<1x784xf32> %12 = "mhlo.dot"(%11, %10) : (tensor<1x784xf32>, tensor<784x128xf32>) -> tensor<1x128xf32> %13 = "mhlo.broadcast_in_dim"(%9) {broadcast_dimensions = dense<1> : tensor<1xi64>} : (tensor<128xf32>) -> tensor<1x128xf32>
diff --git a/iree/schemas/bytecode_module_def.fbs b/iree/schemas/bytecode_module_def.fbs index 297a44e..4ea31b3 100644 --- a/iree/schemas/bytecode_module_def.fbs +++ b/iree/schemas/bytecode_module_def.fbs
@@ -116,6 +116,7 @@ // Offset and length within the larger bytecode data block. bytecode_offset:int32; bytecode_length:int32; + // TODO(benvanik): remove counts and embed directly in bytecode. // Total number of i32 registers used by the function. i32_register_count:int16; @@ -123,6 +124,62 @@ ref_register_count:int16; } +// mlir/IR/BuiltinLocationAttributes.td : CallSiteLoc +table CallSiteLocDef { + callee:int32; + caller:int32; +} + +// mlir/IR/BuiltinLocationAttributes.td : FileLineColLoc +table FileLineColLocDef { + filename:string; + line:int32; + column:int32; +} + +// mlir/IR/BuiltinLocationAttributes.td : FusedLoc +table FusedLocDef { + metadata:string; + locations:[int32]; +} + +// mlir/IR/BuiltinLocationAttributes.td : FusedLoc +table NameLocDef { + name:string; + child_location:int32; +} + +// A location - possibly nested. +union LocationTypeDef { + CallSiteLocDef, + FileLineColLocDef, + FusedLocDef, + NameLocDef, +} + +// Maps a relative bytecode offset within a function to a source location. +struct BytecodeLocationDef { + // Bytecode offset of the start of the operation. + bytecode_offset:int32; + // Index into the debug database location_table. + location:int32; +} + +// Debug data for a single function mapping back into source IR. +table FunctionSourceMapDef { + // Operation locations for all ops within the function. + locations:[BytecodeLocationDef]; +} + +// VM debug information database. +table DebugDatabaseDef { + // Location table. Source maps reference this table. + location_table:[LocationTypeDef]; + + // Internal function source maps; 1:1 with the module function_descriptors. + functions:[FunctionSourceMapDef]; +} + // Defines a bytecode module containing the information required to serve the // iree_vm_module_interface_t interface. // @@ -176,6 +233,9 @@ // Bytecode contents. One large buffer containing all of the function op data. bytecode_data:[uint8]; + + // Optional module debug database. 
+ debug_database:DebugDatabaseDef; } root_type BytecodeModuleDef;
diff --git a/iree/test/e2e/cuda_specific/dot.mlir b/iree/test/e2e/cuda_specific/dot.mlir index b5470e8..b10c1e4 100644 --- a/iree/test/e2e/cuda_specific/dot.mlir +++ b/iree/test/e2e/cuda_specific/dot.mlir
@@ -1,11 +1,11 @@ func @f32() { - %lhs = iree.unfoldable_constant dense<[ + %lhs = util.unfoldable_constant dense<[ [15.0, 14.0, 13.0], [12.0, 11.0, 10.0], [09.0, 08.0, 07.0], [06.0, 05.0, 04.0], [03.0, 02.0, 01.0]]> : tensor<5x3xf32> - %rhs = iree.unfoldable_constant dense<[ + %rhs = util.unfoldable_constant dense<[ [15.0, 14.0, 13.0, 12.0, 11.0], [10.0, 09.0, 08.0, 07.0, 06.0], [05.0, 04.0, 03.0, 02.0, 01.0]]> : tensor<3x5xf32> @@ -21,8 +21,8 @@ // large aligned case that can be vectorized. func @large_aligned() { - %lhs = iree.unfoldable_constant dense<1.0> : tensor<252x1024xf32> - %rhs = iree.unfoldable_constant dense<0.4> : tensor<1024x500xf32> + %lhs = util.unfoldable_constant dense<1.0> : tensor<252x1024xf32> + %rhs = util.unfoldable_constant dense<0.4> : tensor<1024x500xf32> %res = "mhlo.dot"(%lhs, %rhs) : (tensor<252x1024xf32>, tensor<1024x500xf32>) -> tensor<252x500xf32> check.expect_almost_eq_const(%res, dense<409.596> : tensor<252x500xf32>) : tensor<252x500xf32> return
diff --git a/iree/test/e2e/models/bert_encoder_unrolled_fake_weights.mlir b/iree/test/e2e/models/bert_encoder_unrolled_fake_weights.mlir index 03aac02..4fc01be 100644 --- a/iree/test/e2e/models/bert_encoder_unrolled_fake_weights.mlir +++ b/iree/test/e2e/models/bert_encoder_unrolled_fake_weights.mlir
@@ -1115,1122 +1115,1122 @@ flow.variable @"__iree_flow_cls/squad/output_bias" dense<0.1> : tensor<2xf32> attributes {sym_visibility = "private"} flow.variable @"__iree_flow_cls/squad/output_weights" dense<1.0> : tensor<2x512xf32> attributes {sym_visibility = "private"} func @serving_default() attributes { iree.module.export} { - %arg0 = iree.unfoldable_constant dense<0> : tensor<1x384xi32> - %arg1 = iree.unfoldable_constant dense<0> : tensor<1x384xi32> - %arg2 = iree.unfoldable_constant dense<0> : tensor<1x384xi32> - %0 = flow.variable.address @"__iree_flow_bert/embeddings/FakeLayerNorm/beta" : !iree.ptr<tensor<512xf32>> - %1 = flow.variable.address @"__iree_flow_bert/embeddings/FakeLayerNorm/gamma" : !iree.ptr<tensor<512xf32>> - %2 = flow.variable.address @"__iree_flow_bert/embeddings/embedding_transformation/bias" : !iree.ptr<tensor<512xf32>> - %3 = flow.variable.address @"__iree_flow_bert/embeddings/embedding_transformation/kernel" : !iree.ptr<tensor<384x512xf32>> - %4 = flow.variable.address @"__iree_flow_bert/embeddings/position_embeddings" : !iree.ptr<tensor<512x512xf32>> - %5 = flow.variable.address @"__iree_flow_bert/embeddings/token_type_embeddings" : !iree.ptr<tensor<2x512xf32>> - %6 = flow.variable.address @"__iree_flow_bert/embeddings/word_embeddings" : !iree.ptr<tensor<30522x128xf32>> - %7 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/attention/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %8 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/attention/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %9 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/attention/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %10 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/attention/output/dense/kernel" : !iree.ptr<tensor<128x128xf32>> - %11 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/attention/self/key/bias" : !iree.ptr<tensor<128xf32>> - %12 = flow.variable.address 
@"__iree_flow_bert/encoder/layer_0/attention/self/key/kernel" : !iree.ptr<tensor<128x128xf32>> - %13 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/attention/self/query/bias" : !iree.ptr<tensor<128xf32>> - %14 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/attention/self/query/kernel" : !iree.ptr<tensor<128x128xf32>> - %15 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/attention/self/value/bias" : !iree.ptr<tensor<128xf32>> - %16 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/attention/self/value/kernel" : !iree.ptr<tensor<512x128xf32>> - %17 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/bottleneck/attention/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %18 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/bottleneck/attention/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %19 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/bottleneck/attention/dense/bias" : !iree.ptr<tensor<128xf32>> - %20 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/bottleneck/attention/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %21 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/bottleneck/input/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %22 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/bottleneck/input/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %23 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/bottleneck/input/dense/bias" : !iree.ptr<tensor<128xf32>> - %24 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/bottleneck/input/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %25 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/ffn_layer_0/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %26 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/ffn_layer_0/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %27 = flow.variable.address 
@"__iree_flow_bert/encoder/layer_0/ffn_layer_0/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %28 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/ffn_layer_0/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %29 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/ffn_layer_0/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %30 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/ffn_layer_0/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %31 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/ffn_layer_1/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %32 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/ffn_layer_1/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %33 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/ffn_layer_1/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %34 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/ffn_layer_1/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %35 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/ffn_layer_1/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %36 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/ffn_layer_1/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %37 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/ffn_layer_2/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %38 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/ffn_layer_2/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %39 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/ffn_layer_2/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %40 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/ffn_layer_2/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %41 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/ffn_layer_2/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %42 = flow.variable.address 
@"__iree_flow_bert/encoder/layer_0/ffn_layer_2/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %43 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %44 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %45 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %46 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %47 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/output/bottleneck/FakeLayerNorm/beta" : !iree.ptr<tensor<512xf32>> - %48 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/output/bottleneck/FakeLayerNorm/gamma" : !iree.ptr<tensor<512xf32>> - %49 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/output/bottleneck/dense/bias" : !iree.ptr<tensor<512xf32>> - %50 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/output/bottleneck/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %51 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %52 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %53 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/attention/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %54 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/attention/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %55 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/attention/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %56 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/attention/output/dense/kernel" : !iree.ptr<tensor<128x128xf32>> - %57 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/attention/self/key/bias" : !iree.ptr<tensor<128xf32>> - %58 = 
flow.variable.address @"__iree_flow_bert/encoder/layer_1/attention/self/key/kernel" : !iree.ptr<tensor<128x128xf32>> - %59 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/attention/self/query/bias" : !iree.ptr<tensor<128xf32>> - %60 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/attention/self/query/kernel" : !iree.ptr<tensor<128x128xf32>> - %61 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/attention/self/value/bias" : !iree.ptr<tensor<128xf32>> - %62 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/attention/self/value/kernel" : !iree.ptr<tensor<512x128xf32>> - %63 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/bottleneck/attention/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %64 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/bottleneck/attention/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %65 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/bottleneck/attention/dense/bias" : !iree.ptr<tensor<128xf32>> - %66 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/bottleneck/attention/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %67 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/bottleneck/input/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %68 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/bottleneck/input/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %69 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/bottleneck/input/dense/bias" : !iree.ptr<tensor<128xf32>> - %70 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/bottleneck/input/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %71 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/ffn_layer_0/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %72 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/ffn_layer_0/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %73 = flow.variable.address 
@"__iree_flow_bert/encoder/layer_1/ffn_layer_0/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %74 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/ffn_layer_0/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %75 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/ffn_layer_0/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %76 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/ffn_layer_0/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %77 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/ffn_layer_1/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %78 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/ffn_layer_1/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %79 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/ffn_layer_1/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %80 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/ffn_layer_1/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %81 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/ffn_layer_1/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %82 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/ffn_layer_1/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %83 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/ffn_layer_2/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %84 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/ffn_layer_2/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %85 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/ffn_layer_2/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %86 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/ffn_layer_2/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %87 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/ffn_layer_2/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %88 = flow.variable.address 
@"__iree_flow_bert/encoder/layer_1/ffn_layer_2/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %89 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %90 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %91 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %92 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %93 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/output/bottleneck/FakeLayerNorm/beta" : !iree.ptr<tensor<512xf32>> - %94 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/output/bottleneck/FakeLayerNorm/gamma" : !iree.ptr<tensor<512xf32>> - %95 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/output/bottleneck/dense/bias" : !iree.ptr<tensor<512xf32>> - %96 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/output/bottleneck/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %97 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %98 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %99 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/attention/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %100 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/attention/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %101 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/attention/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %102 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/attention/output/dense/kernel" : !iree.ptr<tensor<128x128xf32>> - %103 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/attention/self/key/bias" : !iree.ptr<tensor<128xf32>> - %104 = 
flow.variable.address @"__iree_flow_bert/encoder/layer_10/attention/self/key/kernel" : !iree.ptr<tensor<128x128xf32>> - %105 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/attention/self/query/bias" : !iree.ptr<tensor<128xf32>> - %106 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/attention/self/query/kernel" : !iree.ptr<tensor<128x128xf32>> - %107 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/attention/self/value/bias" : !iree.ptr<tensor<128xf32>> - %108 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/attention/self/value/kernel" : !iree.ptr<tensor<512x128xf32>> - %109 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/bottleneck/attention/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %110 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/bottleneck/attention/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %111 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/bottleneck/attention/dense/bias" : !iree.ptr<tensor<128xf32>> - %112 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/bottleneck/attention/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %113 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/bottleneck/input/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %114 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/bottleneck/input/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %115 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/bottleneck/input/dense/bias" : !iree.ptr<tensor<128xf32>> - %116 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/bottleneck/input/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %117 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/ffn_layer_0/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %118 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/ffn_layer_0/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %119 = flow.variable.address 
@"__iree_flow_bert/encoder/layer_10/ffn_layer_0/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %120 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/ffn_layer_0/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %121 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/ffn_layer_0/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %122 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/ffn_layer_0/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %123 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/ffn_layer_1/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %124 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/ffn_layer_1/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %125 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/ffn_layer_1/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %126 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/ffn_layer_1/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %127 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/ffn_layer_1/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %128 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/ffn_layer_1/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %129 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/ffn_layer_2/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %130 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/ffn_layer_2/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %131 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/ffn_layer_2/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %132 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/ffn_layer_2/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %133 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/ffn_layer_2/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %134 = 
flow.variable.address @"__iree_flow_bert/encoder/layer_10/ffn_layer_2/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %135 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %136 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %137 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %138 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %139 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/output/bottleneck/FakeLayerNorm/beta" : !iree.ptr<tensor<512xf32>> - %140 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/output/bottleneck/FakeLayerNorm/gamma" : !iree.ptr<tensor<512xf32>> - %141 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/output/bottleneck/dense/bias" : !iree.ptr<tensor<512xf32>> - %142 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/output/bottleneck/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %143 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %144 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %145 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/attention/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %146 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/attention/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %147 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/attention/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %148 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/attention/output/dense/kernel" : !iree.ptr<tensor<128x128xf32>> - %149 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/attention/self/key/bias" : 
!iree.ptr<tensor<128xf32>> - %150 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/attention/self/key/kernel" : !iree.ptr<tensor<128x128xf32>> - %151 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/attention/self/query/bias" : !iree.ptr<tensor<128xf32>> - %152 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/attention/self/query/kernel" : !iree.ptr<tensor<128x128xf32>> - %153 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/attention/self/value/bias" : !iree.ptr<tensor<128xf32>> - %154 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/attention/self/value/kernel" : !iree.ptr<tensor<512x128xf32>> - %155 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/bottleneck/attention/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %156 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/bottleneck/attention/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %157 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/bottleneck/attention/dense/bias" : !iree.ptr<tensor<128xf32>> - %158 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/bottleneck/attention/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %159 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/bottleneck/input/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %160 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/bottleneck/input/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %161 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/bottleneck/input/dense/bias" : !iree.ptr<tensor<128xf32>> - %162 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/bottleneck/input/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %163 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/ffn_layer_0/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %164 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/ffn_layer_0/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %165 
= flow.variable.address @"__iree_flow_bert/encoder/layer_11/ffn_layer_0/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %166 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/ffn_layer_0/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %167 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/ffn_layer_0/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %168 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/ffn_layer_0/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %169 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/ffn_layer_1/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %170 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/ffn_layer_1/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %171 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/ffn_layer_1/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %172 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/ffn_layer_1/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %173 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/ffn_layer_1/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %174 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/ffn_layer_1/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %175 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/ffn_layer_2/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %176 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/ffn_layer_2/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %177 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/ffn_layer_2/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %178 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/ffn_layer_2/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %179 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/ffn_layer_2/output/dense/bias" : !iree.ptr<tensor<128xf32>> - 
%180 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/ffn_layer_2/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %181 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %182 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %183 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %184 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %185 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/output/bottleneck/FakeLayerNorm/beta" : !iree.ptr<tensor<512xf32>> - %186 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/output/bottleneck/FakeLayerNorm/gamma" : !iree.ptr<tensor<512xf32>> - %187 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/output/bottleneck/dense/bias" : !iree.ptr<tensor<512xf32>> - %188 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/output/bottleneck/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %189 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %190 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %191 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/attention/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %192 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/attention/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %193 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/attention/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %194 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/attention/output/dense/kernel" : !iree.ptr<tensor<128x128xf32>> - %195 = flow.variable.address 
@"__iree_flow_bert/encoder/layer_12/attention/self/key/bias" : !iree.ptr<tensor<128xf32>> - %196 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/attention/self/key/kernel" : !iree.ptr<tensor<128x128xf32>> - %197 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/attention/self/query/bias" : !iree.ptr<tensor<128xf32>> - %198 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/attention/self/query/kernel" : !iree.ptr<tensor<128x128xf32>> - %199 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/attention/self/value/bias" : !iree.ptr<tensor<128xf32>> - %200 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/attention/self/value/kernel" : !iree.ptr<tensor<512x128xf32>> - %201 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/bottleneck/attention/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %202 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/bottleneck/attention/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %203 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/bottleneck/attention/dense/bias" : !iree.ptr<tensor<128xf32>> - %204 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/bottleneck/attention/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %205 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/bottleneck/input/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %206 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/bottleneck/input/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %207 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/bottleneck/input/dense/bias" : !iree.ptr<tensor<128xf32>> - %208 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/bottleneck/input/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %209 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/ffn_layer_0/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %210 = flow.variable.address 
@"__iree_flow_bert/encoder/layer_12/ffn_layer_0/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %211 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/ffn_layer_0/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %212 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/ffn_layer_0/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %213 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/ffn_layer_0/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %214 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/ffn_layer_0/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %215 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/ffn_layer_1/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %216 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/ffn_layer_1/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %217 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/ffn_layer_1/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %218 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/ffn_layer_1/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %219 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/ffn_layer_1/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %220 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/ffn_layer_1/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %221 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/ffn_layer_2/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %222 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/ffn_layer_2/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %223 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/ffn_layer_2/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %224 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/ffn_layer_2/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %225 = 
flow.variable.address @"__iree_flow_bert/encoder/layer_12/ffn_layer_2/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %226 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/ffn_layer_2/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %227 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %228 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %229 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %230 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %231 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/output/bottleneck/FakeLayerNorm/beta" : !iree.ptr<tensor<512xf32>> - %232 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/output/bottleneck/FakeLayerNorm/gamma" : !iree.ptr<tensor<512xf32>> - %233 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/output/bottleneck/dense/bias" : !iree.ptr<tensor<512xf32>> - %234 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/output/bottleneck/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %235 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %236 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %237 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/attention/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %238 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/attention/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %239 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/attention/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %240 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/attention/output/dense/kernel" 
: !iree.ptr<tensor<128x128xf32>> - %241 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/attention/self/key/bias" : !iree.ptr<tensor<128xf32>> - %242 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/attention/self/key/kernel" : !iree.ptr<tensor<128x128xf32>> - %243 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/attention/self/query/bias" : !iree.ptr<tensor<128xf32>> - %244 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/attention/self/query/kernel" : !iree.ptr<tensor<128x128xf32>> - %245 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/attention/self/value/bias" : !iree.ptr<tensor<128xf32>> - %246 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/attention/self/value/kernel" : !iree.ptr<tensor<512x128xf32>> - %247 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/bottleneck/attention/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %248 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/bottleneck/attention/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %249 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/bottleneck/attention/dense/bias" : !iree.ptr<tensor<128xf32>> - %250 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/bottleneck/attention/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %251 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/bottleneck/input/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %252 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/bottleneck/input/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %253 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/bottleneck/input/dense/bias" : !iree.ptr<tensor<128xf32>> - %254 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/bottleneck/input/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %255 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/ffn_layer_0/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %256 = 
flow.variable.address @"__iree_flow_bert/encoder/layer_13/ffn_layer_0/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %257 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/ffn_layer_0/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %258 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/ffn_layer_0/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %259 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/ffn_layer_0/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %260 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/ffn_layer_0/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %261 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/ffn_layer_1/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %262 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/ffn_layer_1/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %263 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/ffn_layer_1/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %264 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/ffn_layer_1/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %265 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/ffn_layer_1/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %266 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/ffn_layer_1/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %267 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/ffn_layer_2/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %268 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/ffn_layer_2/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %269 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/ffn_layer_2/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %270 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/ffn_layer_2/output/FakeLayerNorm/gamma" : 
!iree.ptr<tensor<128xf32>> - %271 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/ffn_layer_2/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %272 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/ffn_layer_2/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %273 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %274 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %275 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %276 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %277 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/output/bottleneck/FakeLayerNorm/beta" : !iree.ptr<tensor<512xf32>> - %278 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/output/bottleneck/FakeLayerNorm/gamma" : !iree.ptr<tensor<512xf32>> - %279 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/output/bottleneck/dense/bias" : !iree.ptr<tensor<512xf32>> - %280 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/output/bottleneck/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %281 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %282 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %283 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/attention/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %284 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/attention/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %285 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/attention/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %286 = flow.variable.address 
@"__iree_flow_bert/encoder/layer_14/attention/output/dense/kernel" : !iree.ptr<tensor<128x128xf32>> - %287 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/attention/self/key/bias" : !iree.ptr<tensor<128xf32>> - %288 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/attention/self/key/kernel" : !iree.ptr<tensor<128x128xf32>> - %289 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/attention/self/query/bias" : !iree.ptr<tensor<128xf32>> - %290 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/attention/self/query/kernel" : !iree.ptr<tensor<128x128xf32>> - %291 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/attention/self/value/bias" : !iree.ptr<tensor<128xf32>> - %292 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/attention/self/value/kernel" : !iree.ptr<tensor<512x128xf32>> - %293 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/bottleneck/attention/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %294 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/bottleneck/attention/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %295 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/bottleneck/attention/dense/bias" : !iree.ptr<tensor<128xf32>> - %296 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/bottleneck/attention/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %297 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/bottleneck/input/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %298 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/bottleneck/input/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %299 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/bottleneck/input/dense/bias" : !iree.ptr<tensor<128xf32>> - %300 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/bottleneck/input/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %301 = flow.variable.address 
@"__iree_flow_bert/encoder/layer_14/ffn_layer_0/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %302 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/ffn_layer_0/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %303 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/ffn_layer_0/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %304 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/ffn_layer_0/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %305 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/ffn_layer_0/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %306 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/ffn_layer_0/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %307 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/ffn_layer_1/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %308 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/ffn_layer_1/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %309 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/ffn_layer_1/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %310 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/ffn_layer_1/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %311 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/ffn_layer_1/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %312 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/ffn_layer_1/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %313 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/ffn_layer_2/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %314 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/ffn_layer_2/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %315 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/ffn_layer_2/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %316 = 
flow.variable.address @"__iree_flow_bert/encoder/layer_14/ffn_layer_2/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %317 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/ffn_layer_2/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %318 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/ffn_layer_2/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %319 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %320 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %321 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %322 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %323 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/output/bottleneck/FakeLayerNorm/beta" : !iree.ptr<tensor<512xf32>> - %324 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/output/bottleneck/FakeLayerNorm/gamma" : !iree.ptr<tensor<512xf32>> - %325 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/output/bottleneck/dense/bias" : !iree.ptr<tensor<512xf32>> - %326 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/output/bottleneck/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %327 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %328 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %329 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/attention/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %330 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/attention/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %331 = flow.variable.address 
@"__iree_flow_bert/encoder/layer_15/attention/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %332 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/attention/output/dense/kernel" : !iree.ptr<tensor<128x128xf32>> - %333 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/attention/self/key/bias" : !iree.ptr<tensor<128xf32>> - %334 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/attention/self/key/kernel" : !iree.ptr<tensor<128x128xf32>> - %335 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/attention/self/query/bias" : !iree.ptr<tensor<128xf32>> - %336 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/attention/self/query/kernel" : !iree.ptr<tensor<128x128xf32>> - %337 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/attention/self/value/bias" : !iree.ptr<tensor<128xf32>> - %338 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/attention/self/value/kernel" : !iree.ptr<tensor<512x128xf32>> - %339 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/bottleneck/attention/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %340 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/bottleneck/attention/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %341 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/bottleneck/attention/dense/bias" : !iree.ptr<tensor<128xf32>> - %342 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/bottleneck/attention/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %343 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/bottleneck/input/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %344 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/bottleneck/input/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %345 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/bottleneck/input/dense/bias" : !iree.ptr<tensor<128xf32>> - %346 = flow.variable.address 
@"__iree_flow_bert/encoder/layer_15/bottleneck/input/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %347 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/ffn_layer_0/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %348 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/ffn_layer_0/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %349 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/ffn_layer_0/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %350 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/ffn_layer_0/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %351 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/ffn_layer_0/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %352 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/ffn_layer_0/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %353 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/ffn_layer_1/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %354 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/ffn_layer_1/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %355 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/ffn_layer_1/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %356 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/ffn_layer_1/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %357 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/ffn_layer_1/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %358 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/ffn_layer_1/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %359 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/ffn_layer_2/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %360 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/ffn_layer_2/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %361 = 
flow.variable.address @"__iree_flow_bert/encoder/layer_15/ffn_layer_2/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %362 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/ffn_layer_2/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %363 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/ffn_layer_2/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %364 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/ffn_layer_2/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %365 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %366 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %367 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %368 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %369 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/output/bottleneck/FakeLayerNorm/beta" : !iree.ptr<tensor<512xf32>> - %370 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/output/bottleneck/FakeLayerNorm/gamma" : !iree.ptr<tensor<512xf32>> - %371 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/output/bottleneck/dense/bias" : !iree.ptr<tensor<512xf32>> - %372 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/output/bottleneck/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %373 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %374 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %375 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/attention/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %376 = flow.variable.address 
@"__iree_flow_bert/encoder/layer_16/attention/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %377 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/attention/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %378 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/attention/output/dense/kernel" : !iree.ptr<tensor<128x128xf32>> - %379 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/attention/self/key/bias" : !iree.ptr<tensor<128xf32>> - %380 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/attention/self/key/kernel" : !iree.ptr<tensor<128x128xf32>> - %381 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/attention/self/query/bias" : !iree.ptr<tensor<128xf32>> - %382 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/attention/self/query/kernel" : !iree.ptr<tensor<128x128xf32>> - %383 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/attention/self/value/bias" : !iree.ptr<tensor<128xf32>> - %384 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/attention/self/value/kernel" : !iree.ptr<tensor<512x128xf32>> - %385 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/bottleneck/attention/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %386 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/bottleneck/attention/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %387 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/bottleneck/attention/dense/bias" : !iree.ptr<tensor<128xf32>> - %388 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/bottleneck/attention/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %389 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/bottleneck/input/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %390 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/bottleneck/input/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %391 = flow.variable.address 
@"__iree_flow_bert/encoder/layer_16/bottleneck/input/dense/bias" : !iree.ptr<tensor<128xf32>> - %392 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/bottleneck/input/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %393 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/ffn_layer_0/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %394 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/ffn_layer_0/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %395 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/ffn_layer_0/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %396 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/ffn_layer_0/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %397 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/ffn_layer_0/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %398 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/ffn_layer_0/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %399 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/ffn_layer_1/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %400 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/ffn_layer_1/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %401 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/ffn_layer_1/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %402 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/ffn_layer_1/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %403 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/ffn_layer_1/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %404 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/ffn_layer_1/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %405 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/ffn_layer_2/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %406 = flow.variable.address 
@"__iree_flow_bert/encoder/layer_16/ffn_layer_2/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %407 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/ffn_layer_2/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %408 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/ffn_layer_2/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %409 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/ffn_layer_2/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %410 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/ffn_layer_2/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %411 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %412 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %413 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %414 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %415 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/output/bottleneck/FakeLayerNorm/beta" : !iree.ptr<tensor<512xf32>> - %416 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/output/bottleneck/FakeLayerNorm/gamma" : !iree.ptr<tensor<512xf32>> - %417 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/output/bottleneck/dense/bias" : !iree.ptr<tensor<512xf32>> - %418 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/output/bottleneck/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %419 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %420 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %421 = flow.variable.address 
@"__iree_flow_bert/encoder/layer_17/attention/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %422 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/attention/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %423 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/attention/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %424 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/attention/output/dense/kernel" : !iree.ptr<tensor<128x128xf32>> - %425 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/attention/self/key/bias" : !iree.ptr<tensor<128xf32>> - %426 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/attention/self/key/kernel" : !iree.ptr<tensor<128x128xf32>> - %427 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/attention/self/query/bias" : !iree.ptr<tensor<128xf32>> - %428 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/attention/self/query/kernel" : !iree.ptr<tensor<128x128xf32>> - %429 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/attention/self/value/bias" : !iree.ptr<tensor<128xf32>> - %430 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/attention/self/value/kernel" : !iree.ptr<tensor<512x128xf32>> - %431 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/bottleneck/attention/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %432 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/bottleneck/attention/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %433 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/bottleneck/attention/dense/bias" : !iree.ptr<tensor<128xf32>> - %434 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/bottleneck/attention/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %435 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/bottleneck/input/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %436 = flow.variable.address 
@"__iree_flow_bert/encoder/layer_17/bottleneck/input/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %437 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/bottleneck/input/dense/bias" : !iree.ptr<tensor<128xf32>> - %438 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/bottleneck/input/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %439 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/ffn_layer_0/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %440 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/ffn_layer_0/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %441 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/ffn_layer_0/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %442 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/ffn_layer_0/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %443 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/ffn_layer_0/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %444 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/ffn_layer_0/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %445 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/ffn_layer_1/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %446 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/ffn_layer_1/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %447 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/ffn_layer_1/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %448 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/ffn_layer_1/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %449 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/ffn_layer_1/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %450 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/ffn_layer_1/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %451 = flow.variable.address 
@"__iree_flow_bert/encoder/layer_17/ffn_layer_2/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %452 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/ffn_layer_2/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %453 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/ffn_layer_2/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %454 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/ffn_layer_2/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %455 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/ffn_layer_2/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %456 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/ffn_layer_2/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %457 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %458 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %459 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %460 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %461 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/output/bottleneck/FakeLayerNorm/beta" : !iree.ptr<tensor<512xf32>> - %462 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/output/bottleneck/FakeLayerNorm/gamma" : !iree.ptr<tensor<512xf32>> - %463 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/output/bottleneck/dense/bias" : !iree.ptr<tensor<512xf32>> - %464 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/output/bottleneck/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %465 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %466 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/output/dense/kernel" : 
!iree.ptr<tensor<512x128xf32>> - %467 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/attention/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %468 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/attention/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %469 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/attention/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %470 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/attention/output/dense/kernel" : !iree.ptr<tensor<128x128xf32>> - %471 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/attention/self/key/bias" : !iree.ptr<tensor<128xf32>> - %472 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/attention/self/key/kernel" : !iree.ptr<tensor<128x128xf32>> - %473 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/attention/self/query/bias" : !iree.ptr<tensor<128xf32>> - %474 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/attention/self/query/kernel" : !iree.ptr<tensor<128x128xf32>> - %475 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/attention/self/value/bias" : !iree.ptr<tensor<128xf32>> - %476 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/attention/self/value/kernel" : !iree.ptr<tensor<512x128xf32>> - %477 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/bottleneck/attention/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %478 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/bottleneck/attention/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %479 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/bottleneck/attention/dense/bias" : !iree.ptr<tensor<128xf32>> - %480 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/bottleneck/attention/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %481 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/bottleneck/input/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %482 = 
flow.variable.address @"__iree_flow_bert/encoder/layer_18/bottleneck/input/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %483 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/bottleneck/input/dense/bias" : !iree.ptr<tensor<128xf32>> - %484 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/bottleneck/input/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %485 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/ffn_layer_0/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %486 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/ffn_layer_0/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %487 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/ffn_layer_0/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %488 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/ffn_layer_0/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %489 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/ffn_layer_0/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %490 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/ffn_layer_0/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %491 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/ffn_layer_1/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %492 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/ffn_layer_1/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %493 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/ffn_layer_1/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %494 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/ffn_layer_1/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %495 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/ffn_layer_1/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %496 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/ffn_layer_1/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %497 = 
flow.variable.address @"__iree_flow_bert/encoder/layer_18/ffn_layer_2/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %498 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/ffn_layer_2/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %499 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/ffn_layer_2/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %500 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/ffn_layer_2/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %501 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/ffn_layer_2/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %502 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/ffn_layer_2/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %503 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %504 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %505 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %506 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %507 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/output/bottleneck/FakeLayerNorm/beta" : !iree.ptr<tensor<512xf32>> - %508 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/output/bottleneck/FakeLayerNorm/gamma" : !iree.ptr<tensor<512xf32>> - %509 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/output/bottleneck/dense/bias" : !iree.ptr<tensor<512xf32>> - %510 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/output/bottleneck/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %511 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %512 = flow.variable.address 
@"__iree_flow_bert/encoder/layer_18/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %513 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/attention/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %514 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/attention/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %515 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/attention/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %516 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/attention/output/dense/kernel" : !iree.ptr<tensor<128x128xf32>> - %517 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/attention/self/key/bias" : !iree.ptr<tensor<128xf32>> - %518 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/attention/self/key/kernel" : !iree.ptr<tensor<128x128xf32>> - %519 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/attention/self/query/bias" : !iree.ptr<tensor<128xf32>> - %520 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/attention/self/query/kernel" : !iree.ptr<tensor<128x128xf32>> - %521 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/attention/self/value/bias" : !iree.ptr<tensor<128xf32>> - %522 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/attention/self/value/kernel" : !iree.ptr<tensor<512x128xf32>> - %523 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/bottleneck/attention/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %524 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/bottleneck/attention/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %525 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/bottleneck/attention/dense/bias" : !iree.ptr<tensor<128xf32>> - %526 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/bottleneck/attention/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %527 = flow.variable.address 
@"__iree_flow_bert/encoder/layer_19/bottleneck/input/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %528 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/bottleneck/input/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %529 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/bottleneck/input/dense/bias" : !iree.ptr<tensor<128xf32>> - %530 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/bottleneck/input/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %531 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/ffn_layer_0/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %532 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/ffn_layer_0/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %533 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/ffn_layer_0/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %534 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/ffn_layer_0/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %535 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/ffn_layer_0/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %536 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/ffn_layer_0/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %537 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/ffn_layer_1/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %538 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/ffn_layer_1/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %539 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/ffn_layer_1/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %540 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/ffn_layer_1/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %541 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/ffn_layer_1/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %542 = flow.variable.address 
@"__iree_flow_bert/encoder/layer_19/ffn_layer_1/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %543 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/ffn_layer_2/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %544 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/ffn_layer_2/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %545 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/ffn_layer_2/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %546 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/ffn_layer_2/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %547 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/ffn_layer_2/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %548 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/ffn_layer_2/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %549 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %550 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %551 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %552 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %553 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/output/bottleneck/FakeLayerNorm/beta" : !iree.ptr<tensor<512xf32>> - %554 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/output/bottleneck/FakeLayerNorm/gamma" : !iree.ptr<tensor<512xf32>> - %555 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/output/bottleneck/dense/bias" : !iree.ptr<tensor<512xf32>> - %556 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/output/bottleneck/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %557 = flow.variable.address 
@"__iree_flow_bert/encoder/layer_19/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %558 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %559 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/attention/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %560 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/attention/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %561 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/attention/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %562 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/attention/output/dense/kernel" : !iree.ptr<tensor<128x128xf32>> - %563 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/attention/self/key/bias" : !iree.ptr<tensor<128xf32>> - %564 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/attention/self/key/kernel" : !iree.ptr<tensor<128x128xf32>> - %565 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/attention/self/query/bias" : !iree.ptr<tensor<128xf32>> - %566 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/attention/self/query/kernel" : !iree.ptr<tensor<128x128xf32>> - %567 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/attention/self/value/bias" : !iree.ptr<tensor<128xf32>> - %568 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/attention/self/value/kernel" : !iree.ptr<tensor<512x128xf32>> - %569 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/bottleneck/attention/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %570 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/bottleneck/attention/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %571 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/bottleneck/attention/dense/bias" : !iree.ptr<tensor<128xf32>> - %572 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/bottleneck/attention/dense/kernel" : 
!iree.ptr<tensor<512x128xf32>> - %573 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/bottleneck/input/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %574 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/bottleneck/input/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %575 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/bottleneck/input/dense/bias" : !iree.ptr<tensor<128xf32>> - %576 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/bottleneck/input/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %577 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/ffn_layer_0/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %578 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/ffn_layer_0/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %579 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/ffn_layer_0/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %580 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/ffn_layer_0/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %581 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/ffn_layer_0/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %582 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/ffn_layer_0/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %583 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/ffn_layer_1/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %584 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/ffn_layer_1/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %585 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/ffn_layer_1/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %586 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/ffn_layer_1/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %587 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/ffn_layer_1/output/dense/bias" : 
!iree.ptr<tensor<128xf32>> - %588 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/ffn_layer_1/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %589 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/ffn_layer_2/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %590 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/ffn_layer_2/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %591 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/ffn_layer_2/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %592 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/ffn_layer_2/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %593 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/ffn_layer_2/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %594 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/ffn_layer_2/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %595 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %596 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %597 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %598 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %599 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/output/bottleneck/FakeLayerNorm/beta" : !iree.ptr<tensor<512xf32>> - %600 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/output/bottleneck/FakeLayerNorm/gamma" : !iree.ptr<tensor<512xf32>> - %601 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/output/bottleneck/dense/bias" : !iree.ptr<tensor<512xf32>> - %602 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/output/bottleneck/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %603 = 
flow.variable.address @"__iree_flow_bert/encoder/layer_2/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %604 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %605 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/attention/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %606 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/attention/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %607 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/attention/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %608 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/attention/output/dense/kernel" : !iree.ptr<tensor<128x128xf32>> - %609 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/attention/self/key/bias" : !iree.ptr<tensor<128xf32>> - %610 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/attention/self/key/kernel" : !iree.ptr<tensor<128x128xf32>> - %611 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/attention/self/query/bias" : !iree.ptr<tensor<128xf32>> - %612 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/attention/self/query/kernel" : !iree.ptr<tensor<128x128xf32>> - %613 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/attention/self/value/bias" : !iree.ptr<tensor<128xf32>> - %614 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/attention/self/value/kernel" : !iree.ptr<tensor<512x128xf32>> - %615 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/bottleneck/attention/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %616 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/bottleneck/attention/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %617 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/bottleneck/attention/dense/bias" : !iree.ptr<tensor<128xf32>> - %618 = flow.variable.address 
@"__iree_flow_bert/encoder/layer_20/bottleneck/attention/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %619 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/bottleneck/input/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %620 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/bottleneck/input/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %621 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/bottleneck/input/dense/bias" : !iree.ptr<tensor<128xf32>> - %622 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/bottleneck/input/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %623 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/ffn_layer_0/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %624 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/ffn_layer_0/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %625 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/ffn_layer_0/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %626 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/ffn_layer_0/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %627 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/ffn_layer_0/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %628 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/ffn_layer_0/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %629 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/ffn_layer_1/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %630 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/ffn_layer_1/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %631 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/ffn_layer_1/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %632 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/ffn_layer_1/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %633 = 
flow.variable.address @"__iree_flow_bert/encoder/layer_20/ffn_layer_1/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %634 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/ffn_layer_1/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %635 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/ffn_layer_2/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %636 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/ffn_layer_2/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %637 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/ffn_layer_2/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %638 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/ffn_layer_2/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %639 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/ffn_layer_2/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %640 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/ffn_layer_2/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %641 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %642 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %643 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %644 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %645 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/output/bottleneck/FakeLayerNorm/beta" : !iree.ptr<tensor<512xf32>> - %646 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/output/bottleneck/FakeLayerNorm/gamma" : !iree.ptr<tensor<512xf32>> - %647 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/output/bottleneck/dense/bias" : !iree.ptr<tensor<512xf32>> - %648 = flow.variable.address 
@"__iree_flow_bert/encoder/layer_20/output/bottleneck/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %649 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %650 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %651 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/attention/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %652 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/attention/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %653 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/attention/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %654 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/attention/output/dense/kernel" : !iree.ptr<tensor<128x128xf32>> - %655 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/attention/self/key/bias" : !iree.ptr<tensor<128xf32>> - %656 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/attention/self/key/kernel" : !iree.ptr<tensor<128x128xf32>> - %657 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/attention/self/query/bias" : !iree.ptr<tensor<128xf32>> - %658 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/attention/self/query/kernel" : !iree.ptr<tensor<128x128xf32>> - %659 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/attention/self/value/bias" : !iree.ptr<tensor<128xf32>> - %660 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/attention/self/value/kernel" : !iree.ptr<tensor<512x128xf32>> - %661 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/bottleneck/attention/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %662 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/bottleneck/attention/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %663 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/bottleneck/attention/dense/bias" : 
!iree.ptr<tensor<128xf32>> - %664 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/bottleneck/attention/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %665 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/bottleneck/input/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %666 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/bottleneck/input/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %667 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/bottleneck/input/dense/bias" : !iree.ptr<tensor<128xf32>> - %668 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/bottleneck/input/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %669 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/ffn_layer_0/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %670 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/ffn_layer_0/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %671 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/ffn_layer_0/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %672 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/ffn_layer_0/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %673 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/ffn_layer_0/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %674 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/ffn_layer_0/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %675 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/ffn_layer_1/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %676 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/ffn_layer_1/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %677 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/ffn_layer_1/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %678 = flow.variable.address 
@"__iree_flow_bert/encoder/layer_21/ffn_layer_1/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %679 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/ffn_layer_1/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %680 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/ffn_layer_1/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %681 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/ffn_layer_2/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %682 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/ffn_layer_2/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %683 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/ffn_layer_2/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %684 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/ffn_layer_2/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %685 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/ffn_layer_2/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %686 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/ffn_layer_2/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %687 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %688 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %689 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %690 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %691 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/output/bottleneck/FakeLayerNorm/beta" : !iree.ptr<tensor<512xf32>> - %692 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/output/bottleneck/FakeLayerNorm/gamma" : !iree.ptr<tensor<512xf32>> - %693 = flow.variable.address 
@"__iree_flow_bert/encoder/layer_21/output/bottleneck/dense/bias" : !iree.ptr<tensor<512xf32>> - %694 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/output/bottleneck/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %695 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %696 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %697 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/attention/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %698 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/attention/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %699 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/attention/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %700 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/attention/output/dense/kernel" : !iree.ptr<tensor<128x128xf32>> - %701 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/attention/self/key/bias" : !iree.ptr<tensor<128xf32>> - %702 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/attention/self/key/kernel" : !iree.ptr<tensor<128x128xf32>> - %703 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/attention/self/query/bias" : !iree.ptr<tensor<128xf32>> - %704 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/attention/self/query/kernel" : !iree.ptr<tensor<128x128xf32>> - %705 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/attention/self/value/bias" : !iree.ptr<tensor<128xf32>> - %706 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/attention/self/value/kernel" : !iree.ptr<tensor<512x128xf32>> - %707 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/bottleneck/attention/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %708 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/bottleneck/attention/FakeLayerNorm/gamma" : 
!iree.ptr<tensor<128xf32>> - %709 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/bottleneck/attention/dense/bias" : !iree.ptr<tensor<128xf32>> - %710 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/bottleneck/attention/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %711 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/bottleneck/input/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %712 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/bottleneck/input/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %713 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/bottleneck/input/dense/bias" : !iree.ptr<tensor<128xf32>> - %714 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/bottleneck/input/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %715 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/ffn_layer_0/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %716 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/ffn_layer_0/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %717 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/ffn_layer_0/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %718 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/ffn_layer_0/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %719 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/ffn_layer_0/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %720 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/ffn_layer_0/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %721 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/ffn_layer_1/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %722 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/ffn_layer_1/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %723 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/ffn_layer_1/output/FakeLayerNorm/beta" : 
!iree.ptr<tensor<128xf32>> - %724 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/ffn_layer_1/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %725 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/ffn_layer_1/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %726 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/ffn_layer_1/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %727 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/ffn_layer_2/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %728 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/ffn_layer_2/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %729 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/ffn_layer_2/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %730 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/ffn_layer_2/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %731 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/ffn_layer_2/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %732 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/ffn_layer_2/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %733 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %734 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %735 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %736 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %737 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/output/bottleneck/FakeLayerNorm/beta" : !iree.ptr<tensor<512xf32>> - %738 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/output/bottleneck/FakeLayerNorm/gamma" : !iree.ptr<tensor<512xf32>> - %739 = 
flow.variable.address @"__iree_flow_bert/encoder/layer_22/output/bottleneck/dense/bias" : !iree.ptr<tensor<512xf32>> - %740 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/output/bottleneck/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %741 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %742 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %743 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/attention/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %744 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/attention/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %745 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/attention/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %746 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/attention/output/dense/kernel" : !iree.ptr<tensor<128x128xf32>> - %747 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/attention/self/key/bias" : !iree.ptr<tensor<128xf32>> - %748 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/attention/self/key/kernel" : !iree.ptr<tensor<128x128xf32>> - %749 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/attention/self/query/bias" : !iree.ptr<tensor<128xf32>> - %750 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/attention/self/query/kernel" : !iree.ptr<tensor<128x128xf32>> - %751 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/attention/self/value/bias" : !iree.ptr<tensor<128xf32>> - %752 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/attention/self/value/kernel" : !iree.ptr<tensor<512x128xf32>> - %753 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/bottleneck/attention/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %754 = flow.variable.address 
@"__iree_flow_bert/encoder/layer_23/bottleneck/attention/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %755 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/bottleneck/attention/dense/bias" : !iree.ptr<tensor<128xf32>> - %756 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/bottleneck/attention/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %757 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/bottleneck/input/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %758 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/bottleneck/input/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %759 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/bottleneck/input/dense/bias" : !iree.ptr<tensor<128xf32>> - %760 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/bottleneck/input/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %761 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/ffn_layer_0/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %762 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/ffn_layer_0/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %763 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/ffn_layer_0/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %764 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/ffn_layer_0/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %765 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/ffn_layer_0/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %766 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/ffn_layer_0/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %767 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/ffn_layer_1/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %768 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/ffn_layer_1/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %769 = flow.variable.address 
@"__iree_flow_bert/encoder/layer_23/ffn_layer_1/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %770 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/ffn_layer_1/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %771 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/ffn_layer_1/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %772 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/ffn_layer_1/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %773 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/ffn_layer_2/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %774 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/ffn_layer_2/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %775 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/ffn_layer_2/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %776 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/ffn_layer_2/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %777 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/ffn_layer_2/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %778 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/ffn_layer_2/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %779 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %780 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %781 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %782 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %783 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/output/bottleneck/FakeLayerNorm/beta" : !iree.ptr<tensor<512xf32>> - %784 = flow.variable.address 
@"__iree_flow_bert/encoder/layer_23/output/bottleneck/FakeLayerNorm/gamma" : !iree.ptr<tensor<512xf32>> - %785 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/output/bottleneck/dense/bias" : !iree.ptr<tensor<512xf32>> - %786 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/output/bottleneck/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %787 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %788 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %789 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/attention/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %790 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/attention/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %791 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/attention/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %792 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/attention/output/dense/kernel" : !iree.ptr<tensor<128x128xf32>> - %793 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/attention/self/key/bias" : !iree.ptr<tensor<128xf32>> - %794 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/attention/self/key/kernel" : !iree.ptr<tensor<128x128xf32>> - %795 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/attention/self/query/bias" : !iree.ptr<tensor<128xf32>> - %796 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/attention/self/query/kernel" : !iree.ptr<tensor<128x128xf32>> - %797 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/attention/self/value/bias" : !iree.ptr<tensor<128xf32>> - %798 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/attention/self/value/kernel" : !iree.ptr<tensor<512x128xf32>> - %799 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/bottleneck/attention/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> 
- %800 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/bottleneck/attention/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %801 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/bottleneck/attention/dense/bias" : !iree.ptr<tensor<128xf32>> - %802 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/bottleneck/attention/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %803 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/bottleneck/input/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %804 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/bottleneck/input/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %805 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/bottleneck/input/dense/bias" : !iree.ptr<tensor<128xf32>> - %806 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/bottleneck/input/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %807 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/ffn_layer_0/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %808 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/ffn_layer_0/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %809 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/ffn_layer_0/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %810 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/ffn_layer_0/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %811 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/ffn_layer_0/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %812 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/ffn_layer_0/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %813 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/ffn_layer_1/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %814 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/ffn_layer_1/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %815 = 
flow.variable.address @"__iree_flow_bert/encoder/layer_3/ffn_layer_1/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %816 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/ffn_layer_1/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %817 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/ffn_layer_1/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %818 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/ffn_layer_1/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %819 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/ffn_layer_2/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %820 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/ffn_layer_2/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %821 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/ffn_layer_2/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %822 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/ffn_layer_2/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %823 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/ffn_layer_2/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %824 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/ffn_layer_2/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %825 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %826 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %827 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %828 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %829 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/output/bottleneck/FakeLayerNorm/beta" : !iree.ptr<tensor<512xf32>> - %830 = flow.variable.address 
@"__iree_flow_bert/encoder/layer_3/output/bottleneck/FakeLayerNorm/gamma" : !iree.ptr<tensor<512xf32>> - %831 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/output/bottleneck/dense/bias" : !iree.ptr<tensor<512xf32>> - %832 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/output/bottleneck/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %833 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %834 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %835 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/attention/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %836 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/attention/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %837 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/attention/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %838 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/attention/output/dense/kernel" : !iree.ptr<tensor<128x128xf32>> - %839 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/attention/self/key/bias" : !iree.ptr<tensor<128xf32>> - %840 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/attention/self/key/kernel" : !iree.ptr<tensor<128x128xf32>> - %841 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/attention/self/query/bias" : !iree.ptr<tensor<128xf32>> - %842 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/attention/self/query/kernel" : !iree.ptr<tensor<128x128xf32>> - %843 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/attention/self/value/bias" : !iree.ptr<tensor<128xf32>> - %844 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/attention/self/value/kernel" : !iree.ptr<tensor<512x128xf32>> - %845 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/bottleneck/attention/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - 
%846 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/bottleneck/attention/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %847 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/bottleneck/attention/dense/bias" : !iree.ptr<tensor<128xf32>> - %848 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/bottleneck/attention/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %849 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/bottleneck/input/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %850 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/bottleneck/input/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %851 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/bottleneck/input/dense/bias" : !iree.ptr<tensor<128xf32>> - %852 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/bottleneck/input/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %853 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/ffn_layer_0/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %854 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/ffn_layer_0/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %855 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/ffn_layer_0/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %856 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/ffn_layer_0/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %857 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/ffn_layer_0/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %858 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/ffn_layer_0/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %859 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/ffn_layer_1/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %860 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/ffn_layer_1/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %861 = 
flow.variable.address @"__iree_flow_bert/encoder/layer_4/ffn_layer_1/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %862 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/ffn_layer_1/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %863 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/ffn_layer_1/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %864 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/ffn_layer_1/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %865 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/ffn_layer_2/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %866 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/ffn_layer_2/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %867 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/ffn_layer_2/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %868 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/ffn_layer_2/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %869 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/ffn_layer_2/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %870 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/ffn_layer_2/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %871 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %872 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %873 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %874 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %875 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/output/bottleneck/FakeLayerNorm/beta" : !iree.ptr<tensor<512xf32>> - %876 = flow.variable.address 
@"__iree_flow_bert/encoder/layer_4/output/bottleneck/FakeLayerNorm/gamma" : !iree.ptr<tensor<512xf32>> - %877 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/output/bottleneck/dense/bias" : !iree.ptr<tensor<512xf32>> - %878 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/output/bottleneck/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %879 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %880 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %881 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/attention/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %882 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/attention/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %883 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/attention/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %884 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/attention/output/dense/kernel" : !iree.ptr<tensor<128x128xf32>> - %885 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/attention/self/key/bias" : !iree.ptr<tensor<128xf32>> - %886 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/attention/self/key/kernel" : !iree.ptr<tensor<128x128xf32>> - %887 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/attention/self/query/bias" : !iree.ptr<tensor<128xf32>> - %888 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/attention/self/query/kernel" : !iree.ptr<tensor<128x128xf32>> - %889 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/attention/self/value/bias" : !iree.ptr<tensor<128xf32>> - %890 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/attention/self/value/kernel" : !iree.ptr<tensor<512x128xf32>> - %891 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/bottleneck/attention/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - 
%892 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/bottleneck/attention/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %893 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/bottleneck/attention/dense/bias" : !iree.ptr<tensor<128xf32>> - %894 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/bottleneck/attention/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %895 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/bottleneck/input/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %896 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/bottleneck/input/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %897 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/bottleneck/input/dense/bias" : !iree.ptr<tensor<128xf32>> - %898 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/bottleneck/input/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %899 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/ffn_layer_0/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %900 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/ffn_layer_0/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %901 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/ffn_layer_0/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %902 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/ffn_layer_0/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %903 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/ffn_layer_0/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %904 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/ffn_layer_0/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %905 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/ffn_layer_1/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %906 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/ffn_layer_1/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %907 = 
flow.variable.address @"__iree_flow_bert/encoder/layer_5/ffn_layer_1/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %908 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/ffn_layer_1/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %909 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/ffn_layer_1/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %910 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/ffn_layer_1/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %911 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/ffn_layer_2/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %912 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/ffn_layer_2/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %913 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/ffn_layer_2/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %914 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/ffn_layer_2/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %915 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/ffn_layer_2/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %916 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/ffn_layer_2/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %917 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %918 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %919 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %920 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %921 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/output/bottleneck/FakeLayerNorm/beta" : !iree.ptr<tensor<512xf32>> - %922 = flow.variable.address 
@"__iree_flow_bert/encoder/layer_5/output/bottleneck/FakeLayerNorm/gamma" : !iree.ptr<tensor<512xf32>> - %923 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/output/bottleneck/dense/bias" : !iree.ptr<tensor<512xf32>> - %924 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/output/bottleneck/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %925 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %926 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %927 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/attention/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %928 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/attention/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %929 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/attention/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %930 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/attention/output/dense/kernel" : !iree.ptr<tensor<128x128xf32>> - %931 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/attention/self/key/bias" : !iree.ptr<tensor<128xf32>> - %932 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/attention/self/key/kernel" : !iree.ptr<tensor<128x128xf32>> - %933 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/attention/self/query/bias" : !iree.ptr<tensor<128xf32>> - %934 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/attention/self/query/kernel" : !iree.ptr<tensor<128x128xf32>> - %935 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/attention/self/value/bias" : !iree.ptr<tensor<128xf32>> - %936 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/attention/self/value/kernel" : !iree.ptr<tensor<512x128xf32>> - %937 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/bottleneck/attention/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - 
%938 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/bottleneck/attention/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %939 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/bottleneck/attention/dense/bias" : !iree.ptr<tensor<128xf32>> - %940 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/bottleneck/attention/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %941 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/bottleneck/input/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %942 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/bottleneck/input/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %943 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/bottleneck/input/dense/bias" : !iree.ptr<tensor<128xf32>> - %944 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/bottleneck/input/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %945 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/ffn_layer_0/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %946 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/ffn_layer_0/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %947 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/ffn_layer_0/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %948 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/ffn_layer_0/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %949 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/ffn_layer_0/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %950 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/ffn_layer_0/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %951 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/ffn_layer_1/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %952 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/ffn_layer_1/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %953 = 
flow.variable.address @"__iree_flow_bert/encoder/layer_6/ffn_layer_1/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %954 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/ffn_layer_1/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %955 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/ffn_layer_1/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %956 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/ffn_layer_1/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %957 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/ffn_layer_2/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %958 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/ffn_layer_2/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %959 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/ffn_layer_2/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %960 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/ffn_layer_2/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %961 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/ffn_layer_2/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %962 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/ffn_layer_2/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %963 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %964 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %965 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %966 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %967 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/output/bottleneck/FakeLayerNorm/beta" : !iree.ptr<tensor<512xf32>> - %968 = flow.variable.address 
@"__iree_flow_bert/encoder/layer_6/output/bottleneck/FakeLayerNorm/gamma" : !iree.ptr<tensor<512xf32>> - %969 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/output/bottleneck/dense/bias" : !iree.ptr<tensor<512xf32>> - %970 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/output/bottleneck/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %971 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %972 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %973 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/attention/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %974 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/attention/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %975 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/attention/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %976 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/attention/output/dense/kernel" : !iree.ptr<tensor<128x128xf32>> - %977 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/attention/self/key/bias" : !iree.ptr<tensor<128xf32>> - %978 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/attention/self/key/kernel" : !iree.ptr<tensor<128x128xf32>> - %979 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/attention/self/query/bias" : !iree.ptr<tensor<128xf32>> - %980 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/attention/self/query/kernel" : !iree.ptr<tensor<128x128xf32>> - %981 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/attention/self/value/bias" : !iree.ptr<tensor<128xf32>> - %982 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/attention/self/value/kernel" : !iree.ptr<tensor<512x128xf32>> - %983 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/bottleneck/attention/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - 
%984 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/bottleneck/attention/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %985 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/bottleneck/attention/dense/bias" : !iree.ptr<tensor<128xf32>> - %986 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/bottleneck/attention/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %987 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/bottleneck/input/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %988 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/bottleneck/input/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %989 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/bottleneck/input/dense/bias" : !iree.ptr<tensor<128xf32>> - %990 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/bottleneck/input/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %991 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/ffn_layer_0/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %992 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/ffn_layer_0/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %993 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/ffn_layer_0/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %994 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/ffn_layer_0/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %995 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/ffn_layer_0/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %996 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/ffn_layer_0/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %997 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/ffn_layer_1/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %998 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/ffn_layer_1/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %999 = 
flow.variable.address @"__iree_flow_bert/encoder/layer_7/ffn_layer_1/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %1000 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/ffn_layer_1/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %1001 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/ffn_layer_1/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %1002 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/ffn_layer_1/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %1003 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/ffn_layer_2/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %1004 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/ffn_layer_2/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %1005 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/ffn_layer_2/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %1006 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/ffn_layer_2/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %1007 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/ffn_layer_2/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %1008 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/ffn_layer_2/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %1009 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %1010 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %1011 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %1012 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %1013 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/output/bottleneck/FakeLayerNorm/beta" : !iree.ptr<tensor<512xf32>> - %1014 = flow.variable.address 
@"__iree_flow_bert/encoder/layer_7/output/bottleneck/FakeLayerNorm/gamma" : !iree.ptr<tensor<512xf32>> - %1015 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/output/bottleneck/dense/bias" : !iree.ptr<tensor<512xf32>> - %1016 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/output/bottleneck/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %1017 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %1018 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %1019 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/attention/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %1020 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/attention/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %1021 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/attention/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %1022 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/attention/output/dense/kernel" : !iree.ptr<tensor<128x128xf32>> - %1023 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/attention/self/key/bias" : !iree.ptr<tensor<128xf32>> - %1024 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/attention/self/key/kernel" : !iree.ptr<tensor<128x128xf32>> - %1025 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/attention/self/query/bias" : !iree.ptr<tensor<128xf32>> - %1026 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/attention/self/query/kernel" : !iree.ptr<tensor<128x128xf32>> - %1027 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/attention/self/value/bias" : !iree.ptr<tensor<128xf32>> - %1028 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/attention/self/value/kernel" : !iree.ptr<tensor<512x128xf32>> - %1029 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/bottleneck/attention/FakeLayerNorm/beta" : 
!iree.ptr<tensor<128xf32>> - %1030 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/bottleneck/attention/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %1031 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/bottleneck/attention/dense/bias" : !iree.ptr<tensor<128xf32>> - %1032 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/bottleneck/attention/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %1033 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/bottleneck/input/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %1034 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/bottleneck/input/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %1035 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/bottleneck/input/dense/bias" : !iree.ptr<tensor<128xf32>> - %1036 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/bottleneck/input/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %1037 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/ffn_layer_0/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %1038 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/ffn_layer_0/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %1039 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/ffn_layer_0/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %1040 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/ffn_layer_0/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %1041 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/ffn_layer_0/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %1042 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/ffn_layer_0/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %1043 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/ffn_layer_1/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %1044 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/ffn_layer_1/intermediate/dense/kernel" : 
!iree.ptr<tensor<128x512xf32>> - %1045 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/ffn_layer_1/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %1046 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/ffn_layer_1/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %1047 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/ffn_layer_1/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %1048 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/ffn_layer_1/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %1049 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/ffn_layer_2/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %1050 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/ffn_layer_2/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %1051 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/ffn_layer_2/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %1052 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/ffn_layer_2/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %1053 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/ffn_layer_2/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %1054 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/ffn_layer_2/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %1055 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %1056 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %1057 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %1058 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %1059 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/output/bottleneck/FakeLayerNorm/beta" : !iree.ptr<tensor<512xf32>> - 
%1060 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/output/bottleneck/FakeLayerNorm/gamma" : !iree.ptr<tensor<512xf32>> - %1061 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/output/bottleneck/dense/bias" : !iree.ptr<tensor<512xf32>> - %1062 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/output/bottleneck/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %1063 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %1064 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %1065 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/attention/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %1066 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/attention/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %1067 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/attention/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %1068 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/attention/output/dense/kernel" : !iree.ptr<tensor<128x128xf32>> - %1069 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/attention/self/key/bias" : !iree.ptr<tensor<128xf32>> - %1070 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/attention/self/key/kernel" : !iree.ptr<tensor<128x128xf32>> - %1071 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/attention/self/query/bias" : !iree.ptr<tensor<128xf32>> - %1072 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/attention/self/query/kernel" : !iree.ptr<tensor<128x128xf32>> - %1073 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/attention/self/value/bias" : !iree.ptr<tensor<128xf32>> - %1074 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/attention/self/value/kernel" : !iree.ptr<tensor<512x128xf32>> - %1075 = flow.variable.address 
@"__iree_flow_bert/encoder/layer_9/bottleneck/attention/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %1076 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/bottleneck/attention/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %1077 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/bottleneck/attention/dense/bias" : !iree.ptr<tensor<128xf32>> - %1078 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/bottleneck/attention/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %1079 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/bottleneck/input/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %1080 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/bottleneck/input/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %1081 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/bottleneck/input/dense/bias" : !iree.ptr<tensor<128xf32>> - %1082 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/bottleneck/input/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %1083 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/ffn_layer_0/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %1084 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/ffn_layer_0/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %1085 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/ffn_layer_0/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %1086 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/ffn_layer_0/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %1087 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/ffn_layer_0/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %1088 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/ffn_layer_0/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %1089 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/ffn_layer_1/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %1090 = flow.variable.address 
@"__iree_flow_bert/encoder/layer_9/ffn_layer_1/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %1091 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/ffn_layer_1/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %1092 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/ffn_layer_1/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %1093 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/ffn_layer_1/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %1094 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/ffn_layer_1/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %1095 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/ffn_layer_2/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %1096 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/ffn_layer_2/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %1097 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/ffn_layer_2/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %1098 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/ffn_layer_2/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %1099 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/ffn_layer_2/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %1100 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/ffn_layer_2/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %1101 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/intermediate/dense/bias" : !iree.ptr<tensor<512xf32>> - %1102 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/intermediate/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %1103 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/output/FakeLayerNorm/beta" : !iree.ptr<tensor<128xf32>> - %1104 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/output/FakeLayerNorm/gamma" : !iree.ptr<tensor<128xf32>> - %1105 = flow.variable.address 
@"__iree_flow_bert/encoder/layer_9/output/bottleneck/FakeLayerNorm/beta" : !iree.ptr<tensor<512xf32>> - %1106 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/output/bottleneck/FakeLayerNorm/gamma" : !iree.ptr<tensor<512xf32>> - %1107 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/output/bottleneck/dense/bias" : !iree.ptr<tensor<512xf32>> - %1108 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/output/bottleneck/dense/kernel" : !iree.ptr<tensor<128x512xf32>> - %1109 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/output/dense/bias" : !iree.ptr<tensor<128xf32>> - %1110 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/output/dense/kernel" : !iree.ptr<tensor<512x128xf32>> - %1111 = flow.variable.address @"__iree_flow_cls/squad/output_bias" : !iree.ptr<tensor<2xf32>> - %1112 = flow.variable.address @"__iree_flow_cls/squad/output_weights" : !iree.ptr<tensor<2x512xf32>> + %arg0 = util.unfoldable_constant dense<0> : tensor<1x384xi32> + %arg1 = util.unfoldable_constant dense<0> : tensor<1x384xi32> + %arg2 = util.unfoldable_constant dense<0> : tensor<1x384xi32> + %0 = flow.variable.address @"__iree_flow_bert/embeddings/FakeLayerNorm/beta" : !util.ptr<tensor<512xf32>> + %1 = flow.variable.address @"__iree_flow_bert/embeddings/FakeLayerNorm/gamma" : !util.ptr<tensor<512xf32>> + %2 = flow.variable.address @"__iree_flow_bert/embeddings/embedding_transformation/bias" : !util.ptr<tensor<512xf32>> + %3 = flow.variable.address @"__iree_flow_bert/embeddings/embedding_transformation/kernel" : !util.ptr<tensor<384x512xf32>> + %4 = flow.variable.address @"__iree_flow_bert/embeddings/position_embeddings" : !util.ptr<tensor<512x512xf32>> + %5 = flow.variable.address @"__iree_flow_bert/embeddings/token_type_embeddings" : !util.ptr<tensor<2x512xf32>> + %6 = flow.variable.address @"__iree_flow_bert/embeddings/word_embeddings" : !util.ptr<tensor<30522x128xf32>> + %7 = flow.variable.address 
@"__iree_flow_bert/encoder/layer_0/attention/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %8 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/attention/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %9 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/attention/output/dense/bias" : !util.ptr<tensor<128xf32>> + %10 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/attention/output/dense/kernel" : !util.ptr<tensor<128x128xf32>> + %11 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/attention/self/key/bias" : !util.ptr<tensor<128xf32>> + %12 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/attention/self/key/kernel" : !util.ptr<tensor<128x128xf32>> + %13 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/attention/self/query/bias" : !util.ptr<tensor<128xf32>> + %14 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/attention/self/query/kernel" : !util.ptr<tensor<128x128xf32>> + %15 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/attention/self/value/bias" : !util.ptr<tensor<128xf32>> + %16 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/attention/self/value/kernel" : !util.ptr<tensor<512x128xf32>> + %17 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/bottleneck/attention/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %18 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/bottleneck/attention/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %19 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/bottleneck/attention/dense/bias" : !util.ptr<tensor<128xf32>> + %20 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/bottleneck/attention/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %21 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/bottleneck/input/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %22 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/bottleneck/input/FakeLayerNorm/gamma" : 
!util.ptr<tensor<128xf32>> + %23 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/bottleneck/input/dense/bias" : !util.ptr<tensor<128xf32>> + %24 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/bottleneck/input/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %25 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/ffn_layer_0/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %26 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/ffn_layer_0/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %27 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/ffn_layer_0/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %28 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/ffn_layer_0/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %29 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/ffn_layer_0/output/dense/bias" : !util.ptr<tensor<128xf32>> + %30 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/ffn_layer_0/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %31 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/ffn_layer_1/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %32 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/ffn_layer_1/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %33 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/ffn_layer_1/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %34 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/ffn_layer_1/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %35 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/ffn_layer_1/output/dense/bias" : !util.ptr<tensor<128xf32>> + %36 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/ffn_layer_1/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %37 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/ffn_layer_2/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %38 = 
flow.variable.address @"__iree_flow_bert/encoder/layer_0/ffn_layer_2/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %39 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/ffn_layer_2/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %40 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/ffn_layer_2/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %41 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/ffn_layer_2/output/dense/bias" : !util.ptr<tensor<128xf32>> + %42 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/ffn_layer_2/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %43 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %44 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %45 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %46 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %47 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/output/bottleneck/FakeLayerNorm/beta" : !util.ptr<tensor<512xf32>> + %48 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/output/bottleneck/FakeLayerNorm/gamma" : !util.ptr<tensor<512xf32>> + %49 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/output/bottleneck/dense/bias" : !util.ptr<tensor<512xf32>> + %50 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/output/bottleneck/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %51 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/output/dense/bias" : !util.ptr<tensor<128xf32>> + %52 = flow.variable.address @"__iree_flow_bert/encoder/layer_0/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %53 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/attention/output/FakeLayerNorm/beta" : 
!util.ptr<tensor<128xf32>> + %54 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/attention/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %55 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/attention/output/dense/bias" : !util.ptr<tensor<128xf32>> + %56 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/attention/output/dense/kernel" : !util.ptr<tensor<128x128xf32>> + %57 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/attention/self/key/bias" : !util.ptr<tensor<128xf32>> + %58 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/attention/self/key/kernel" : !util.ptr<tensor<128x128xf32>> + %59 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/attention/self/query/bias" : !util.ptr<tensor<128xf32>> + %60 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/attention/self/query/kernel" : !util.ptr<tensor<128x128xf32>> + %61 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/attention/self/value/bias" : !util.ptr<tensor<128xf32>> + %62 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/attention/self/value/kernel" : !util.ptr<tensor<512x128xf32>> + %63 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/bottleneck/attention/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %64 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/bottleneck/attention/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %65 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/bottleneck/attention/dense/bias" : !util.ptr<tensor<128xf32>> + %66 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/bottleneck/attention/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %67 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/bottleneck/input/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %68 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/bottleneck/input/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %69 = flow.variable.address 
@"__iree_flow_bert/encoder/layer_1/bottleneck/input/dense/bias" : !util.ptr<tensor<128xf32>> + %70 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/bottleneck/input/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %71 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/ffn_layer_0/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %72 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/ffn_layer_0/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %73 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/ffn_layer_0/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %74 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/ffn_layer_0/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %75 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/ffn_layer_0/output/dense/bias" : !util.ptr<tensor<128xf32>> + %76 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/ffn_layer_0/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %77 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/ffn_layer_1/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %78 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/ffn_layer_1/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %79 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/ffn_layer_1/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %80 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/ffn_layer_1/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %81 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/ffn_layer_1/output/dense/bias" : !util.ptr<tensor<128xf32>> + %82 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/ffn_layer_1/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %83 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/ffn_layer_2/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %84 = flow.variable.address 
@"__iree_flow_bert/encoder/layer_1/ffn_layer_2/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %85 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/ffn_layer_2/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %86 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/ffn_layer_2/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %87 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/ffn_layer_2/output/dense/bias" : !util.ptr<tensor<128xf32>> + %88 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/ffn_layer_2/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %89 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %90 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %91 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %92 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %93 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/output/bottleneck/FakeLayerNorm/beta" : !util.ptr<tensor<512xf32>> + %94 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/output/bottleneck/FakeLayerNorm/gamma" : !util.ptr<tensor<512xf32>> + %95 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/output/bottleneck/dense/bias" : !util.ptr<tensor<512xf32>> + %96 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/output/bottleneck/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %97 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/output/dense/bias" : !util.ptr<tensor<128xf32>> + %98 = flow.variable.address @"__iree_flow_bert/encoder/layer_1/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %99 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/attention/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> 
+ %100 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/attention/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %101 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/attention/output/dense/bias" : !util.ptr<tensor<128xf32>> + %102 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/attention/output/dense/kernel" : !util.ptr<tensor<128x128xf32>> + %103 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/attention/self/key/bias" : !util.ptr<tensor<128xf32>> + %104 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/attention/self/key/kernel" : !util.ptr<tensor<128x128xf32>> + %105 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/attention/self/query/bias" : !util.ptr<tensor<128xf32>> + %106 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/attention/self/query/kernel" : !util.ptr<tensor<128x128xf32>> + %107 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/attention/self/value/bias" : !util.ptr<tensor<128xf32>> + %108 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/attention/self/value/kernel" : !util.ptr<tensor<512x128xf32>> + %109 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/bottleneck/attention/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %110 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/bottleneck/attention/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %111 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/bottleneck/attention/dense/bias" : !util.ptr<tensor<128xf32>> + %112 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/bottleneck/attention/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %113 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/bottleneck/input/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %114 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/bottleneck/input/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %115 = flow.variable.address 
@"__iree_flow_bert/encoder/layer_10/bottleneck/input/dense/bias" : !util.ptr<tensor<128xf32>> + %116 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/bottleneck/input/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %117 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/ffn_layer_0/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %118 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/ffn_layer_0/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %119 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/ffn_layer_0/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %120 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/ffn_layer_0/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %121 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/ffn_layer_0/output/dense/bias" : !util.ptr<tensor<128xf32>> + %122 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/ffn_layer_0/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %123 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/ffn_layer_1/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %124 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/ffn_layer_1/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %125 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/ffn_layer_1/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %126 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/ffn_layer_1/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %127 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/ffn_layer_1/output/dense/bias" : !util.ptr<tensor<128xf32>> + %128 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/ffn_layer_1/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %129 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/ffn_layer_2/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %130 = flow.variable.address 
@"__iree_flow_bert/encoder/layer_10/ffn_layer_2/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %131 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/ffn_layer_2/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %132 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/ffn_layer_2/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %133 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/ffn_layer_2/output/dense/bias" : !util.ptr<tensor<128xf32>> + %134 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/ffn_layer_2/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %135 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %136 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %137 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %138 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %139 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/output/bottleneck/FakeLayerNorm/beta" : !util.ptr<tensor<512xf32>> + %140 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/output/bottleneck/FakeLayerNorm/gamma" : !util.ptr<tensor<512xf32>> + %141 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/output/bottleneck/dense/bias" : !util.ptr<tensor<512xf32>> + %142 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/output/bottleneck/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %143 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/output/dense/bias" : !util.ptr<tensor<128xf32>> + %144 = flow.variable.address @"__iree_flow_bert/encoder/layer_10/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %145 = flow.variable.address 
@"__iree_flow_bert/encoder/layer_11/attention/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %146 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/attention/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %147 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/attention/output/dense/bias" : !util.ptr<tensor<128xf32>> + %148 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/attention/output/dense/kernel" : !util.ptr<tensor<128x128xf32>> + %149 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/attention/self/key/bias" : !util.ptr<tensor<128xf32>> + %150 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/attention/self/key/kernel" : !util.ptr<tensor<128x128xf32>> + %151 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/attention/self/query/bias" : !util.ptr<tensor<128xf32>> + %152 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/attention/self/query/kernel" : !util.ptr<tensor<128x128xf32>> + %153 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/attention/self/value/bias" : !util.ptr<tensor<128xf32>> + %154 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/attention/self/value/kernel" : !util.ptr<tensor<512x128xf32>> + %155 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/bottleneck/attention/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %156 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/bottleneck/attention/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %157 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/bottleneck/attention/dense/bias" : !util.ptr<tensor<128xf32>> + %158 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/bottleneck/attention/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %159 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/bottleneck/input/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %160 = flow.variable.address 
@"__iree_flow_bert/encoder/layer_11/bottleneck/input/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %161 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/bottleneck/input/dense/bias" : !util.ptr<tensor<128xf32>> + %162 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/bottleneck/input/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %163 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/ffn_layer_0/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %164 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/ffn_layer_0/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %165 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/ffn_layer_0/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %166 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/ffn_layer_0/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %167 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/ffn_layer_0/output/dense/bias" : !util.ptr<tensor<128xf32>> + %168 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/ffn_layer_0/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %169 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/ffn_layer_1/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %170 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/ffn_layer_1/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %171 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/ffn_layer_1/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %172 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/ffn_layer_1/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %173 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/ffn_layer_1/output/dense/bias" : !util.ptr<tensor<128xf32>> + %174 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/ffn_layer_1/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %175 = flow.variable.address 
@"__iree_flow_bert/encoder/layer_11/ffn_layer_2/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %176 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/ffn_layer_2/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %177 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/ffn_layer_2/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %178 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/ffn_layer_2/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %179 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/ffn_layer_2/output/dense/bias" : !util.ptr<tensor<128xf32>> + %180 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/ffn_layer_2/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %181 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %182 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %183 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %184 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %185 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/output/bottleneck/FakeLayerNorm/beta" : !util.ptr<tensor<512xf32>> + %186 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/output/bottleneck/FakeLayerNorm/gamma" : !util.ptr<tensor<512xf32>> + %187 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/output/bottleneck/dense/bias" : !util.ptr<tensor<512xf32>> + %188 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/output/bottleneck/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %189 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/output/dense/bias" : !util.ptr<tensor<128xf32>> + %190 = flow.variable.address @"__iree_flow_bert/encoder/layer_11/output/dense/kernel" : 
!util.ptr<tensor<512x128xf32>> + %191 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/attention/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %192 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/attention/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %193 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/attention/output/dense/bias" : !util.ptr<tensor<128xf32>> + %194 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/attention/output/dense/kernel" : !util.ptr<tensor<128x128xf32>> + %195 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/attention/self/key/bias" : !util.ptr<tensor<128xf32>> + %196 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/attention/self/key/kernel" : !util.ptr<tensor<128x128xf32>> + %197 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/attention/self/query/bias" : !util.ptr<tensor<128xf32>> + %198 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/attention/self/query/kernel" : !util.ptr<tensor<128x128xf32>> + %199 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/attention/self/value/bias" : !util.ptr<tensor<128xf32>> + %200 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/attention/self/value/kernel" : !util.ptr<tensor<512x128xf32>> + %201 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/bottleneck/attention/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %202 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/bottleneck/attention/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %203 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/bottleneck/attention/dense/bias" : !util.ptr<tensor<128xf32>> + %204 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/bottleneck/attention/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %205 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/bottleneck/input/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %206 = 
flow.variable.address @"__iree_flow_bert/encoder/layer_12/bottleneck/input/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %207 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/bottleneck/input/dense/bias" : !util.ptr<tensor<128xf32>> + %208 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/bottleneck/input/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %209 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/ffn_layer_0/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %210 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/ffn_layer_0/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %211 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/ffn_layer_0/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %212 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/ffn_layer_0/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %213 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/ffn_layer_0/output/dense/bias" : !util.ptr<tensor<128xf32>> + %214 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/ffn_layer_0/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %215 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/ffn_layer_1/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %216 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/ffn_layer_1/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %217 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/ffn_layer_1/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %218 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/ffn_layer_1/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %219 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/ffn_layer_1/output/dense/bias" : !util.ptr<tensor<128xf32>> + %220 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/ffn_layer_1/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %221 = 
flow.variable.address @"__iree_flow_bert/encoder/layer_12/ffn_layer_2/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %222 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/ffn_layer_2/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %223 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/ffn_layer_2/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %224 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/ffn_layer_2/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %225 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/ffn_layer_2/output/dense/bias" : !util.ptr<tensor<128xf32>> + %226 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/ffn_layer_2/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %227 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %228 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %229 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %230 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %231 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/output/bottleneck/FakeLayerNorm/beta" : !util.ptr<tensor<512xf32>> + %232 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/output/bottleneck/FakeLayerNorm/gamma" : !util.ptr<tensor<512xf32>> + %233 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/output/bottleneck/dense/bias" : !util.ptr<tensor<512xf32>> + %234 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/output/bottleneck/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %235 = flow.variable.address @"__iree_flow_bert/encoder/layer_12/output/dense/bias" : !util.ptr<tensor<128xf32>> + %236 = flow.variable.address 
@"__iree_flow_bert/encoder/layer_12/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %237 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/attention/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %238 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/attention/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %239 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/attention/output/dense/bias" : !util.ptr<tensor<128xf32>> + %240 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/attention/output/dense/kernel" : !util.ptr<tensor<128x128xf32>> + %241 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/attention/self/key/bias" : !util.ptr<tensor<128xf32>> + %242 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/attention/self/key/kernel" : !util.ptr<tensor<128x128xf32>> + %243 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/attention/self/query/bias" : !util.ptr<tensor<128xf32>> + %244 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/attention/self/query/kernel" : !util.ptr<tensor<128x128xf32>> + %245 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/attention/self/value/bias" : !util.ptr<tensor<128xf32>> + %246 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/attention/self/value/kernel" : !util.ptr<tensor<512x128xf32>> + %247 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/bottleneck/attention/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %248 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/bottleneck/attention/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %249 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/bottleneck/attention/dense/bias" : !util.ptr<tensor<128xf32>> + %250 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/bottleneck/attention/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %251 = flow.variable.address 
@"__iree_flow_bert/encoder/layer_13/bottleneck/input/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %252 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/bottleneck/input/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %253 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/bottleneck/input/dense/bias" : !util.ptr<tensor<128xf32>> + %254 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/bottleneck/input/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %255 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/ffn_layer_0/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %256 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/ffn_layer_0/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %257 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/ffn_layer_0/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %258 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/ffn_layer_0/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %259 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/ffn_layer_0/output/dense/bias" : !util.ptr<tensor<128xf32>> + %260 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/ffn_layer_0/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %261 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/ffn_layer_1/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %262 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/ffn_layer_1/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %263 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/ffn_layer_1/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %264 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/ffn_layer_1/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %265 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/ffn_layer_1/output/dense/bias" : !util.ptr<tensor<128xf32>> + %266 = flow.variable.address 
@"__iree_flow_bert/encoder/layer_13/ffn_layer_1/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %267 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/ffn_layer_2/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %268 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/ffn_layer_2/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %269 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/ffn_layer_2/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %270 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/ffn_layer_2/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %271 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/ffn_layer_2/output/dense/bias" : !util.ptr<tensor<128xf32>> + %272 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/ffn_layer_2/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %273 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %274 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %275 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %276 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %277 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/output/bottleneck/FakeLayerNorm/beta" : !util.ptr<tensor<512xf32>> + %278 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/output/bottleneck/FakeLayerNorm/gamma" : !util.ptr<tensor<512xf32>> + %279 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/output/bottleneck/dense/bias" : !util.ptr<tensor<512xf32>> + %280 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/output/bottleneck/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %281 = flow.variable.address 
@"__iree_flow_bert/encoder/layer_13/output/dense/bias" : !util.ptr<tensor<128xf32>> + %282 = flow.variable.address @"__iree_flow_bert/encoder/layer_13/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %283 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/attention/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %284 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/attention/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %285 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/attention/output/dense/bias" : !util.ptr<tensor<128xf32>> + %286 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/attention/output/dense/kernel" : !util.ptr<tensor<128x128xf32>> + %287 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/attention/self/key/bias" : !util.ptr<tensor<128xf32>> + %288 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/attention/self/key/kernel" : !util.ptr<tensor<128x128xf32>> + %289 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/attention/self/query/bias" : !util.ptr<tensor<128xf32>> + %290 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/attention/self/query/kernel" : !util.ptr<tensor<128x128xf32>> + %291 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/attention/self/value/bias" : !util.ptr<tensor<128xf32>> + %292 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/attention/self/value/kernel" : !util.ptr<tensor<512x128xf32>> + %293 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/bottleneck/attention/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %294 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/bottleneck/attention/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %295 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/bottleneck/attention/dense/bias" : !util.ptr<tensor<128xf32>> + %296 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/bottleneck/attention/dense/kernel" : 
!util.ptr<tensor<512x128xf32>> + %297 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/bottleneck/input/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %298 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/bottleneck/input/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %299 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/bottleneck/input/dense/bias" : !util.ptr<tensor<128xf32>> + %300 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/bottleneck/input/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %301 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/ffn_layer_0/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %302 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/ffn_layer_0/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %303 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/ffn_layer_0/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %304 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/ffn_layer_0/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %305 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/ffn_layer_0/output/dense/bias" : !util.ptr<tensor<128xf32>> + %306 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/ffn_layer_0/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %307 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/ffn_layer_1/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %308 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/ffn_layer_1/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %309 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/ffn_layer_1/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %310 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/ffn_layer_1/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %311 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/ffn_layer_1/output/dense/bias" : 
!util.ptr<tensor<128xf32>> + %312 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/ffn_layer_1/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %313 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/ffn_layer_2/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %314 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/ffn_layer_2/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %315 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/ffn_layer_2/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %316 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/ffn_layer_2/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %317 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/ffn_layer_2/output/dense/bias" : !util.ptr<tensor<128xf32>> + %318 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/ffn_layer_2/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %319 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %320 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %321 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %322 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %323 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/output/bottleneck/FakeLayerNorm/beta" : !util.ptr<tensor<512xf32>> + %324 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/output/bottleneck/FakeLayerNorm/gamma" : !util.ptr<tensor<512xf32>> + %325 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/output/bottleneck/dense/bias" : !util.ptr<tensor<512xf32>> + %326 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/output/bottleneck/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %327 = 
flow.variable.address @"__iree_flow_bert/encoder/layer_14/output/dense/bias" : !util.ptr<tensor<128xf32>> + %328 = flow.variable.address @"__iree_flow_bert/encoder/layer_14/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %329 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/attention/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %330 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/attention/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %331 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/attention/output/dense/bias" : !util.ptr<tensor<128xf32>> + %332 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/attention/output/dense/kernel" : !util.ptr<tensor<128x128xf32>> + %333 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/attention/self/key/bias" : !util.ptr<tensor<128xf32>> + %334 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/attention/self/key/kernel" : !util.ptr<tensor<128x128xf32>> + %335 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/attention/self/query/bias" : !util.ptr<tensor<128xf32>> + %336 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/attention/self/query/kernel" : !util.ptr<tensor<128x128xf32>> + %337 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/attention/self/value/bias" : !util.ptr<tensor<128xf32>> + %338 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/attention/self/value/kernel" : !util.ptr<tensor<512x128xf32>> + %339 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/bottleneck/attention/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %340 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/bottleneck/attention/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %341 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/bottleneck/attention/dense/bias" : !util.ptr<tensor<128xf32>> + %342 = flow.variable.address 
@"__iree_flow_bert/encoder/layer_15/bottleneck/attention/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %343 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/bottleneck/input/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %344 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/bottleneck/input/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %345 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/bottleneck/input/dense/bias" : !util.ptr<tensor<128xf32>> + %346 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/bottleneck/input/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %347 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/ffn_layer_0/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %348 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/ffn_layer_0/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %349 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/ffn_layer_0/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %350 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/ffn_layer_0/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %351 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/ffn_layer_0/output/dense/bias" : !util.ptr<tensor<128xf32>> + %352 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/ffn_layer_0/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %353 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/ffn_layer_1/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %354 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/ffn_layer_1/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %355 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/ffn_layer_1/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %356 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/ffn_layer_1/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %357 = 
flow.variable.address @"__iree_flow_bert/encoder/layer_15/ffn_layer_1/output/dense/bias" : !util.ptr<tensor<128xf32>> + %358 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/ffn_layer_1/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %359 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/ffn_layer_2/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %360 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/ffn_layer_2/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %361 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/ffn_layer_2/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %362 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/ffn_layer_2/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %363 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/ffn_layer_2/output/dense/bias" : !util.ptr<tensor<128xf32>> + %364 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/ffn_layer_2/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %365 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %366 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %367 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %368 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %369 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/output/bottleneck/FakeLayerNorm/beta" : !util.ptr<tensor<512xf32>> + %370 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/output/bottleneck/FakeLayerNorm/gamma" : !util.ptr<tensor<512xf32>> + %371 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/output/bottleneck/dense/bias" : !util.ptr<tensor<512xf32>> + %372 = flow.variable.address 
@"__iree_flow_bert/encoder/layer_15/output/bottleneck/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %373 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/output/dense/bias" : !util.ptr<tensor<128xf32>> + %374 = flow.variable.address @"__iree_flow_bert/encoder/layer_15/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %375 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/attention/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %376 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/attention/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %377 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/attention/output/dense/bias" : !util.ptr<tensor<128xf32>> + %378 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/attention/output/dense/kernel" : !util.ptr<tensor<128x128xf32>> + %379 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/attention/self/key/bias" : !util.ptr<tensor<128xf32>> + %380 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/attention/self/key/kernel" : !util.ptr<tensor<128x128xf32>> + %381 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/attention/self/query/bias" : !util.ptr<tensor<128xf32>> + %382 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/attention/self/query/kernel" : !util.ptr<tensor<128x128xf32>> + %383 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/attention/self/value/bias" : !util.ptr<tensor<128xf32>> + %384 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/attention/self/value/kernel" : !util.ptr<tensor<512x128xf32>> + %385 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/bottleneck/attention/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %386 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/bottleneck/attention/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %387 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/bottleneck/attention/dense/bias" : 
!util.ptr<tensor<128xf32>> + %388 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/bottleneck/attention/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %389 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/bottleneck/input/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %390 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/bottleneck/input/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %391 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/bottleneck/input/dense/bias" : !util.ptr<tensor<128xf32>> + %392 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/bottleneck/input/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %393 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/ffn_layer_0/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %394 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/ffn_layer_0/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %395 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/ffn_layer_0/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %396 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/ffn_layer_0/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %397 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/ffn_layer_0/output/dense/bias" : !util.ptr<tensor<128xf32>> + %398 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/ffn_layer_0/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %399 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/ffn_layer_1/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %400 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/ffn_layer_1/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %401 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/ffn_layer_1/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %402 = flow.variable.address 
@"__iree_flow_bert/encoder/layer_16/ffn_layer_1/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %403 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/ffn_layer_1/output/dense/bias" : !util.ptr<tensor<128xf32>> + %404 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/ffn_layer_1/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %405 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/ffn_layer_2/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %406 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/ffn_layer_2/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %407 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/ffn_layer_2/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %408 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/ffn_layer_2/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %409 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/ffn_layer_2/output/dense/bias" : !util.ptr<tensor<128xf32>> + %410 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/ffn_layer_2/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %411 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %412 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %413 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %414 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %415 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/output/bottleneck/FakeLayerNorm/beta" : !util.ptr<tensor<512xf32>> + %416 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/output/bottleneck/FakeLayerNorm/gamma" : !util.ptr<tensor<512xf32>> + %417 = flow.variable.address 
@"__iree_flow_bert/encoder/layer_16/output/bottleneck/dense/bias" : !util.ptr<tensor<512xf32>> + %418 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/output/bottleneck/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %419 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/output/dense/bias" : !util.ptr<tensor<128xf32>> + %420 = flow.variable.address @"__iree_flow_bert/encoder/layer_16/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %421 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/attention/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %422 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/attention/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %423 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/attention/output/dense/bias" : !util.ptr<tensor<128xf32>> + %424 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/attention/output/dense/kernel" : !util.ptr<tensor<128x128xf32>> + %425 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/attention/self/key/bias" : !util.ptr<tensor<128xf32>> + %426 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/attention/self/key/kernel" : !util.ptr<tensor<128x128xf32>> + %427 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/attention/self/query/bias" : !util.ptr<tensor<128xf32>> + %428 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/attention/self/query/kernel" : !util.ptr<tensor<128x128xf32>> + %429 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/attention/self/value/bias" : !util.ptr<tensor<128xf32>> + %430 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/attention/self/value/kernel" : !util.ptr<tensor<512x128xf32>> + %431 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/bottleneck/attention/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %432 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/bottleneck/attention/FakeLayerNorm/gamma" : 
!util.ptr<tensor<128xf32>> + %433 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/bottleneck/attention/dense/bias" : !util.ptr<tensor<128xf32>> + %434 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/bottleneck/attention/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %435 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/bottleneck/input/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %436 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/bottleneck/input/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %437 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/bottleneck/input/dense/bias" : !util.ptr<tensor<128xf32>> + %438 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/bottleneck/input/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %439 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/ffn_layer_0/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %440 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/ffn_layer_0/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %441 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/ffn_layer_0/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %442 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/ffn_layer_0/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %443 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/ffn_layer_0/output/dense/bias" : !util.ptr<tensor<128xf32>> + %444 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/ffn_layer_0/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %445 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/ffn_layer_1/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %446 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/ffn_layer_1/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %447 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/ffn_layer_1/output/FakeLayerNorm/beta" : 
!util.ptr<tensor<128xf32>> + %448 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/ffn_layer_1/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %449 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/ffn_layer_1/output/dense/bias" : !util.ptr<tensor<128xf32>> + %450 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/ffn_layer_1/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %451 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/ffn_layer_2/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %452 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/ffn_layer_2/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %453 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/ffn_layer_2/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %454 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/ffn_layer_2/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %455 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/ffn_layer_2/output/dense/bias" : !util.ptr<tensor<128xf32>> + %456 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/ffn_layer_2/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %457 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %458 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %459 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %460 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %461 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/output/bottleneck/FakeLayerNorm/beta" : !util.ptr<tensor<512xf32>> + %462 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/output/bottleneck/FakeLayerNorm/gamma" : !util.ptr<tensor<512xf32>> + %463 = 
flow.variable.address @"__iree_flow_bert/encoder/layer_17/output/bottleneck/dense/bias" : !util.ptr<tensor<512xf32>> + %464 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/output/bottleneck/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %465 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/output/dense/bias" : !util.ptr<tensor<128xf32>> + %466 = flow.variable.address @"__iree_flow_bert/encoder/layer_17/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %467 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/attention/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %468 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/attention/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %469 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/attention/output/dense/bias" : !util.ptr<tensor<128xf32>> + %470 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/attention/output/dense/kernel" : !util.ptr<tensor<128x128xf32>> + %471 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/attention/self/key/bias" : !util.ptr<tensor<128xf32>> + %472 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/attention/self/key/kernel" : !util.ptr<tensor<128x128xf32>> + %473 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/attention/self/query/bias" : !util.ptr<tensor<128xf32>> + %474 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/attention/self/query/kernel" : !util.ptr<tensor<128x128xf32>> + %475 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/attention/self/value/bias" : !util.ptr<tensor<128xf32>> + %476 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/attention/self/value/kernel" : !util.ptr<tensor<512x128xf32>> + %477 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/bottleneck/attention/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %478 = flow.variable.address 
@"__iree_flow_bert/encoder/layer_18/bottleneck/attention/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %479 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/bottleneck/attention/dense/bias" : !util.ptr<tensor<128xf32>> + %480 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/bottleneck/attention/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %481 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/bottleneck/input/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %482 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/bottleneck/input/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %483 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/bottleneck/input/dense/bias" : !util.ptr<tensor<128xf32>> + %484 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/bottleneck/input/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %485 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/ffn_layer_0/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %486 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/ffn_layer_0/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %487 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/ffn_layer_0/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %488 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/ffn_layer_0/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %489 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/ffn_layer_0/output/dense/bias" : !util.ptr<tensor<128xf32>> + %490 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/ffn_layer_0/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %491 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/ffn_layer_1/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %492 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/ffn_layer_1/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %493 = flow.variable.address 
@"__iree_flow_bert/encoder/layer_18/ffn_layer_1/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %494 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/ffn_layer_1/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %495 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/ffn_layer_1/output/dense/bias" : !util.ptr<tensor<128xf32>> + %496 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/ffn_layer_1/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %497 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/ffn_layer_2/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %498 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/ffn_layer_2/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %499 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/ffn_layer_2/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %500 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/ffn_layer_2/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %501 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/ffn_layer_2/output/dense/bias" : !util.ptr<tensor<128xf32>> + %502 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/ffn_layer_2/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %503 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %504 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %505 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %506 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %507 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/output/bottleneck/FakeLayerNorm/beta" : !util.ptr<tensor<512xf32>> + %508 = flow.variable.address 
@"__iree_flow_bert/encoder/layer_18/output/bottleneck/FakeLayerNorm/gamma" : !util.ptr<tensor<512xf32>> + %509 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/output/bottleneck/dense/bias" : !util.ptr<tensor<512xf32>> + %510 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/output/bottleneck/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %511 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/output/dense/bias" : !util.ptr<tensor<128xf32>> + %512 = flow.variable.address @"__iree_flow_bert/encoder/layer_18/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %513 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/attention/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %514 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/attention/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %515 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/attention/output/dense/bias" : !util.ptr<tensor<128xf32>> + %516 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/attention/output/dense/kernel" : !util.ptr<tensor<128x128xf32>> + %517 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/attention/self/key/bias" : !util.ptr<tensor<128xf32>> + %518 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/attention/self/key/kernel" : !util.ptr<tensor<128x128xf32>> + %519 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/attention/self/query/bias" : !util.ptr<tensor<128xf32>> + %520 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/attention/self/query/kernel" : !util.ptr<tensor<128x128xf32>> + %521 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/attention/self/value/bias" : !util.ptr<tensor<128xf32>> + %522 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/attention/self/value/kernel" : !util.ptr<tensor<512x128xf32>> + %523 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/bottleneck/attention/FakeLayerNorm/beta" : 
!util.ptr<tensor<128xf32>> + %524 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/bottleneck/attention/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %525 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/bottleneck/attention/dense/bias" : !util.ptr<tensor<128xf32>> + %526 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/bottleneck/attention/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %527 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/bottleneck/input/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %528 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/bottleneck/input/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %529 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/bottleneck/input/dense/bias" : !util.ptr<tensor<128xf32>> + %530 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/bottleneck/input/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %531 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/ffn_layer_0/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %532 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/ffn_layer_0/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %533 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/ffn_layer_0/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %534 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/ffn_layer_0/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %535 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/ffn_layer_0/output/dense/bias" : !util.ptr<tensor<128xf32>> + %536 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/ffn_layer_0/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %537 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/ffn_layer_1/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %538 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/ffn_layer_1/intermediate/dense/kernel" : 
!util.ptr<tensor<128x512xf32>> + %539 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/ffn_layer_1/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %540 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/ffn_layer_1/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %541 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/ffn_layer_1/output/dense/bias" : !util.ptr<tensor<128xf32>> + %542 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/ffn_layer_1/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %543 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/ffn_layer_2/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %544 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/ffn_layer_2/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %545 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/ffn_layer_2/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %546 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/ffn_layer_2/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %547 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/ffn_layer_2/output/dense/bias" : !util.ptr<tensor<128xf32>> + %548 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/ffn_layer_2/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %549 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %550 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %551 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %552 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %553 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/output/bottleneck/FakeLayerNorm/beta" : !util.ptr<tensor<512xf32>> + 
%554 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/output/bottleneck/FakeLayerNorm/gamma" : !util.ptr<tensor<512xf32>> + %555 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/output/bottleneck/dense/bias" : !util.ptr<tensor<512xf32>> + %556 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/output/bottleneck/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %557 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/output/dense/bias" : !util.ptr<tensor<128xf32>> + %558 = flow.variable.address @"__iree_flow_bert/encoder/layer_19/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %559 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/attention/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %560 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/attention/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %561 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/attention/output/dense/bias" : !util.ptr<tensor<128xf32>> + %562 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/attention/output/dense/kernel" : !util.ptr<tensor<128x128xf32>> + %563 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/attention/self/key/bias" : !util.ptr<tensor<128xf32>> + %564 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/attention/self/key/kernel" : !util.ptr<tensor<128x128xf32>> + %565 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/attention/self/query/bias" : !util.ptr<tensor<128xf32>> + %566 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/attention/self/query/kernel" : !util.ptr<tensor<128x128xf32>> + %567 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/attention/self/value/bias" : !util.ptr<tensor<128xf32>> + %568 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/attention/self/value/kernel" : !util.ptr<tensor<512x128xf32>> + %569 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/bottleneck/attention/FakeLayerNorm/beta" 
: !util.ptr<tensor<128xf32>> + %570 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/bottleneck/attention/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %571 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/bottleneck/attention/dense/bias" : !util.ptr<tensor<128xf32>> + %572 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/bottleneck/attention/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %573 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/bottleneck/input/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %574 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/bottleneck/input/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %575 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/bottleneck/input/dense/bias" : !util.ptr<tensor<128xf32>> + %576 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/bottleneck/input/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %577 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/ffn_layer_0/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %578 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/ffn_layer_0/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %579 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/ffn_layer_0/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %580 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/ffn_layer_0/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %581 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/ffn_layer_0/output/dense/bias" : !util.ptr<tensor<128xf32>> + %582 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/ffn_layer_0/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %583 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/ffn_layer_1/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %584 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/ffn_layer_1/intermediate/dense/kernel" : 
!util.ptr<tensor<128x512xf32>> + %585 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/ffn_layer_1/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %586 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/ffn_layer_1/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %587 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/ffn_layer_1/output/dense/bias" : !util.ptr<tensor<128xf32>> + %588 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/ffn_layer_1/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %589 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/ffn_layer_2/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %590 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/ffn_layer_2/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %591 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/ffn_layer_2/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %592 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/ffn_layer_2/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %593 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/ffn_layer_2/output/dense/bias" : !util.ptr<tensor<128xf32>> + %594 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/ffn_layer_2/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %595 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %596 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %597 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %598 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %599 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/output/bottleneck/FakeLayerNorm/beta" : !util.ptr<tensor<512xf32>> + %600 = 
flow.variable.address @"__iree_flow_bert/encoder/layer_2/output/bottleneck/FakeLayerNorm/gamma" : !util.ptr<tensor<512xf32>> + %601 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/output/bottleneck/dense/bias" : !util.ptr<tensor<512xf32>> + %602 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/output/bottleneck/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %603 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/output/dense/bias" : !util.ptr<tensor<128xf32>> + %604 = flow.variable.address @"__iree_flow_bert/encoder/layer_2/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %605 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/attention/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %606 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/attention/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %607 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/attention/output/dense/bias" : !util.ptr<tensor<128xf32>> + %608 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/attention/output/dense/kernel" : !util.ptr<tensor<128x128xf32>> + %609 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/attention/self/key/bias" : !util.ptr<tensor<128xf32>> + %610 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/attention/self/key/kernel" : !util.ptr<tensor<128x128xf32>> + %611 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/attention/self/query/bias" : !util.ptr<tensor<128xf32>> + %612 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/attention/self/query/kernel" : !util.ptr<tensor<128x128xf32>> + %613 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/attention/self/value/bias" : !util.ptr<tensor<128xf32>> + %614 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/attention/self/value/kernel" : !util.ptr<tensor<512x128xf32>> + %615 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/bottleneck/attention/FakeLayerNorm/beta" : 
!util.ptr<tensor<128xf32>> + %616 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/bottleneck/attention/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %617 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/bottleneck/attention/dense/bias" : !util.ptr<tensor<128xf32>> + %618 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/bottleneck/attention/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %619 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/bottleneck/input/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %620 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/bottleneck/input/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %621 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/bottleneck/input/dense/bias" : !util.ptr<tensor<128xf32>> + %622 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/bottleneck/input/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %623 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/ffn_layer_0/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %624 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/ffn_layer_0/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %625 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/ffn_layer_0/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %626 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/ffn_layer_0/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %627 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/ffn_layer_0/output/dense/bias" : !util.ptr<tensor<128xf32>> + %628 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/ffn_layer_0/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %629 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/ffn_layer_1/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %630 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/ffn_layer_1/intermediate/dense/kernel" : 
!util.ptr<tensor<128x512xf32>> + %631 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/ffn_layer_1/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %632 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/ffn_layer_1/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %633 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/ffn_layer_1/output/dense/bias" : !util.ptr<tensor<128xf32>> + %634 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/ffn_layer_1/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %635 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/ffn_layer_2/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %636 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/ffn_layer_2/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %637 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/ffn_layer_2/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %638 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/ffn_layer_2/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %639 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/ffn_layer_2/output/dense/bias" : !util.ptr<tensor<128xf32>> + %640 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/ffn_layer_2/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %641 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %642 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %643 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %644 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %645 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/output/bottleneck/FakeLayerNorm/beta" : !util.ptr<tensor<512xf32>> + 
%646 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/output/bottleneck/FakeLayerNorm/gamma" : !util.ptr<tensor<512xf32>> + %647 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/output/bottleneck/dense/bias" : !util.ptr<tensor<512xf32>> + %648 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/output/bottleneck/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %649 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/output/dense/bias" : !util.ptr<tensor<128xf32>> + %650 = flow.variable.address @"__iree_flow_bert/encoder/layer_20/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %651 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/attention/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %652 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/attention/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %653 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/attention/output/dense/bias" : !util.ptr<tensor<128xf32>> + %654 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/attention/output/dense/kernel" : !util.ptr<tensor<128x128xf32>> + %655 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/attention/self/key/bias" : !util.ptr<tensor<128xf32>> + %656 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/attention/self/key/kernel" : !util.ptr<tensor<128x128xf32>> + %657 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/attention/self/query/bias" : !util.ptr<tensor<128xf32>> + %658 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/attention/self/query/kernel" : !util.ptr<tensor<128x128xf32>> + %659 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/attention/self/value/bias" : !util.ptr<tensor<128xf32>> + %660 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/attention/self/value/kernel" : !util.ptr<tensor<512x128xf32>> + %661 = flow.variable.address 
@"__iree_flow_bert/encoder/layer_21/bottleneck/attention/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %662 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/bottleneck/attention/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %663 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/bottleneck/attention/dense/bias" : !util.ptr<tensor<128xf32>> + %664 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/bottleneck/attention/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %665 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/bottleneck/input/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %666 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/bottleneck/input/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %667 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/bottleneck/input/dense/bias" : !util.ptr<tensor<128xf32>> + %668 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/bottleneck/input/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %669 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/ffn_layer_0/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %670 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/ffn_layer_0/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %671 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/ffn_layer_0/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %672 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/ffn_layer_0/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %673 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/ffn_layer_0/output/dense/bias" : !util.ptr<tensor<128xf32>> + %674 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/ffn_layer_0/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %675 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/ffn_layer_1/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %676 = flow.variable.address 
@"__iree_flow_bert/encoder/layer_21/ffn_layer_1/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %677 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/ffn_layer_1/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %678 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/ffn_layer_1/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %679 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/ffn_layer_1/output/dense/bias" : !util.ptr<tensor<128xf32>> + %680 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/ffn_layer_1/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %681 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/ffn_layer_2/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %682 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/ffn_layer_2/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %683 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/ffn_layer_2/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %684 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/ffn_layer_2/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %685 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/ffn_layer_2/output/dense/bias" : !util.ptr<tensor<128xf32>> + %686 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/ffn_layer_2/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %687 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %688 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %689 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %690 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %691 = flow.variable.address 
@"__iree_flow_bert/encoder/layer_21/output/bottleneck/FakeLayerNorm/beta" : !util.ptr<tensor<512xf32>> + %692 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/output/bottleneck/FakeLayerNorm/gamma" : !util.ptr<tensor<512xf32>> + %693 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/output/bottleneck/dense/bias" : !util.ptr<tensor<512xf32>> + %694 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/output/bottleneck/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %695 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/output/dense/bias" : !util.ptr<tensor<128xf32>> + %696 = flow.variable.address @"__iree_flow_bert/encoder/layer_21/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %697 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/attention/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %698 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/attention/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %699 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/attention/output/dense/bias" : !util.ptr<tensor<128xf32>> + %700 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/attention/output/dense/kernel" : !util.ptr<tensor<128x128xf32>> + %701 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/attention/self/key/bias" : !util.ptr<tensor<128xf32>> + %702 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/attention/self/key/kernel" : !util.ptr<tensor<128x128xf32>> + %703 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/attention/self/query/bias" : !util.ptr<tensor<128xf32>> + %704 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/attention/self/query/kernel" : !util.ptr<tensor<128x128xf32>> + %705 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/attention/self/value/bias" : !util.ptr<tensor<128xf32>> + %706 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/attention/self/value/kernel" : 
!util.ptr<tensor<512x128xf32>> + %707 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/bottleneck/attention/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %708 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/bottleneck/attention/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %709 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/bottleneck/attention/dense/bias" : !util.ptr<tensor<128xf32>> + %710 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/bottleneck/attention/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %711 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/bottleneck/input/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %712 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/bottleneck/input/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %713 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/bottleneck/input/dense/bias" : !util.ptr<tensor<128xf32>> + %714 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/bottleneck/input/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %715 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/ffn_layer_0/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %716 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/ffn_layer_0/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %717 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/ffn_layer_0/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %718 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/ffn_layer_0/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %719 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/ffn_layer_0/output/dense/bias" : !util.ptr<tensor<128xf32>> + %720 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/ffn_layer_0/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %721 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/ffn_layer_1/intermediate/dense/bias" 
: !util.ptr<tensor<512xf32>> + %722 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/ffn_layer_1/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %723 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/ffn_layer_1/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %724 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/ffn_layer_1/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %725 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/ffn_layer_1/output/dense/bias" : !util.ptr<tensor<128xf32>> + %726 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/ffn_layer_1/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %727 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/ffn_layer_2/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %728 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/ffn_layer_2/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %729 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/ffn_layer_2/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %730 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/ffn_layer_2/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %731 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/ffn_layer_2/output/dense/bias" : !util.ptr<tensor<128xf32>> + %732 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/ffn_layer_2/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %733 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %734 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %735 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %736 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + 
%737 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/output/bottleneck/FakeLayerNorm/beta" : !util.ptr<tensor<512xf32>> + %738 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/output/bottleneck/FakeLayerNorm/gamma" : !util.ptr<tensor<512xf32>> + %739 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/output/bottleneck/dense/bias" : !util.ptr<tensor<512xf32>> + %740 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/output/bottleneck/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %741 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/output/dense/bias" : !util.ptr<tensor<128xf32>> + %742 = flow.variable.address @"__iree_flow_bert/encoder/layer_22/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %743 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/attention/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %744 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/attention/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %745 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/attention/output/dense/bias" : !util.ptr<tensor<128xf32>> + %746 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/attention/output/dense/kernel" : !util.ptr<tensor<128x128xf32>> + %747 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/attention/self/key/bias" : !util.ptr<tensor<128xf32>> + %748 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/attention/self/key/kernel" : !util.ptr<tensor<128x128xf32>> + %749 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/attention/self/query/bias" : !util.ptr<tensor<128xf32>> + %750 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/attention/self/query/kernel" : !util.ptr<tensor<128x128xf32>> + %751 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/attention/self/value/bias" : !util.ptr<tensor<128xf32>> + %752 = flow.variable.address 
@"__iree_flow_bert/encoder/layer_23/attention/self/value/kernel" : !util.ptr<tensor<512x128xf32>> + %753 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/bottleneck/attention/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %754 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/bottleneck/attention/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %755 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/bottleneck/attention/dense/bias" : !util.ptr<tensor<128xf32>> + %756 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/bottleneck/attention/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %757 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/bottleneck/input/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %758 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/bottleneck/input/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %759 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/bottleneck/input/dense/bias" : !util.ptr<tensor<128xf32>> + %760 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/bottleneck/input/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %761 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/ffn_layer_0/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %762 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/ffn_layer_0/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %763 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/ffn_layer_0/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %764 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/ffn_layer_0/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %765 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/ffn_layer_0/output/dense/bias" : !util.ptr<tensor<128xf32>> + %766 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/ffn_layer_0/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %767 = flow.variable.address 
@"__iree_flow_bert/encoder/layer_23/ffn_layer_1/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %768 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/ffn_layer_1/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %769 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/ffn_layer_1/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %770 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/ffn_layer_1/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %771 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/ffn_layer_1/output/dense/bias" : !util.ptr<tensor<128xf32>> + %772 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/ffn_layer_1/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %773 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/ffn_layer_2/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %774 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/ffn_layer_2/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %775 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/ffn_layer_2/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %776 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/ffn_layer_2/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %777 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/ffn_layer_2/output/dense/bias" : !util.ptr<tensor<128xf32>> + %778 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/ffn_layer_2/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %779 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %780 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %781 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %782 = flow.variable.address 
@"__iree_flow_bert/encoder/layer_23/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %783 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/output/bottleneck/FakeLayerNorm/beta" : !util.ptr<tensor<512xf32>> + %784 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/output/bottleneck/FakeLayerNorm/gamma" : !util.ptr<tensor<512xf32>> + %785 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/output/bottleneck/dense/bias" : !util.ptr<tensor<512xf32>> + %786 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/output/bottleneck/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %787 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/output/dense/bias" : !util.ptr<tensor<128xf32>> + %788 = flow.variable.address @"__iree_flow_bert/encoder/layer_23/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %789 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/attention/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %790 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/attention/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %791 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/attention/output/dense/bias" : !util.ptr<tensor<128xf32>> + %792 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/attention/output/dense/kernel" : !util.ptr<tensor<128x128xf32>> + %793 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/attention/self/key/bias" : !util.ptr<tensor<128xf32>> + %794 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/attention/self/key/kernel" : !util.ptr<tensor<128x128xf32>> + %795 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/attention/self/query/bias" : !util.ptr<tensor<128xf32>> + %796 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/attention/self/query/kernel" : !util.ptr<tensor<128x128xf32>> + %797 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/attention/self/value/bias" : !util.ptr<tensor<128xf32>> + %798 
= flow.variable.address @"__iree_flow_bert/encoder/layer_3/attention/self/value/kernel" : !util.ptr<tensor<512x128xf32>> + %799 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/bottleneck/attention/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %800 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/bottleneck/attention/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %801 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/bottleneck/attention/dense/bias" : !util.ptr<tensor<128xf32>> + %802 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/bottleneck/attention/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %803 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/bottleneck/input/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %804 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/bottleneck/input/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %805 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/bottleneck/input/dense/bias" : !util.ptr<tensor<128xf32>> + %806 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/bottleneck/input/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %807 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/ffn_layer_0/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %808 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/ffn_layer_0/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %809 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/ffn_layer_0/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %810 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/ffn_layer_0/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %811 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/ffn_layer_0/output/dense/bias" : !util.ptr<tensor<128xf32>> + %812 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/ffn_layer_0/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %813 = 
flow.variable.address @"__iree_flow_bert/encoder/layer_3/ffn_layer_1/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %814 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/ffn_layer_1/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %815 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/ffn_layer_1/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %816 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/ffn_layer_1/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %817 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/ffn_layer_1/output/dense/bias" : !util.ptr<tensor<128xf32>> + %818 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/ffn_layer_1/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %819 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/ffn_layer_2/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %820 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/ffn_layer_2/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %821 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/ffn_layer_2/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %822 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/ffn_layer_2/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %823 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/ffn_layer_2/output/dense/bias" : !util.ptr<tensor<128xf32>> + %824 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/ffn_layer_2/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %825 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %826 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %827 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %828 = flow.variable.address 
@"__iree_flow_bert/encoder/layer_3/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %829 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/output/bottleneck/FakeLayerNorm/beta" : !util.ptr<tensor<512xf32>> + %830 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/output/bottleneck/FakeLayerNorm/gamma" : !util.ptr<tensor<512xf32>> + %831 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/output/bottleneck/dense/bias" : !util.ptr<tensor<512xf32>> + %832 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/output/bottleneck/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %833 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/output/dense/bias" : !util.ptr<tensor<128xf32>> + %834 = flow.variable.address @"__iree_flow_bert/encoder/layer_3/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %835 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/attention/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %836 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/attention/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %837 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/attention/output/dense/bias" : !util.ptr<tensor<128xf32>> + %838 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/attention/output/dense/kernel" : !util.ptr<tensor<128x128xf32>> + %839 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/attention/self/key/bias" : !util.ptr<tensor<128xf32>> + %840 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/attention/self/key/kernel" : !util.ptr<tensor<128x128xf32>> + %841 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/attention/self/query/bias" : !util.ptr<tensor<128xf32>> + %842 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/attention/self/query/kernel" : !util.ptr<tensor<128x128xf32>> + %843 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/attention/self/value/bias" : !util.ptr<tensor<128xf32>> + %844 = 
flow.variable.address @"__iree_flow_bert/encoder/layer_4/attention/self/value/kernel" : !util.ptr<tensor<512x128xf32>> + %845 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/bottleneck/attention/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %846 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/bottleneck/attention/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %847 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/bottleneck/attention/dense/bias" : !util.ptr<tensor<128xf32>> + %848 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/bottleneck/attention/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %849 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/bottleneck/input/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %850 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/bottleneck/input/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %851 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/bottleneck/input/dense/bias" : !util.ptr<tensor<128xf32>> + %852 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/bottleneck/input/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %853 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/ffn_layer_0/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %854 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/ffn_layer_0/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %855 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/ffn_layer_0/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %856 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/ffn_layer_0/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %857 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/ffn_layer_0/output/dense/bias" : !util.ptr<tensor<128xf32>> + %858 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/ffn_layer_0/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %859 = 
flow.variable.address @"__iree_flow_bert/encoder/layer_4/ffn_layer_1/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %860 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/ffn_layer_1/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %861 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/ffn_layer_1/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %862 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/ffn_layer_1/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %863 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/ffn_layer_1/output/dense/bias" : !util.ptr<tensor<128xf32>> + %864 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/ffn_layer_1/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %865 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/ffn_layer_2/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %866 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/ffn_layer_2/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %867 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/ffn_layer_2/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %868 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/ffn_layer_2/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %869 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/ffn_layer_2/output/dense/bias" : !util.ptr<tensor<128xf32>> + %870 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/ffn_layer_2/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %871 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %872 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %873 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %874 = flow.variable.address 
@"__iree_flow_bert/encoder/layer_4/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %875 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/output/bottleneck/FakeLayerNorm/beta" : !util.ptr<tensor<512xf32>> + %876 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/output/bottleneck/FakeLayerNorm/gamma" : !util.ptr<tensor<512xf32>> + %877 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/output/bottleneck/dense/bias" : !util.ptr<tensor<512xf32>> + %878 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/output/bottleneck/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %879 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/output/dense/bias" : !util.ptr<tensor<128xf32>> + %880 = flow.variable.address @"__iree_flow_bert/encoder/layer_4/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %881 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/attention/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %882 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/attention/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %883 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/attention/output/dense/bias" : !util.ptr<tensor<128xf32>> + %884 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/attention/output/dense/kernel" : !util.ptr<tensor<128x128xf32>> + %885 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/attention/self/key/bias" : !util.ptr<tensor<128xf32>> + %886 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/attention/self/key/kernel" : !util.ptr<tensor<128x128xf32>> + %887 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/attention/self/query/bias" : !util.ptr<tensor<128xf32>> + %888 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/attention/self/query/kernel" : !util.ptr<tensor<128x128xf32>> + %889 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/attention/self/value/bias" : !util.ptr<tensor<128xf32>> + %890 = 
flow.variable.address @"__iree_flow_bert/encoder/layer_5/attention/self/value/kernel" : !util.ptr<tensor<512x128xf32>> + %891 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/bottleneck/attention/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %892 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/bottleneck/attention/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %893 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/bottleneck/attention/dense/bias" : !util.ptr<tensor<128xf32>> + %894 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/bottleneck/attention/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %895 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/bottleneck/input/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %896 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/bottleneck/input/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %897 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/bottleneck/input/dense/bias" : !util.ptr<tensor<128xf32>> + %898 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/bottleneck/input/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %899 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/ffn_layer_0/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %900 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/ffn_layer_0/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %901 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/ffn_layer_0/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %902 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/ffn_layer_0/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %903 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/ffn_layer_0/output/dense/bias" : !util.ptr<tensor<128xf32>> + %904 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/ffn_layer_0/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %905 = 
flow.variable.address @"__iree_flow_bert/encoder/layer_5/ffn_layer_1/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %906 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/ffn_layer_1/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %907 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/ffn_layer_1/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %908 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/ffn_layer_1/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %909 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/ffn_layer_1/output/dense/bias" : !util.ptr<tensor<128xf32>> + %910 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/ffn_layer_1/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %911 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/ffn_layer_2/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %912 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/ffn_layer_2/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %913 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/ffn_layer_2/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %914 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/ffn_layer_2/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %915 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/ffn_layer_2/output/dense/bias" : !util.ptr<tensor<128xf32>> + %916 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/ffn_layer_2/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %917 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %918 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %919 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %920 = flow.variable.address 
@"__iree_flow_bert/encoder/layer_5/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %921 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/output/bottleneck/FakeLayerNorm/beta" : !util.ptr<tensor<512xf32>> + %922 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/output/bottleneck/FakeLayerNorm/gamma" : !util.ptr<tensor<512xf32>> + %923 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/output/bottleneck/dense/bias" : !util.ptr<tensor<512xf32>> + %924 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/output/bottleneck/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %925 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/output/dense/bias" : !util.ptr<tensor<128xf32>> + %926 = flow.variable.address @"__iree_flow_bert/encoder/layer_5/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %927 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/attention/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %928 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/attention/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %929 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/attention/output/dense/bias" : !util.ptr<tensor<128xf32>> + %930 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/attention/output/dense/kernel" : !util.ptr<tensor<128x128xf32>> + %931 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/attention/self/key/bias" : !util.ptr<tensor<128xf32>> + %932 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/attention/self/key/kernel" : !util.ptr<tensor<128x128xf32>> + %933 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/attention/self/query/bias" : !util.ptr<tensor<128xf32>> + %934 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/attention/self/query/kernel" : !util.ptr<tensor<128x128xf32>> + %935 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/attention/self/value/bias" : !util.ptr<tensor<128xf32>> + %936 = 
flow.variable.address @"__iree_flow_bert/encoder/layer_6/attention/self/value/kernel" : !util.ptr<tensor<512x128xf32>> + %937 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/bottleneck/attention/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %938 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/bottleneck/attention/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %939 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/bottleneck/attention/dense/bias" : !util.ptr<tensor<128xf32>> + %940 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/bottleneck/attention/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %941 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/bottleneck/input/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %942 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/bottleneck/input/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %943 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/bottleneck/input/dense/bias" : !util.ptr<tensor<128xf32>> + %944 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/bottleneck/input/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %945 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/ffn_layer_0/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %946 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/ffn_layer_0/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %947 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/ffn_layer_0/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %948 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/ffn_layer_0/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %949 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/ffn_layer_0/output/dense/bias" : !util.ptr<tensor<128xf32>> + %950 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/ffn_layer_0/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %951 = 
flow.variable.address @"__iree_flow_bert/encoder/layer_6/ffn_layer_1/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %952 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/ffn_layer_1/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %953 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/ffn_layer_1/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %954 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/ffn_layer_1/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %955 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/ffn_layer_1/output/dense/bias" : !util.ptr<tensor<128xf32>> + %956 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/ffn_layer_1/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %957 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/ffn_layer_2/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %958 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/ffn_layer_2/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %959 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/ffn_layer_2/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %960 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/ffn_layer_2/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %961 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/ffn_layer_2/output/dense/bias" : !util.ptr<tensor<128xf32>> + %962 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/ffn_layer_2/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %963 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %964 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %965 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %966 = flow.variable.address 
@"__iree_flow_bert/encoder/layer_6/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %967 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/output/bottleneck/FakeLayerNorm/beta" : !util.ptr<tensor<512xf32>> + %968 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/output/bottleneck/FakeLayerNorm/gamma" : !util.ptr<tensor<512xf32>> + %969 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/output/bottleneck/dense/bias" : !util.ptr<tensor<512xf32>> + %970 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/output/bottleneck/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %971 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/output/dense/bias" : !util.ptr<tensor<128xf32>> + %972 = flow.variable.address @"__iree_flow_bert/encoder/layer_6/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %973 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/attention/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %974 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/attention/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %975 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/attention/output/dense/bias" : !util.ptr<tensor<128xf32>> + %976 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/attention/output/dense/kernel" : !util.ptr<tensor<128x128xf32>> + %977 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/attention/self/key/bias" : !util.ptr<tensor<128xf32>> + %978 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/attention/self/key/kernel" : !util.ptr<tensor<128x128xf32>> + %979 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/attention/self/query/bias" : !util.ptr<tensor<128xf32>> + %980 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/attention/self/query/kernel" : !util.ptr<tensor<128x128xf32>> + %981 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/attention/self/value/bias" : !util.ptr<tensor<128xf32>> + %982 = 
flow.variable.address @"__iree_flow_bert/encoder/layer_7/attention/self/value/kernel" : !util.ptr<tensor<512x128xf32>> + %983 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/bottleneck/attention/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %984 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/bottleneck/attention/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %985 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/bottleneck/attention/dense/bias" : !util.ptr<tensor<128xf32>> + %986 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/bottleneck/attention/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %987 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/bottleneck/input/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %988 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/bottleneck/input/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %989 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/bottleneck/input/dense/bias" : !util.ptr<tensor<128xf32>> + %990 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/bottleneck/input/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %991 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/ffn_layer_0/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %992 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/ffn_layer_0/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %993 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/ffn_layer_0/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %994 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/ffn_layer_0/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %995 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/ffn_layer_0/output/dense/bias" : !util.ptr<tensor<128xf32>> + %996 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/ffn_layer_0/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %997 = 
flow.variable.address @"__iree_flow_bert/encoder/layer_7/ffn_layer_1/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %998 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/ffn_layer_1/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %999 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/ffn_layer_1/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %1000 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/ffn_layer_1/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %1001 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/ffn_layer_1/output/dense/bias" : !util.ptr<tensor<128xf32>> + %1002 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/ffn_layer_1/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %1003 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/ffn_layer_2/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %1004 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/ffn_layer_2/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %1005 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/ffn_layer_2/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %1006 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/ffn_layer_2/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %1007 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/ffn_layer_2/output/dense/bias" : !util.ptr<tensor<128xf32>> + %1008 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/ffn_layer_2/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %1009 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %1010 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %1011 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %1012 = flow.variable.address 
@"__iree_flow_bert/encoder/layer_7/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %1013 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/output/bottleneck/FakeLayerNorm/beta" : !util.ptr<tensor<512xf32>> + %1014 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/output/bottleneck/FakeLayerNorm/gamma" : !util.ptr<tensor<512xf32>> + %1015 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/output/bottleneck/dense/bias" : !util.ptr<tensor<512xf32>> + %1016 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/output/bottleneck/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %1017 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/output/dense/bias" : !util.ptr<tensor<128xf32>> + %1018 = flow.variable.address @"__iree_flow_bert/encoder/layer_7/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %1019 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/attention/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %1020 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/attention/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %1021 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/attention/output/dense/bias" : !util.ptr<tensor<128xf32>> + %1022 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/attention/output/dense/kernel" : !util.ptr<tensor<128x128xf32>> + %1023 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/attention/self/key/bias" : !util.ptr<tensor<128xf32>> + %1024 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/attention/self/key/kernel" : !util.ptr<tensor<128x128xf32>> + %1025 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/attention/self/query/bias" : !util.ptr<tensor<128xf32>> + %1026 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/attention/self/query/kernel" : !util.ptr<tensor<128x128xf32>> + %1027 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/attention/self/value/bias" : 
!util.ptr<tensor<128xf32>> + %1028 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/attention/self/value/kernel" : !util.ptr<tensor<512x128xf32>> + %1029 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/bottleneck/attention/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %1030 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/bottleneck/attention/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %1031 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/bottleneck/attention/dense/bias" : !util.ptr<tensor<128xf32>> + %1032 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/bottleneck/attention/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %1033 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/bottleneck/input/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %1034 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/bottleneck/input/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %1035 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/bottleneck/input/dense/bias" : !util.ptr<tensor<128xf32>> + %1036 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/bottleneck/input/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %1037 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/ffn_layer_0/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %1038 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/ffn_layer_0/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %1039 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/ffn_layer_0/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %1040 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/ffn_layer_0/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %1041 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/ffn_layer_0/output/dense/bias" : !util.ptr<tensor<128xf32>> + %1042 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/ffn_layer_0/output/dense/kernel" : 
!util.ptr<tensor<512x128xf32>> + %1043 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/ffn_layer_1/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %1044 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/ffn_layer_1/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %1045 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/ffn_layer_1/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %1046 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/ffn_layer_1/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %1047 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/ffn_layer_1/output/dense/bias" : !util.ptr<tensor<128xf32>> + %1048 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/ffn_layer_1/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %1049 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/ffn_layer_2/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %1050 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/ffn_layer_2/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %1051 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/ffn_layer_2/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %1052 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/ffn_layer_2/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %1053 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/ffn_layer_2/output/dense/bias" : !util.ptr<tensor<128xf32>> + %1054 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/ffn_layer_2/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %1055 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %1056 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %1057 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/output/FakeLayerNorm/beta" : 
!util.ptr<tensor<128xf32>> + %1058 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %1059 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/output/bottleneck/FakeLayerNorm/beta" : !util.ptr<tensor<512xf32>> + %1060 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/output/bottleneck/FakeLayerNorm/gamma" : !util.ptr<tensor<512xf32>> + %1061 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/output/bottleneck/dense/bias" : !util.ptr<tensor<512xf32>> + %1062 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/output/bottleneck/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %1063 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/output/dense/bias" : !util.ptr<tensor<128xf32>> + %1064 = flow.variable.address @"__iree_flow_bert/encoder/layer_8/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %1065 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/attention/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %1066 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/attention/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %1067 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/attention/output/dense/bias" : !util.ptr<tensor<128xf32>> + %1068 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/attention/output/dense/kernel" : !util.ptr<tensor<128x128xf32>> + %1069 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/attention/self/key/bias" : !util.ptr<tensor<128xf32>> + %1070 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/attention/self/key/kernel" : !util.ptr<tensor<128x128xf32>> + %1071 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/attention/self/query/bias" : !util.ptr<tensor<128xf32>> + %1072 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/attention/self/query/kernel" : !util.ptr<tensor<128x128xf32>> + %1073 = flow.variable.address 
@"__iree_flow_bert/encoder/layer_9/attention/self/value/bias" : !util.ptr<tensor<128xf32>> + %1074 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/attention/self/value/kernel" : !util.ptr<tensor<512x128xf32>> + %1075 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/bottleneck/attention/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %1076 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/bottleneck/attention/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %1077 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/bottleneck/attention/dense/bias" : !util.ptr<tensor<128xf32>> + %1078 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/bottleneck/attention/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %1079 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/bottleneck/input/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %1080 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/bottleneck/input/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %1081 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/bottleneck/input/dense/bias" : !util.ptr<tensor<128xf32>> + %1082 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/bottleneck/input/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %1083 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/ffn_layer_0/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %1084 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/ffn_layer_0/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %1085 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/ffn_layer_0/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %1086 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/ffn_layer_0/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %1087 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/ffn_layer_0/output/dense/bias" : !util.ptr<tensor<128xf32>> + %1088 = flow.variable.address 
@"__iree_flow_bert/encoder/layer_9/ffn_layer_0/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %1089 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/ffn_layer_1/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %1090 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/ffn_layer_1/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %1091 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/ffn_layer_1/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %1092 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/ffn_layer_1/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %1093 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/ffn_layer_1/output/dense/bias" : !util.ptr<tensor<128xf32>> + %1094 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/ffn_layer_1/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %1095 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/ffn_layer_2/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %1096 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/ffn_layer_2/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %1097 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/ffn_layer_2/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %1098 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/ffn_layer_2/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %1099 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/ffn_layer_2/output/dense/bias" : !util.ptr<tensor<128xf32>> + %1100 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/ffn_layer_2/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %1101 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/intermediate/dense/bias" : !util.ptr<tensor<512xf32>> + %1102 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/intermediate/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %1103 = flow.variable.address 
@"__iree_flow_bert/encoder/layer_9/output/FakeLayerNorm/beta" : !util.ptr<tensor<128xf32>> + %1104 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/output/FakeLayerNorm/gamma" : !util.ptr<tensor<128xf32>> + %1105 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/output/bottleneck/FakeLayerNorm/beta" : !util.ptr<tensor<512xf32>> + %1106 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/output/bottleneck/FakeLayerNorm/gamma" : !util.ptr<tensor<512xf32>> + %1107 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/output/bottleneck/dense/bias" : !util.ptr<tensor<512xf32>> + %1108 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/output/bottleneck/dense/kernel" : !util.ptr<tensor<128x512xf32>> + %1109 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/output/dense/bias" : !util.ptr<tensor<128xf32>> + %1110 = flow.variable.address @"__iree_flow_bert/encoder/layer_9/output/dense/kernel" : !util.ptr<tensor<512x128xf32>> + %1111 = flow.variable.address @"__iree_flow_cls/squad/output_bias" : !util.ptr<tensor<2xf32>> + %1112 = flow.variable.address @"__iree_flow_cls/squad/output_weights" : !util.ptr<tensor<2x512xf32>> %1113 = mhlo.constant dense<-1.000000e+04> : tensor<1x1x384x384xf32> %1114 = mhlo.constant dense<0.176776692> : tensor<1x4x384x384xf32> %1115 = mhlo.constant dense<1.000000e+04> : tensor<1x1x384x384xf32> @@ -2238,1121 +2238,1121 @@ %1117 = mhlo.constant dense<0xFF800000> : tensor<f32> %1118 = mhlo.constant dense<0.000000e+00> : tensor<f32> %1119 = mhlo.constant dense<0.000000e+00> : tensor<1x384x512xf32> - %1120 = flow.variable.load.indirect %0 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1121 = flow.variable.load.indirect %1 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1122 = flow.variable.load.indirect %2 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1123 = flow.variable.load.indirect %3 : !iree.ptr<tensor<384x512xf32>> -> tensor<384x512xf32> - %1124 = flow.variable.load.indirect %4 : 
!iree.ptr<tensor<512x512xf32>> -> tensor<512x512xf32> + %1120 = flow.variable.load.indirect %0 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1121 = flow.variable.load.indirect %1 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1122 = flow.variable.load.indirect %2 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1123 = flow.variable.load.indirect %3 : !util.ptr<tensor<384x512xf32>> -> tensor<384x512xf32> + %1124 = flow.variable.load.indirect %4 : !util.ptr<tensor<512x512xf32>> -> tensor<512x512xf32> %1125 = "mhlo.slice"(%1124) {limit_indices = dense<[384, 512]> : tensor<2xi64>, start_indices = dense<0> : tensor<2xi64>, strides = dense<1> : tensor<2xi64>} : (tensor<512x512xf32>) -> tensor<384x512xf32> %1126 = "mhlo.reshape"(%1125) : (tensor<384x512xf32>) -> tensor<1x384x512xf32> - %1127 = flow.variable.load.indirect %5 : !iree.ptr<tensor<2x512xf32>> -> tensor<2x512xf32> - %1128 = flow.variable.load.indirect %6 : !iree.ptr<tensor<30522x128xf32>> -> tensor<30522x128xf32> - %1129 = flow.variable.load.indirect %7 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1130 = flow.variable.load.indirect %8 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1131 = flow.variable.load.indirect %9 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1132 = flow.variable.load.indirect %10 : !iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - %1133 = flow.variable.load.indirect %11 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1134 = flow.variable.load.indirect %12 : !iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - %1135 = flow.variable.load.indirect %13 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1136 = flow.variable.load.indirect %14 : !iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - %1137 = flow.variable.load.indirect %15 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1138 = flow.variable.load.indirect %16 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1139 = flow.variable.load.indirect %17 : !iree.ptr<tensor<128xf32>> -> 
tensor<128xf32> - %1140 = flow.variable.load.indirect %18 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1141 = flow.variable.load.indirect %19 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1142 = flow.variable.load.indirect %20 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1143 = flow.variable.load.indirect %21 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1144 = flow.variable.load.indirect %22 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1145 = flow.variable.load.indirect %23 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1146 = flow.variable.load.indirect %24 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1147 = flow.variable.load.indirect %25 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1148 = flow.variable.load.indirect %26 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1149 = flow.variable.load.indirect %27 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1150 = flow.variable.load.indirect %28 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1151 = flow.variable.load.indirect %29 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1152 = flow.variable.load.indirect %30 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1153 = flow.variable.load.indirect %31 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1154 = flow.variable.load.indirect %32 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1155 = flow.variable.load.indirect %33 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1156 = flow.variable.load.indirect %34 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1157 = flow.variable.load.indirect %35 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1158 = flow.variable.load.indirect %36 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1159 = flow.variable.load.indirect %37 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1160 = flow.variable.load.indirect %38 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1161 = flow.variable.load.indirect 
%39 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1162 = flow.variable.load.indirect %40 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1163 = flow.variable.load.indirect %41 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1164 = flow.variable.load.indirect %42 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1165 = flow.variable.load.indirect %43 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1166 = flow.variable.load.indirect %44 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1167 = flow.variable.load.indirect %45 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1168 = flow.variable.load.indirect %46 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1169 = flow.variable.load.indirect %47 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1170 = flow.variable.load.indirect %48 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1171 = flow.variable.load.indirect %49 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1172 = flow.variable.load.indirect %50 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1173 = flow.variable.load.indirect %51 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1174 = flow.variable.load.indirect %52 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1175 = flow.variable.load.indirect %53 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1176 = flow.variable.load.indirect %54 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1177 = flow.variable.load.indirect %55 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1178 = flow.variable.load.indirect %56 : !iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - %1179 = flow.variable.load.indirect %57 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1180 = flow.variable.load.indirect %58 : !iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - %1181 = flow.variable.load.indirect %59 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1182 = flow.variable.load.indirect %60 : !iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - 
%1183 = flow.variable.load.indirect %61 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1184 = flow.variable.load.indirect %62 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1185 = flow.variable.load.indirect %63 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1186 = flow.variable.load.indirect %64 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1187 = flow.variable.load.indirect %65 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1188 = flow.variable.load.indirect %66 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1189 = flow.variable.load.indirect %67 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1190 = flow.variable.load.indirect %68 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1191 = flow.variable.load.indirect %69 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1192 = flow.variable.load.indirect %70 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1193 = flow.variable.load.indirect %71 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1194 = flow.variable.load.indirect %72 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1195 = flow.variable.load.indirect %73 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1196 = flow.variable.load.indirect %74 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1197 = flow.variable.load.indirect %75 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1198 = flow.variable.load.indirect %76 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1199 = flow.variable.load.indirect %77 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1200 = flow.variable.load.indirect %78 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1201 = flow.variable.load.indirect %79 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1202 = flow.variable.load.indirect %80 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1203 = flow.variable.load.indirect %81 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1204 = flow.variable.load.indirect %82 : 
!iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1205 = flow.variable.load.indirect %83 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1206 = flow.variable.load.indirect %84 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1207 = flow.variable.load.indirect %85 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1208 = flow.variable.load.indirect %86 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1209 = flow.variable.load.indirect %87 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1210 = flow.variable.load.indirect %88 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1211 = flow.variable.load.indirect %89 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1212 = flow.variable.load.indirect %90 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1213 = flow.variable.load.indirect %91 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1214 = flow.variable.load.indirect %92 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1215 = flow.variable.load.indirect %93 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1216 = flow.variable.load.indirect %94 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1217 = flow.variable.load.indirect %95 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1218 = flow.variable.load.indirect %96 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1219 = flow.variable.load.indirect %97 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1220 = flow.variable.load.indirect %98 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1221 = flow.variable.load.indirect %99 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1222 = flow.variable.load.indirect %100 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1223 = flow.variable.load.indirect %101 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1224 = flow.variable.load.indirect %102 : !iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - %1225 = flow.variable.load.indirect %103 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - 
%1226 = flow.variable.load.indirect %104 : !iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - %1227 = flow.variable.load.indirect %105 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1228 = flow.variable.load.indirect %106 : !iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - %1229 = flow.variable.load.indirect %107 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1230 = flow.variable.load.indirect %108 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1231 = flow.variable.load.indirect %109 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1232 = flow.variable.load.indirect %110 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1233 = flow.variable.load.indirect %111 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1234 = flow.variable.load.indirect %112 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1235 = flow.variable.load.indirect %113 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1236 = flow.variable.load.indirect %114 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1237 = flow.variable.load.indirect %115 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1238 = flow.variable.load.indirect %116 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1239 = flow.variable.load.indirect %117 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1240 = flow.variable.load.indirect %118 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1241 = flow.variable.load.indirect %119 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1242 = flow.variable.load.indirect %120 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1243 = flow.variable.load.indirect %121 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1244 = flow.variable.load.indirect %122 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1245 = flow.variable.load.indirect %123 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1246 = flow.variable.load.indirect %124 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1247 = 
flow.variable.load.indirect %125 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1248 = flow.variable.load.indirect %126 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1249 = flow.variable.load.indirect %127 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1250 = flow.variable.load.indirect %128 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1251 = flow.variable.load.indirect %129 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1252 = flow.variable.load.indirect %130 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1253 = flow.variable.load.indirect %131 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1254 = flow.variable.load.indirect %132 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1255 = flow.variable.load.indirect %133 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1256 = flow.variable.load.indirect %134 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1257 = flow.variable.load.indirect %135 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1258 = flow.variable.load.indirect %136 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1259 = flow.variable.load.indirect %137 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1260 = flow.variable.load.indirect %138 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1261 = flow.variable.load.indirect %139 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1262 = flow.variable.load.indirect %140 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1263 = flow.variable.load.indirect %141 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1264 = flow.variable.load.indirect %142 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1265 = flow.variable.load.indirect %143 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1266 = flow.variable.load.indirect %144 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1267 = flow.variable.load.indirect %145 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1268 = flow.variable.load.indirect %146 : 
!iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1269 = flow.variable.load.indirect %147 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1270 = flow.variable.load.indirect %148 : !iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - %1271 = flow.variable.load.indirect %149 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1272 = flow.variable.load.indirect %150 : !iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - %1273 = flow.variable.load.indirect %151 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1274 = flow.variable.load.indirect %152 : !iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - %1275 = flow.variable.load.indirect %153 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1276 = flow.variable.load.indirect %154 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1277 = flow.variable.load.indirect %155 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1278 = flow.variable.load.indirect %156 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1279 = flow.variable.load.indirect %157 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1280 = flow.variable.load.indirect %158 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1281 = flow.variable.load.indirect %159 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1282 = flow.variable.load.indirect %160 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1283 = flow.variable.load.indirect %161 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1284 = flow.variable.load.indirect %162 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1285 = flow.variable.load.indirect %163 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1286 = flow.variable.load.indirect %164 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1287 = flow.variable.load.indirect %165 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1288 = flow.variable.load.indirect %166 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1289 = flow.variable.load.indirect %167 : !iree.ptr<tensor<128xf32>> -> 
tensor<128xf32> - %1290 = flow.variable.load.indirect %168 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1291 = flow.variable.load.indirect %169 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1292 = flow.variable.load.indirect %170 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1293 = flow.variable.load.indirect %171 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1294 = flow.variable.load.indirect %172 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1295 = flow.variable.load.indirect %173 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1296 = flow.variable.load.indirect %174 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1297 = flow.variable.load.indirect %175 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1298 = flow.variable.load.indirect %176 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1299 = flow.variable.load.indirect %177 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1300 = flow.variable.load.indirect %178 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1301 = flow.variable.load.indirect %179 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1302 = flow.variable.load.indirect %180 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1303 = flow.variable.load.indirect %181 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1304 = flow.variable.load.indirect %182 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1305 = flow.variable.load.indirect %183 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1306 = flow.variable.load.indirect %184 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1307 = flow.variable.load.indirect %185 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1308 = flow.variable.load.indirect %186 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1309 = flow.variable.load.indirect %187 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1310 = flow.variable.load.indirect %188 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1311 = 
flow.variable.load.indirect %189 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1312 = flow.variable.load.indirect %190 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1313 = flow.variable.load.indirect %191 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1314 = flow.variable.load.indirect %192 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1315 = flow.variable.load.indirect %193 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1316 = flow.variable.load.indirect %194 : !iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - %1317 = flow.variable.load.indirect %195 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1318 = flow.variable.load.indirect %196 : !iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - %1319 = flow.variable.load.indirect %197 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1320 = flow.variable.load.indirect %198 : !iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - %1321 = flow.variable.load.indirect %199 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1322 = flow.variable.load.indirect %200 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1323 = flow.variable.load.indirect %201 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1324 = flow.variable.load.indirect %202 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1325 = flow.variable.load.indirect %203 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1326 = flow.variable.load.indirect %204 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1327 = flow.variable.load.indirect %205 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1328 = flow.variable.load.indirect %206 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1329 = flow.variable.load.indirect %207 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1330 = flow.variable.load.indirect %208 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1331 = flow.variable.load.indirect %209 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1332 = flow.variable.load.indirect %210 
: !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1333 = flow.variable.load.indirect %211 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1334 = flow.variable.load.indirect %212 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1335 = flow.variable.load.indirect %213 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1336 = flow.variable.load.indirect %214 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1337 = flow.variable.load.indirect %215 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1338 = flow.variable.load.indirect %216 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1339 = flow.variable.load.indirect %217 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1340 = flow.variable.load.indirect %218 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1341 = flow.variable.load.indirect %219 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1342 = flow.variable.load.indirect %220 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1343 = flow.variable.load.indirect %221 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1344 = flow.variable.load.indirect %222 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1345 = flow.variable.load.indirect %223 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1346 = flow.variable.load.indirect %224 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1347 = flow.variable.load.indirect %225 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1348 = flow.variable.load.indirect %226 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1349 = flow.variable.load.indirect %227 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1350 = flow.variable.load.indirect %228 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1351 = flow.variable.load.indirect %229 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1352 = flow.variable.load.indirect %230 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1353 = flow.variable.load.indirect %231 : !iree.ptr<tensor<512xf32>> -> 
tensor<512xf32> - %1354 = flow.variable.load.indirect %232 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1355 = flow.variable.load.indirect %233 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1356 = flow.variable.load.indirect %234 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1357 = flow.variable.load.indirect %235 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1358 = flow.variable.load.indirect %236 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1359 = flow.variable.load.indirect %237 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1360 = flow.variable.load.indirect %238 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1361 = flow.variable.load.indirect %239 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1362 = flow.variable.load.indirect %240 : !iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - %1363 = flow.variable.load.indirect %241 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1364 = flow.variable.load.indirect %242 : !iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - %1365 = flow.variable.load.indirect %243 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1366 = flow.variable.load.indirect %244 : !iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - %1367 = flow.variable.load.indirect %245 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1368 = flow.variable.load.indirect %246 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1369 = flow.variable.load.indirect %247 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1370 = flow.variable.load.indirect %248 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1371 = flow.variable.load.indirect %249 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1372 = flow.variable.load.indirect %250 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1373 = flow.variable.load.indirect %251 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1374 = flow.variable.load.indirect %252 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1375 = 
flow.variable.load.indirect %253 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1376 = flow.variable.load.indirect %254 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1377 = flow.variable.load.indirect %255 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1378 = flow.variable.load.indirect %256 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1379 = flow.variable.load.indirect %257 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1380 = flow.variable.load.indirect %258 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1381 = flow.variable.load.indirect %259 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1382 = flow.variable.load.indirect %260 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1383 = flow.variable.load.indirect %261 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1384 = flow.variable.load.indirect %262 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1385 = flow.variable.load.indirect %263 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1386 = flow.variable.load.indirect %264 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1387 = flow.variable.load.indirect %265 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1388 = flow.variable.load.indirect %266 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1389 = flow.variable.load.indirect %267 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1390 = flow.variable.load.indirect %268 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1391 = flow.variable.load.indirect %269 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1392 = flow.variable.load.indirect %270 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1393 = flow.variable.load.indirect %271 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1394 = flow.variable.load.indirect %272 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1395 = flow.variable.load.indirect %273 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1396 = flow.variable.load.indirect %274 
: !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1397 = flow.variable.load.indirect %275 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1398 = flow.variable.load.indirect %276 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1399 = flow.variable.load.indirect %277 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1400 = flow.variable.load.indirect %278 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1401 = flow.variable.load.indirect %279 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1402 = flow.variable.load.indirect %280 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1403 = flow.variable.load.indirect %281 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1404 = flow.variable.load.indirect %282 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1405 = flow.variable.load.indirect %283 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1406 = flow.variable.load.indirect %284 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1407 = flow.variable.load.indirect %285 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1408 = flow.variable.load.indirect %286 : !iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - %1409 = flow.variable.load.indirect %287 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1410 = flow.variable.load.indirect %288 : !iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - %1411 = flow.variable.load.indirect %289 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1412 = flow.variable.load.indirect %290 : !iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - %1413 = flow.variable.load.indirect %291 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1414 = flow.variable.load.indirect %292 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1415 = flow.variable.load.indirect %293 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1416 = flow.variable.load.indirect %294 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1417 = flow.variable.load.indirect %295 : !iree.ptr<tensor<128xf32>> -> 
tensor<128xf32> - %1418 = flow.variable.load.indirect %296 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1419 = flow.variable.load.indirect %297 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1420 = flow.variable.load.indirect %298 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1421 = flow.variable.load.indirect %299 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1422 = flow.variable.load.indirect %300 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1423 = flow.variable.load.indirect %301 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1424 = flow.variable.load.indirect %302 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1425 = flow.variable.load.indirect %303 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1426 = flow.variable.load.indirect %304 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1427 = flow.variable.load.indirect %305 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1428 = flow.variable.load.indirect %306 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1429 = flow.variable.load.indirect %307 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1430 = flow.variable.load.indirect %308 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1431 = flow.variable.load.indirect %309 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1432 = flow.variable.load.indirect %310 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1433 = flow.variable.load.indirect %311 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1434 = flow.variable.load.indirect %312 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1435 = flow.variable.load.indirect %313 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1436 = flow.variable.load.indirect %314 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1437 = flow.variable.load.indirect %315 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1438 = flow.variable.load.indirect %316 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1439 = 
flow.variable.load.indirect %317 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1440 = flow.variable.load.indirect %318 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1441 = flow.variable.load.indirect %319 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1442 = flow.variable.load.indirect %320 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1443 = flow.variable.load.indirect %321 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1444 = flow.variable.load.indirect %322 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1445 = flow.variable.load.indirect %323 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1446 = flow.variable.load.indirect %324 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1447 = flow.variable.load.indirect %325 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1448 = flow.variable.load.indirect %326 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1449 = flow.variable.load.indirect %327 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1450 = flow.variable.load.indirect %328 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1451 = flow.variable.load.indirect %329 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1452 = flow.variable.load.indirect %330 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1453 = flow.variable.load.indirect %331 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1454 = flow.variable.load.indirect %332 : !iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - %1455 = flow.variable.load.indirect %333 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1456 = flow.variable.load.indirect %334 : !iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - %1457 = flow.variable.load.indirect %335 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1458 = flow.variable.load.indirect %336 : !iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - %1459 = flow.variable.load.indirect %337 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1460 = flow.variable.load.indirect %338 
: !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1461 = flow.variable.load.indirect %339 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1462 = flow.variable.load.indirect %340 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1463 = flow.variable.load.indirect %341 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1464 = flow.variable.load.indirect %342 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1465 = flow.variable.load.indirect %343 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1466 = flow.variable.load.indirect %344 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1467 = flow.variable.load.indirect %345 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1468 = flow.variable.load.indirect %346 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1469 = flow.variable.load.indirect %347 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1470 = flow.variable.load.indirect %348 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1471 = flow.variable.load.indirect %349 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1472 = flow.variable.load.indirect %350 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1473 = flow.variable.load.indirect %351 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1474 = flow.variable.load.indirect %352 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1475 = flow.variable.load.indirect %353 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1476 = flow.variable.load.indirect %354 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1477 = flow.variable.load.indirect %355 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1478 = flow.variable.load.indirect %356 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1479 = flow.variable.load.indirect %357 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1480 = flow.variable.load.indirect %358 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1481 = flow.variable.load.indirect %359 : !iree.ptr<tensor<512xf32>> -> 
tensor<512xf32> - %1482 = flow.variable.load.indirect %360 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1483 = flow.variable.load.indirect %361 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1484 = flow.variable.load.indirect %362 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1485 = flow.variable.load.indirect %363 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1486 = flow.variable.load.indirect %364 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1487 = flow.variable.load.indirect %365 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1488 = flow.variable.load.indirect %366 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1489 = flow.variable.load.indirect %367 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1490 = flow.variable.load.indirect %368 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1491 = flow.variable.load.indirect %369 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1492 = flow.variable.load.indirect %370 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1493 = flow.variable.load.indirect %371 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1494 = flow.variable.load.indirect %372 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1495 = flow.variable.load.indirect %373 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1496 = flow.variable.load.indirect %374 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1497 = flow.variable.load.indirect %375 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1498 = flow.variable.load.indirect %376 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1499 = flow.variable.load.indirect %377 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1500 = flow.variable.load.indirect %378 : !iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - %1501 = flow.variable.load.indirect %379 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1502 = flow.variable.load.indirect %380 : !iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - %1503 = 
flow.variable.load.indirect %381 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1504 = flow.variable.load.indirect %382 : !iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - %1505 = flow.variable.load.indirect %383 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1506 = flow.variable.load.indirect %384 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1507 = flow.variable.load.indirect %385 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1508 = flow.variable.load.indirect %386 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1509 = flow.variable.load.indirect %387 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1510 = flow.variable.load.indirect %388 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1511 = flow.variable.load.indirect %389 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1512 = flow.variable.load.indirect %390 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1513 = flow.variable.load.indirect %391 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1514 = flow.variable.load.indirect %392 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1515 = flow.variable.load.indirect %393 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1516 = flow.variable.load.indirect %394 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1517 = flow.variable.load.indirect %395 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1518 = flow.variable.load.indirect %396 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1519 = flow.variable.load.indirect %397 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1520 = flow.variable.load.indirect %398 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1521 = flow.variable.load.indirect %399 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1522 = flow.variable.load.indirect %400 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1523 = flow.variable.load.indirect %401 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1524 = flow.variable.load.indirect %402 
: !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1525 = flow.variable.load.indirect %403 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1526 = flow.variable.load.indirect %404 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1527 = flow.variable.load.indirect %405 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1528 = flow.variable.load.indirect %406 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1529 = flow.variable.load.indirect %407 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1530 = flow.variable.load.indirect %408 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1531 = flow.variable.load.indirect %409 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1532 = flow.variable.load.indirect %410 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1533 = flow.variable.load.indirect %411 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1534 = flow.variable.load.indirect %412 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1535 = flow.variable.load.indirect %413 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1536 = flow.variable.load.indirect %414 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1537 = flow.variable.load.indirect %415 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1538 = flow.variable.load.indirect %416 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1539 = flow.variable.load.indirect %417 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1540 = flow.variable.load.indirect %418 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1541 = flow.variable.load.indirect %419 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1542 = flow.variable.load.indirect %420 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1543 = flow.variable.load.indirect %421 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1544 = flow.variable.load.indirect %422 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1545 = flow.variable.load.indirect %423 : !iree.ptr<tensor<128xf32>> -> 
tensor<128xf32> - %1546 = flow.variable.load.indirect %424 : !iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - %1547 = flow.variable.load.indirect %425 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1548 = flow.variable.load.indirect %426 : !iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - %1549 = flow.variable.load.indirect %427 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1550 = flow.variable.load.indirect %428 : !iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - %1551 = flow.variable.load.indirect %429 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1552 = flow.variable.load.indirect %430 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1553 = flow.variable.load.indirect %431 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1554 = flow.variable.load.indirect %432 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1555 = flow.variable.load.indirect %433 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1556 = flow.variable.load.indirect %434 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1557 = flow.variable.load.indirect %435 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1558 = flow.variable.load.indirect %436 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1559 = flow.variable.load.indirect %437 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1560 = flow.variable.load.indirect %438 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1561 = flow.variable.load.indirect %439 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1562 = flow.variable.load.indirect %440 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1563 = flow.variable.load.indirect %441 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1564 = flow.variable.load.indirect %442 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1565 = flow.variable.load.indirect %443 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1566 = flow.variable.load.indirect %444 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1567 
= flow.variable.load.indirect %445 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1568 = flow.variable.load.indirect %446 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1569 = flow.variable.load.indirect %447 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1570 = flow.variable.load.indirect %448 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1571 = flow.variable.load.indirect %449 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1572 = flow.variable.load.indirect %450 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1573 = flow.variable.load.indirect %451 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1574 = flow.variable.load.indirect %452 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1575 = flow.variable.load.indirect %453 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1576 = flow.variable.load.indirect %454 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1577 = flow.variable.load.indirect %455 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1578 = flow.variable.load.indirect %456 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1579 = flow.variable.load.indirect %457 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1580 = flow.variable.load.indirect %458 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1581 = flow.variable.load.indirect %459 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1582 = flow.variable.load.indirect %460 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1583 = flow.variable.load.indirect %461 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1584 = flow.variable.load.indirect %462 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1585 = flow.variable.load.indirect %463 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1586 = flow.variable.load.indirect %464 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1587 = flow.variable.load.indirect %465 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1588 = flow.variable.load.indirect %466 : 
!iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1589 = flow.variable.load.indirect %467 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1590 = flow.variable.load.indirect %468 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1591 = flow.variable.load.indirect %469 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1592 = flow.variable.load.indirect %470 : !iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - %1593 = flow.variable.load.indirect %471 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1594 = flow.variable.load.indirect %472 : !iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - %1595 = flow.variable.load.indirect %473 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1596 = flow.variable.load.indirect %474 : !iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - %1597 = flow.variable.load.indirect %475 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1598 = flow.variable.load.indirect %476 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1599 = flow.variable.load.indirect %477 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1600 = flow.variable.load.indirect %478 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1601 = flow.variable.load.indirect %479 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1602 = flow.variable.load.indirect %480 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1603 = flow.variable.load.indirect %481 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1604 = flow.variable.load.indirect %482 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1605 = flow.variable.load.indirect %483 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1606 = flow.variable.load.indirect %484 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1607 = flow.variable.load.indirect %485 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1608 = flow.variable.load.indirect %486 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1609 = flow.variable.load.indirect %487 : 
!iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1610 = flow.variable.load.indirect %488 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1611 = flow.variable.load.indirect %489 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1612 = flow.variable.load.indirect %490 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1613 = flow.variable.load.indirect %491 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1614 = flow.variable.load.indirect %492 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1615 = flow.variable.load.indirect %493 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1616 = flow.variable.load.indirect %494 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1617 = flow.variable.load.indirect %495 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1618 = flow.variable.load.indirect %496 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1619 = flow.variable.load.indirect %497 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1620 = flow.variable.load.indirect %498 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1621 = flow.variable.load.indirect %499 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1622 = flow.variable.load.indirect %500 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1623 = flow.variable.load.indirect %501 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1624 = flow.variable.load.indirect %502 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1625 = flow.variable.load.indirect %503 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1626 = flow.variable.load.indirect %504 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1627 = flow.variable.load.indirect %505 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1628 = flow.variable.load.indirect %506 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1629 = flow.variable.load.indirect %507 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1630 = flow.variable.load.indirect %508 : !iree.ptr<tensor<512xf32>> -> 
tensor<512xf32> - %1631 = flow.variable.load.indirect %509 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1632 = flow.variable.load.indirect %510 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1633 = flow.variable.load.indirect %511 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1634 = flow.variable.load.indirect %512 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1635 = flow.variable.load.indirect %513 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1636 = flow.variable.load.indirect %514 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1637 = flow.variable.load.indirect %515 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1638 = flow.variable.load.indirect %516 : !iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - %1639 = flow.variable.load.indirect %517 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1640 = flow.variable.load.indirect %518 : !iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - %1641 = flow.variable.load.indirect %519 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1642 = flow.variable.load.indirect %520 : !iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - %1643 = flow.variable.load.indirect %521 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1644 = flow.variable.load.indirect %522 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1645 = flow.variable.load.indirect %523 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1646 = flow.variable.load.indirect %524 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1647 = flow.variable.load.indirect %525 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1648 = flow.variable.load.indirect %526 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1649 = flow.variable.load.indirect %527 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1650 = flow.variable.load.indirect %528 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1651 = flow.variable.load.indirect %529 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1652 = 
flow.variable.load.indirect %530 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1653 = flow.variable.load.indirect %531 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1654 = flow.variable.load.indirect %532 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1655 = flow.variable.load.indirect %533 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1656 = flow.variable.load.indirect %534 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1657 = flow.variable.load.indirect %535 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1658 = flow.variable.load.indirect %536 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1659 = flow.variable.load.indirect %537 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1660 = flow.variable.load.indirect %538 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1661 = flow.variable.load.indirect %539 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1662 = flow.variable.load.indirect %540 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1663 = flow.variable.load.indirect %541 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1664 = flow.variable.load.indirect %542 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1665 = flow.variable.load.indirect %543 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1666 = flow.variable.load.indirect %544 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1667 = flow.variable.load.indirect %545 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1668 = flow.variable.load.indirect %546 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1669 = flow.variable.load.indirect %547 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1670 = flow.variable.load.indirect %548 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1671 = flow.variable.load.indirect %549 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1672 = flow.variable.load.indirect %550 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1673 = 
flow.variable.load.indirect %551 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1674 = flow.variable.load.indirect %552 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1675 = flow.variable.load.indirect %553 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1676 = flow.variable.load.indirect %554 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1677 = flow.variable.load.indirect %555 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1678 = flow.variable.load.indirect %556 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1679 = flow.variable.load.indirect %557 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1680 = flow.variable.load.indirect %558 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1681 = flow.variable.load.indirect %559 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1682 = flow.variable.load.indirect %560 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1683 = flow.variable.load.indirect %561 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1684 = flow.variable.load.indirect %562 : !iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - %1685 = flow.variable.load.indirect %563 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1686 = flow.variable.load.indirect %564 : !iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - %1687 = flow.variable.load.indirect %565 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1688 = flow.variable.load.indirect %566 : !iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - %1689 = flow.variable.load.indirect %567 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1690 = flow.variable.load.indirect %568 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1691 = flow.variable.load.indirect %569 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1692 = flow.variable.load.indirect %570 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1693 = flow.variable.load.indirect %571 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1694 = flow.variable.load.indirect %572 : 
!iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1695 = flow.variable.load.indirect %573 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1696 = flow.variable.load.indirect %574 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1697 = flow.variable.load.indirect %575 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1698 = flow.variable.load.indirect %576 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1699 = flow.variable.load.indirect %577 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1700 = flow.variable.load.indirect %578 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1701 = flow.variable.load.indirect %579 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1702 = flow.variable.load.indirect %580 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1703 = flow.variable.load.indirect %581 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1704 = flow.variable.load.indirect %582 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1705 = flow.variable.load.indirect %583 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1706 = flow.variable.load.indirect %584 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1707 = flow.variable.load.indirect %585 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1708 = flow.variable.load.indirect %586 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1709 = flow.variable.load.indirect %587 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1710 = flow.variable.load.indirect %588 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1711 = flow.variable.load.indirect %589 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1712 = flow.variable.load.indirect %590 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1713 = flow.variable.load.indirect %591 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1714 = flow.variable.load.indirect %592 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1715 = flow.variable.load.indirect %593 : !iree.ptr<tensor<128xf32>> -> 
tensor<128xf32> - %1716 = flow.variable.load.indirect %594 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1717 = flow.variable.load.indirect %595 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1718 = flow.variable.load.indirect %596 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1719 = flow.variable.load.indirect %597 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1720 = flow.variable.load.indirect %598 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1721 = flow.variable.load.indirect %599 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1722 = flow.variable.load.indirect %600 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1723 = flow.variable.load.indirect %601 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1724 = flow.variable.load.indirect %602 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1725 = flow.variable.load.indirect %603 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1726 = flow.variable.load.indirect %604 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1727 = flow.variable.load.indirect %605 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1728 = flow.variable.load.indirect %606 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1729 = flow.variable.load.indirect %607 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1730 = flow.variable.load.indirect %608 : !iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - %1731 = flow.variable.load.indirect %609 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1732 = flow.variable.load.indirect %610 : !iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - %1733 = flow.variable.load.indirect %611 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1734 = flow.variable.load.indirect %612 : !iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - %1735 = flow.variable.load.indirect %613 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1736 = flow.variable.load.indirect %614 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1737 
= flow.variable.load.indirect %615 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1738 = flow.variable.load.indirect %616 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1739 = flow.variable.load.indirect %617 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1740 = flow.variable.load.indirect %618 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1741 = flow.variable.load.indirect %619 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1742 = flow.variable.load.indirect %620 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1743 = flow.variable.load.indirect %621 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1744 = flow.variable.load.indirect %622 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1745 = flow.variable.load.indirect %623 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1746 = flow.variable.load.indirect %624 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1747 = flow.variable.load.indirect %625 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1748 = flow.variable.load.indirect %626 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1749 = flow.variable.load.indirect %627 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1750 = flow.variable.load.indirect %628 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1751 = flow.variable.load.indirect %629 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1752 = flow.variable.load.indirect %630 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1753 = flow.variable.load.indirect %631 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1754 = flow.variable.load.indirect %632 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1755 = flow.variable.load.indirect %633 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1756 = flow.variable.load.indirect %634 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1757 = flow.variable.load.indirect %635 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1758 = flow.variable.load.indirect %636 : 
!iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1759 = flow.variable.load.indirect %637 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1760 = flow.variable.load.indirect %638 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1761 = flow.variable.load.indirect %639 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1762 = flow.variable.load.indirect %640 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1763 = flow.variable.load.indirect %641 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1764 = flow.variable.load.indirect %642 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1765 = flow.variable.load.indirect %643 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1766 = flow.variable.load.indirect %644 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1767 = flow.variable.load.indirect %645 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1768 = flow.variable.load.indirect %646 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1769 = flow.variable.load.indirect %647 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1770 = flow.variable.load.indirect %648 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1771 = flow.variable.load.indirect %649 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1772 = flow.variable.load.indirect %650 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1773 = flow.variable.load.indirect %651 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1774 = flow.variable.load.indirect %652 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1775 = flow.variable.load.indirect %653 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1776 = flow.variable.load.indirect %654 : !iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - %1777 = flow.variable.load.indirect %655 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1778 = flow.variable.load.indirect %656 : !iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - %1779 = flow.variable.load.indirect %657 : !iree.ptr<tensor<128xf32>> -> 
tensor<128xf32> - %1780 = flow.variable.load.indirect %658 : !iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - %1781 = flow.variable.load.indirect %659 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1782 = flow.variable.load.indirect %660 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1783 = flow.variable.load.indirect %661 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1784 = flow.variable.load.indirect %662 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1785 = flow.variable.load.indirect %663 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1786 = flow.variable.load.indirect %664 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1787 = flow.variable.load.indirect %665 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1788 = flow.variable.load.indirect %666 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1789 = flow.variable.load.indirect %667 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1790 = flow.variable.load.indirect %668 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1791 = flow.variable.load.indirect %669 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1792 = flow.variable.load.indirect %670 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1793 = flow.variable.load.indirect %671 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1794 = flow.variable.load.indirect %672 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1795 = flow.variable.load.indirect %673 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1796 = flow.variable.load.indirect %674 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1797 = flow.variable.load.indirect %675 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1798 = flow.variable.load.indirect %676 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1799 = flow.variable.load.indirect %677 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1800 = flow.variable.load.indirect %678 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1801 = 
flow.variable.load.indirect %679 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1802 = flow.variable.load.indirect %680 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1803 = flow.variable.load.indirect %681 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1804 = flow.variable.load.indirect %682 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1805 = flow.variable.load.indirect %683 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1806 = flow.variable.load.indirect %684 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1807 = flow.variable.load.indirect %685 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1808 = flow.variable.load.indirect %686 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1809 = flow.variable.load.indirect %687 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1810 = flow.variable.load.indirect %688 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1811 = flow.variable.load.indirect %689 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1812 = flow.variable.load.indirect %690 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1813 = flow.variable.load.indirect %691 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1814 = flow.variable.load.indirect %692 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1815 = flow.variable.load.indirect %693 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1816 = flow.variable.load.indirect %694 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1817 = flow.variable.load.indirect %695 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1818 = flow.variable.load.indirect %696 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1819 = flow.variable.load.indirect %697 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1820 = flow.variable.load.indirect %698 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1821 = flow.variable.load.indirect %699 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1822 = flow.variable.load.indirect %700 : 
!iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - %1823 = flow.variable.load.indirect %701 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1824 = flow.variable.load.indirect %702 : !iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - %1825 = flow.variable.load.indirect %703 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1826 = flow.variable.load.indirect %704 : !iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - %1827 = flow.variable.load.indirect %705 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1828 = flow.variable.load.indirect %706 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1829 = flow.variable.load.indirect %707 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1830 = flow.variable.load.indirect %708 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1831 = flow.variable.load.indirect %709 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1832 = flow.variable.load.indirect %710 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1833 = flow.variable.load.indirect %711 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1834 = flow.variable.load.indirect %712 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1835 = flow.variable.load.indirect %713 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1836 = flow.variable.load.indirect %714 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1837 = flow.variable.load.indirect %715 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1838 = flow.variable.load.indirect %716 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1839 = flow.variable.load.indirect %717 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1840 = flow.variable.load.indirect %718 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1841 = flow.variable.load.indirect %719 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1842 = flow.variable.load.indirect %720 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1843 = flow.variable.load.indirect %721 : 
!iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1844 = flow.variable.load.indirect %722 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1845 = flow.variable.load.indirect %723 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1846 = flow.variable.load.indirect %724 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1847 = flow.variable.load.indirect %725 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1848 = flow.variable.load.indirect %726 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1849 = flow.variable.load.indirect %727 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1850 = flow.variable.load.indirect %728 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1851 = flow.variable.load.indirect %729 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1852 = flow.variable.load.indirect %730 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1853 = flow.variable.load.indirect %731 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1854 = flow.variable.load.indirect %732 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1855 = flow.variable.load.indirect %733 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1856 = flow.variable.load.indirect %734 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1857 = flow.variable.load.indirect %735 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1858 = flow.variable.load.indirect %736 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1859 = flow.variable.load.indirect %737 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1860 = flow.variable.load.indirect %738 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1861 = flow.variable.load.indirect %739 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1862 = flow.variable.load.indirect %740 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1863 = flow.variable.load.indirect %741 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1864 = flow.variable.load.indirect %742 : !iree.ptr<tensor<512x128xf32>> -> 
tensor<512x128xf32> - %1865 = flow.variable.load.indirect %743 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1866 = flow.variable.load.indirect %744 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1867 = flow.variable.load.indirect %745 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1868 = flow.variable.load.indirect %746 : !iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - %1869 = flow.variable.load.indirect %747 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1870 = flow.variable.load.indirect %748 : !iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - %1871 = flow.variable.load.indirect %749 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1872 = flow.variable.load.indirect %750 : !iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - %1873 = flow.variable.load.indirect %751 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1874 = flow.variable.load.indirect %752 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1875 = flow.variable.load.indirect %753 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1876 = flow.variable.load.indirect %754 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1877 = flow.variable.load.indirect %755 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1878 = flow.variable.load.indirect %756 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1879 = flow.variable.load.indirect %757 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1880 = flow.variable.load.indirect %758 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1881 = flow.variable.load.indirect %759 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1882 = flow.variable.load.indirect %760 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1883 = flow.variable.load.indirect %761 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1884 = flow.variable.load.indirect %762 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1885 = flow.variable.load.indirect %763 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1886 = 
flow.variable.load.indirect %764 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1887 = flow.variable.load.indirect %765 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1888 = flow.variable.load.indirect %766 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1889 = flow.variable.load.indirect %767 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1890 = flow.variable.load.indirect %768 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1891 = flow.variable.load.indirect %769 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1892 = flow.variable.load.indirect %770 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1893 = flow.variable.load.indirect %771 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1894 = flow.variable.load.indirect %772 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1895 = flow.variable.load.indirect %773 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1896 = flow.variable.load.indirect %774 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1897 = flow.variable.load.indirect %775 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1898 = flow.variable.load.indirect %776 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1899 = flow.variable.load.indirect %777 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1900 = flow.variable.load.indirect %778 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1901 = flow.variable.load.indirect %779 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1902 = flow.variable.load.indirect %780 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1903 = flow.variable.load.indirect %781 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1904 = flow.variable.load.indirect %782 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1905 = flow.variable.load.indirect %783 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1906 = flow.variable.load.indirect %784 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1907 = flow.variable.load.indirect %785 : 
!iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1908 = flow.variable.load.indirect %786 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1909 = flow.variable.load.indirect %787 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1910 = flow.variable.load.indirect %788 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1911 = flow.variable.load.indirect %789 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1912 = flow.variable.load.indirect %790 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1913 = flow.variable.load.indirect %791 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1914 = flow.variable.load.indirect %792 : !iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - %1915 = flow.variable.load.indirect %793 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1916 = flow.variable.load.indirect %794 : !iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - %1917 = flow.variable.load.indirect %795 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1918 = flow.variable.load.indirect %796 : !iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - %1919 = flow.variable.load.indirect %797 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1920 = flow.variable.load.indirect %798 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1921 = flow.variable.load.indirect %799 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1922 = flow.variable.load.indirect %800 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1923 = flow.variable.load.indirect %801 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1924 = flow.variable.load.indirect %802 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1925 = flow.variable.load.indirect %803 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1926 = flow.variable.load.indirect %804 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1927 = flow.variable.load.indirect %805 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1928 = flow.variable.load.indirect %806 : !iree.ptr<tensor<512x128xf32>> 
-> tensor<512x128xf32> - %1929 = flow.variable.load.indirect %807 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1930 = flow.variable.load.indirect %808 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1931 = flow.variable.load.indirect %809 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1932 = flow.variable.load.indirect %810 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1933 = flow.variable.load.indirect %811 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1934 = flow.variable.load.indirect %812 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1935 = flow.variable.load.indirect %813 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1936 = flow.variable.load.indirect %814 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1937 = flow.variable.load.indirect %815 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1938 = flow.variable.load.indirect %816 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1939 = flow.variable.load.indirect %817 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1940 = flow.variable.load.indirect %818 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1941 = flow.variable.load.indirect %819 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1942 = flow.variable.load.indirect %820 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1943 = flow.variable.load.indirect %821 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1944 = flow.variable.load.indirect %822 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1945 = flow.variable.load.indirect %823 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1946 = flow.variable.load.indirect %824 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1947 = flow.variable.load.indirect %825 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1948 = flow.variable.load.indirect %826 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1949 = flow.variable.load.indirect %827 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1950 = 
flow.variable.load.indirect %828 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1951 = flow.variable.load.indirect %829 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1952 = flow.variable.load.indirect %830 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1953 = flow.variable.load.indirect %831 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1954 = flow.variable.load.indirect %832 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1955 = flow.variable.load.indirect %833 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1956 = flow.variable.load.indirect %834 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1957 = flow.variable.load.indirect %835 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1958 = flow.variable.load.indirect %836 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1959 = flow.variable.load.indirect %837 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1960 = flow.variable.load.indirect %838 : !iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - %1961 = flow.variable.load.indirect %839 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1962 = flow.variable.load.indirect %840 : !iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - %1963 = flow.variable.load.indirect %841 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1964 = flow.variable.load.indirect %842 : !iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - %1965 = flow.variable.load.indirect %843 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1966 = flow.variable.load.indirect %844 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1967 = flow.variable.load.indirect %845 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1968 = flow.variable.load.indirect %846 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1969 = flow.variable.load.indirect %847 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1970 = flow.variable.load.indirect %848 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1971 = flow.variable.load.indirect %849 
: !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1972 = flow.variable.load.indirect %850 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1973 = flow.variable.load.indirect %851 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1974 = flow.variable.load.indirect %852 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1975 = flow.variable.load.indirect %853 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1976 = flow.variable.load.indirect %854 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1977 = flow.variable.load.indirect %855 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1978 = flow.variable.load.indirect %856 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1979 = flow.variable.load.indirect %857 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1980 = flow.variable.load.indirect %858 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1981 = flow.variable.load.indirect %859 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1982 = flow.variable.load.indirect %860 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1983 = flow.variable.load.indirect %861 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1984 = flow.variable.load.indirect %862 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1985 = flow.variable.load.indirect %863 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1986 = flow.variable.load.indirect %864 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %1987 = flow.variable.load.indirect %865 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1988 = flow.variable.load.indirect %866 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1989 = flow.variable.load.indirect %867 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1990 = flow.variable.load.indirect %868 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1991 = flow.variable.load.indirect %869 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1992 = flow.variable.load.indirect %870 : !iree.ptr<tensor<512x128xf32>> -> 
tensor<512x128xf32> - %1993 = flow.variable.load.indirect %871 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1994 = flow.variable.load.indirect %872 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %1995 = flow.variable.load.indirect %873 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1996 = flow.variable.load.indirect %874 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %1997 = flow.variable.load.indirect %875 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1998 = flow.variable.load.indirect %876 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %1999 = flow.variable.load.indirect %877 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %2000 = flow.variable.load.indirect %878 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %2001 = flow.variable.load.indirect %879 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2002 = flow.variable.load.indirect %880 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %2003 = flow.variable.load.indirect %881 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2004 = flow.variable.load.indirect %882 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2005 = flow.variable.load.indirect %883 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2006 = flow.variable.load.indirect %884 : !iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - %2007 = flow.variable.load.indirect %885 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2008 = flow.variable.load.indirect %886 : !iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - %2009 = flow.variable.load.indirect %887 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2010 = flow.variable.load.indirect %888 : !iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - %2011 = flow.variable.load.indirect %889 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2012 = flow.variable.load.indirect %890 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %2013 = flow.variable.load.indirect %891 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2014 = 
flow.variable.load.indirect %892 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2015 = flow.variable.load.indirect %893 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2016 = flow.variable.load.indirect %894 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %2017 = flow.variable.load.indirect %895 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2018 = flow.variable.load.indirect %896 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2019 = flow.variable.load.indirect %897 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2020 = flow.variable.load.indirect %898 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %2021 = flow.variable.load.indirect %899 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %2022 = flow.variable.load.indirect %900 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %2023 = flow.variable.load.indirect %901 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2024 = flow.variable.load.indirect %902 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2025 = flow.variable.load.indirect %903 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2026 = flow.variable.load.indirect %904 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %2027 = flow.variable.load.indirect %905 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %2028 = flow.variable.load.indirect %906 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %2029 = flow.variable.load.indirect %907 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2030 = flow.variable.load.indirect %908 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2031 = flow.variable.load.indirect %909 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2032 = flow.variable.load.indirect %910 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %2033 = flow.variable.load.indirect %911 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %2034 = flow.variable.load.indirect %912 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %2035 = flow.variable.load.indirect %913 
: !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2036 = flow.variable.load.indirect %914 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2037 = flow.variable.load.indirect %915 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2038 = flow.variable.load.indirect %916 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %2039 = flow.variable.load.indirect %917 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %2040 = flow.variable.load.indirect %918 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %2041 = flow.variable.load.indirect %919 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2042 = flow.variable.load.indirect %920 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2043 = flow.variable.load.indirect %921 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %2044 = flow.variable.load.indirect %922 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %2045 = flow.variable.load.indirect %923 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %2046 = flow.variable.load.indirect %924 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %2047 = flow.variable.load.indirect %925 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2048 = flow.variable.load.indirect %926 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %2049 = flow.variable.load.indirect %927 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2050 = flow.variable.load.indirect %928 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2051 = flow.variable.load.indirect %929 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2052 = flow.variable.load.indirect %930 : !iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - %2053 = flow.variable.load.indirect %931 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2054 = flow.variable.load.indirect %932 : !iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - %2055 = flow.variable.load.indirect %933 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2056 = flow.variable.load.indirect %934 : !iree.ptr<tensor<128x128xf32>> -> 
tensor<128x128xf32> - %2057 = flow.variable.load.indirect %935 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2058 = flow.variable.load.indirect %936 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %2059 = flow.variable.load.indirect %937 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2060 = flow.variable.load.indirect %938 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2061 = flow.variable.load.indirect %939 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2062 = flow.variable.load.indirect %940 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %2063 = flow.variable.load.indirect %941 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2064 = flow.variable.load.indirect %942 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2065 = flow.variable.load.indirect %943 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2066 = flow.variable.load.indirect %944 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %2067 = flow.variable.load.indirect %945 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %2068 = flow.variable.load.indirect %946 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %2069 = flow.variable.load.indirect %947 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2070 = flow.variable.load.indirect %948 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2071 = flow.variable.load.indirect %949 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2072 = flow.variable.load.indirect %950 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %2073 = flow.variable.load.indirect %951 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %2074 = flow.variable.load.indirect %952 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %2075 = flow.variable.load.indirect %953 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2076 = flow.variable.load.indirect %954 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2077 = flow.variable.load.indirect %955 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2078 = 
flow.variable.load.indirect %956 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %2079 = flow.variable.load.indirect %957 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %2080 = flow.variable.load.indirect %958 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %2081 = flow.variable.load.indirect %959 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2082 = flow.variable.load.indirect %960 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2083 = flow.variable.load.indirect %961 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2084 = flow.variable.load.indirect %962 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %2085 = flow.variable.load.indirect %963 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %2086 = flow.variable.load.indirect %964 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %2087 = flow.variable.load.indirect %965 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2088 = flow.variable.load.indirect %966 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2089 = flow.variable.load.indirect %967 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %2090 = flow.variable.load.indirect %968 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %2091 = flow.variable.load.indirect %969 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %2092 = flow.variable.load.indirect %970 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %2093 = flow.variable.load.indirect %971 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2094 = flow.variable.load.indirect %972 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %2095 = flow.variable.load.indirect %973 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2096 = flow.variable.load.indirect %974 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2097 = flow.variable.load.indirect %975 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2098 = flow.variable.load.indirect %976 : !iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - %2099 = flow.variable.load.indirect %977 
: !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2100 = flow.variable.load.indirect %978 : !iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - %2101 = flow.variable.load.indirect %979 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2102 = flow.variable.load.indirect %980 : !iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - %2103 = flow.variable.load.indirect %981 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2104 = flow.variable.load.indirect %982 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %2105 = flow.variable.load.indirect %983 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2106 = flow.variable.load.indirect %984 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2107 = flow.variable.load.indirect %985 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2108 = flow.variable.load.indirect %986 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %2109 = flow.variable.load.indirect %987 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2110 = flow.variable.load.indirect %988 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2111 = flow.variable.load.indirect %989 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2112 = flow.variable.load.indirect %990 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %2113 = flow.variable.load.indirect %991 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %2114 = flow.variable.load.indirect %992 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %2115 = flow.variable.load.indirect %993 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2116 = flow.variable.load.indirect %994 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2117 = flow.variable.load.indirect %995 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2118 = flow.variable.load.indirect %996 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %2119 = flow.variable.load.indirect %997 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %2120 = flow.variable.load.indirect %998 : !iree.ptr<tensor<128x512xf32>> 
-> tensor<128x512xf32> - %2121 = flow.variable.load.indirect %999 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2122 = flow.variable.load.indirect %1000 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2123 = flow.variable.load.indirect %1001 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2124 = flow.variable.load.indirect %1002 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %2125 = flow.variable.load.indirect %1003 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %2126 = flow.variable.load.indirect %1004 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %2127 = flow.variable.load.indirect %1005 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2128 = flow.variable.load.indirect %1006 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2129 = flow.variable.load.indirect %1007 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2130 = flow.variable.load.indirect %1008 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %2131 = flow.variable.load.indirect %1009 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %2132 = flow.variable.load.indirect %1010 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %2133 = flow.variable.load.indirect %1011 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2134 = flow.variable.load.indirect %1012 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2135 = flow.variable.load.indirect %1013 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %2136 = flow.variable.load.indirect %1014 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %2137 = flow.variable.load.indirect %1015 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %2138 = flow.variable.load.indirect %1016 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %2139 = flow.variable.load.indirect %1017 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2140 = flow.variable.load.indirect %1018 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %2141 = flow.variable.load.indirect %1019 : !iree.ptr<tensor<128xf32>> -> 
tensor<128xf32> - %2142 = flow.variable.load.indirect %1020 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2143 = flow.variable.load.indirect %1021 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2144 = flow.variable.load.indirect %1022 : !iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - %2145 = flow.variable.load.indirect %1023 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2146 = flow.variable.load.indirect %1024 : !iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - %2147 = flow.variable.load.indirect %1025 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2148 = flow.variable.load.indirect %1026 : !iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - %2149 = flow.variable.load.indirect %1027 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2150 = flow.variable.load.indirect %1028 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %2151 = flow.variable.load.indirect %1029 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2152 = flow.variable.load.indirect %1030 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2153 = flow.variable.load.indirect %1031 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2154 = flow.variable.load.indirect %1032 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %2155 = flow.variable.load.indirect %1033 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2156 = flow.variable.load.indirect %1034 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2157 = flow.variable.load.indirect %1035 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2158 = flow.variable.load.indirect %1036 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %2159 = flow.variable.load.indirect %1037 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %2160 = flow.variable.load.indirect %1038 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %2161 = flow.variable.load.indirect %1039 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2162 = flow.variable.load.indirect %1040 : !iree.ptr<tensor<128xf32>> -> 
tensor<128xf32> - %2163 = flow.variable.load.indirect %1041 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2164 = flow.variable.load.indirect %1042 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %2165 = flow.variable.load.indirect %1043 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %2166 = flow.variable.load.indirect %1044 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %2167 = flow.variable.load.indirect %1045 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2168 = flow.variable.load.indirect %1046 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2169 = flow.variable.load.indirect %1047 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2170 = flow.variable.load.indirect %1048 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %2171 = flow.variable.load.indirect %1049 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %2172 = flow.variable.load.indirect %1050 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %2173 = flow.variable.load.indirect %1051 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2174 = flow.variable.load.indirect %1052 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2175 = flow.variable.load.indirect %1053 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2176 = flow.variable.load.indirect %1054 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %2177 = flow.variable.load.indirect %1055 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %2178 = flow.variable.load.indirect %1056 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %2179 = flow.variable.load.indirect %1057 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2180 = flow.variable.load.indirect %1058 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2181 = flow.variable.load.indirect %1059 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %2182 = flow.variable.load.indirect %1060 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %2183 = flow.variable.load.indirect %1061 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - 
%2184 = flow.variable.load.indirect %1062 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %2185 = flow.variable.load.indirect %1063 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2186 = flow.variable.load.indirect %1064 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %2187 = flow.variable.load.indirect %1065 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2188 = flow.variable.load.indirect %1066 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2189 = flow.variable.load.indirect %1067 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2190 = flow.variable.load.indirect %1068 : !iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - %2191 = flow.variable.load.indirect %1069 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2192 = flow.variable.load.indirect %1070 : !iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - %2193 = flow.variable.load.indirect %1071 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2194 = flow.variable.load.indirect %1072 : !iree.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> - %2195 = flow.variable.load.indirect %1073 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2196 = flow.variable.load.indirect %1074 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %2197 = flow.variable.load.indirect %1075 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2198 = flow.variable.load.indirect %1076 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2199 = flow.variable.load.indirect %1077 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2200 = flow.variable.load.indirect %1078 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %2201 = flow.variable.load.indirect %1079 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2202 = flow.variable.load.indirect %1080 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2203 = flow.variable.load.indirect %1081 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2204 = flow.variable.load.indirect %1082 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - 
%2205 = flow.variable.load.indirect %1083 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %2206 = flow.variable.load.indirect %1084 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %2207 = flow.variable.load.indirect %1085 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2208 = flow.variable.load.indirect %1086 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2209 = flow.variable.load.indirect %1087 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2210 = flow.variable.load.indirect %1088 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %2211 = flow.variable.load.indirect %1089 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %2212 = flow.variable.load.indirect %1090 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %2213 = flow.variable.load.indirect %1091 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2214 = flow.variable.load.indirect %1092 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2215 = flow.variable.load.indirect %1093 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2216 = flow.variable.load.indirect %1094 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %2217 = flow.variable.load.indirect %1095 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %2218 = flow.variable.load.indirect %1096 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %2219 = flow.variable.load.indirect %1097 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2220 = flow.variable.load.indirect %1098 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2221 = flow.variable.load.indirect %1099 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2222 = flow.variable.load.indirect %1100 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %2223 = flow.variable.load.indirect %1101 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %2224 = flow.variable.load.indirect %1102 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %2225 = flow.variable.load.indirect %1103 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2226 = 
flow.variable.load.indirect %1104 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2227 = flow.variable.load.indirect %1105 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %2228 = flow.variable.load.indirect %1106 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %2229 = flow.variable.load.indirect %1107 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %2230 = flow.variable.load.indirect %1108 : !iree.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> - %2231 = flow.variable.load.indirect %1109 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %2232 = flow.variable.load.indirect %1110 : !iree.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> - %2233 = flow.variable.load.indirect %1111 : !iree.ptr<tensor<2xf32>> -> tensor<2xf32> - %2234 = flow.variable.load.indirect %1112 : !iree.ptr<tensor<2x512xf32>> -> tensor<2x512xf32> + %1127 = flow.variable.load.indirect %5 : !util.ptr<tensor<2x512xf32>> -> tensor<2x512xf32> + %1128 = flow.variable.load.indirect %6 : !util.ptr<tensor<30522x128xf32>> -> tensor<30522x128xf32> + %1129 = flow.variable.load.indirect %7 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1130 = flow.variable.load.indirect %8 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1131 = flow.variable.load.indirect %9 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1132 = flow.variable.load.indirect %10 : !util.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> + %1133 = flow.variable.load.indirect %11 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1134 = flow.variable.load.indirect %12 : !util.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> + %1135 = flow.variable.load.indirect %13 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1136 = flow.variable.load.indirect %14 : !util.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> + %1137 = flow.variable.load.indirect %15 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1138 = flow.variable.load.indirect %16 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1139 = flow.variable.load.indirect %17 
: !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1140 = flow.variable.load.indirect %18 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1141 = flow.variable.load.indirect %19 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1142 = flow.variable.load.indirect %20 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1143 = flow.variable.load.indirect %21 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1144 = flow.variable.load.indirect %22 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1145 = flow.variable.load.indirect %23 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1146 = flow.variable.load.indirect %24 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1147 = flow.variable.load.indirect %25 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1148 = flow.variable.load.indirect %26 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1149 = flow.variable.load.indirect %27 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1150 = flow.variable.load.indirect %28 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1151 = flow.variable.load.indirect %29 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1152 = flow.variable.load.indirect %30 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1153 = flow.variable.load.indirect %31 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1154 = flow.variable.load.indirect %32 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1155 = flow.variable.load.indirect %33 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1156 = flow.variable.load.indirect %34 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1157 = flow.variable.load.indirect %35 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1158 = flow.variable.load.indirect %36 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1159 = flow.variable.load.indirect %37 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1160 = flow.variable.load.indirect %38 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + 
%1161 = flow.variable.load.indirect %39 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1162 = flow.variable.load.indirect %40 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1163 = flow.variable.load.indirect %41 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1164 = flow.variable.load.indirect %42 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1165 = flow.variable.load.indirect %43 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1166 = flow.variable.load.indirect %44 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1167 = flow.variable.load.indirect %45 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1168 = flow.variable.load.indirect %46 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1169 = flow.variable.load.indirect %47 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1170 = flow.variable.load.indirect %48 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1171 = flow.variable.load.indirect %49 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1172 = flow.variable.load.indirect %50 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1173 = flow.variable.load.indirect %51 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1174 = flow.variable.load.indirect %52 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1175 = flow.variable.load.indirect %53 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1176 = flow.variable.load.indirect %54 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1177 = flow.variable.load.indirect %55 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1178 = flow.variable.load.indirect %56 : !util.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> + %1179 = flow.variable.load.indirect %57 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1180 = flow.variable.load.indirect %58 : !util.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> + %1181 = flow.variable.load.indirect %59 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1182 = flow.variable.load.indirect %60 : 
!util.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> + %1183 = flow.variable.load.indirect %61 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1184 = flow.variable.load.indirect %62 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1185 = flow.variable.load.indirect %63 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1186 = flow.variable.load.indirect %64 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1187 = flow.variable.load.indirect %65 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1188 = flow.variable.load.indirect %66 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1189 = flow.variable.load.indirect %67 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1190 = flow.variable.load.indirect %68 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1191 = flow.variable.load.indirect %69 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1192 = flow.variable.load.indirect %70 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1193 = flow.variable.load.indirect %71 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1194 = flow.variable.load.indirect %72 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1195 = flow.variable.load.indirect %73 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1196 = flow.variable.load.indirect %74 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1197 = flow.variable.load.indirect %75 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1198 = flow.variable.load.indirect %76 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1199 = flow.variable.load.indirect %77 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1200 = flow.variable.load.indirect %78 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1201 = flow.variable.load.indirect %79 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1202 = flow.variable.load.indirect %80 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1203 = flow.variable.load.indirect %81 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1204 
= flow.variable.load.indirect %82 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1205 = flow.variable.load.indirect %83 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1206 = flow.variable.load.indirect %84 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1207 = flow.variable.load.indirect %85 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1208 = flow.variable.load.indirect %86 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1209 = flow.variable.load.indirect %87 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1210 = flow.variable.load.indirect %88 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1211 = flow.variable.load.indirect %89 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1212 = flow.variable.load.indirect %90 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1213 = flow.variable.load.indirect %91 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1214 = flow.variable.load.indirect %92 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1215 = flow.variable.load.indirect %93 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1216 = flow.variable.load.indirect %94 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1217 = flow.variable.load.indirect %95 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1218 = flow.variable.load.indirect %96 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1219 = flow.variable.load.indirect %97 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1220 = flow.variable.load.indirect %98 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1221 = flow.variable.load.indirect %99 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1222 = flow.variable.load.indirect %100 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1223 = flow.variable.load.indirect %101 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1224 = flow.variable.load.indirect %102 : !util.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> + %1225 = flow.variable.load.indirect %103 : 
!util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1226 = flow.variable.load.indirect %104 : !util.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> + %1227 = flow.variable.load.indirect %105 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1228 = flow.variable.load.indirect %106 : !util.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> + %1229 = flow.variable.load.indirect %107 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1230 = flow.variable.load.indirect %108 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1231 = flow.variable.load.indirect %109 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1232 = flow.variable.load.indirect %110 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1233 = flow.variable.load.indirect %111 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1234 = flow.variable.load.indirect %112 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1235 = flow.variable.load.indirect %113 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1236 = flow.variable.load.indirect %114 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1237 = flow.variable.load.indirect %115 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1238 = flow.variable.load.indirect %116 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1239 = flow.variable.load.indirect %117 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1240 = flow.variable.load.indirect %118 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1241 = flow.variable.load.indirect %119 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1242 = flow.variable.load.indirect %120 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1243 = flow.variable.load.indirect %121 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1244 = flow.variable.load.indirect %122 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1245 = flow.variable.load.indirect %123 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1246 = flow.variable.load.indirect %124 : !util.ptr<tensor<128x512xf32>> 
-> tensor<128x512xf32> + %1247 = flow.variable.load.indirect %125 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1248 = flow.variable.load.indirect %126 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1249 = flow.variable.load.indirect %127 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1250 = flow.variable.load.indirect %128 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1251 = flow.variable.load.indirect %129 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1252 = flow.variable.load.indirect %130 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1253 = flow.variable.load.indirect %131 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1254 = flow.variable.load.indirect %132 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1255 = flow.variable.load.indirect %133 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1256 = flow.variable.load.indirect %134 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1257 = flow.variable.load.indirect %135 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1258 = flow.variable.load.indirect %136 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1259 = flow.variable.load.indirect %137 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1260 = flow.variable.load.indirect %138 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1261 = flow.variable.load.indirect %139 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1262 = flow.variable.load.indirect %140 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1263 = flow.variable.load.indirect %141 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1264 = flow.variable.load.indirect %142 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1265 = flow.variable.load.indirect %143 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1266 = flow.variable.load.indirect %144 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1267 = flow.variable.load.indirect %145 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1268 = 
flow.variable.load.indirect %146 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1269 = flow.variable.load.indirect %147 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1270 = flow.variable.load.indirect %148 : !util.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> + %1271 = flow.variable.load.indirect %149 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1272 = flow.variable.load.indirect %150 : !util.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> + %1273 = flow.variable.load.indirect %151 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1274 = flow.variable.load.indirect %152 : !util.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> + %1275 = flow.variable.load.indirect %153 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1276 = flow.variable.load.indirect %154 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1277 = flow.variable.load.indirect %155 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1278 = flow.variable.load.indirect %156 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1279 = flow.variable.load.indirect %157 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1280 = flow.variable.load.indirect %158 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1281 = flow.variable.load.indirect %159 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1282 = flow.variable.load.indirect %160 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1283 = flow.variable.load.indirect %161 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1284 = flow.variable.load.indirect %162 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1285 = flow.variable.load.indirect %163 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1286 = flow.variable.load.indirect %164 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1287 = flow.variable.load.indirect %165 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1288 = flow.variable.load.indirect %166 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1289 = flow.variable.load.indirect %167 
: !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1290 = flow.variable.load.indirect %168 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1291 = flow.variable.load.indirect %169 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1292 = flow.variable.load.indirect %170 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1293 = flow.variable.load.indirect %171 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1294 = flow.variable.load.indirect %172 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1295 = flow.variable.load.indirect %173 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1296 = flow.variable.load.indirect %174 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1297 = flow.variable.load.indirect %175 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1298 = flow.variable.load.indirect %176 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1299 = flow.variable.load.indirect %177 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1300 = flow.variable.load.indirect %178 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1301 = flow.variable.load.indirect %179 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1302 = flow.variable.load.indirect %180 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1303 = flow.variable.load.indirect %181 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1304 = flow.variable.load.indirect %182 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1305 = flow.variable.load.indirect %183 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1306 = flow.variable.load.indirect %184 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1307 = flow.variable.load.indirect %185 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1308 = flow.variable.load.indirect %186 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1309 = flow.variable.load.indirect %187 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1310 = flow.variable.load.indirect %188 : !util.ptr<tensor<128x512xf32>> -> 
tensor<128x512xf32> + %1311 = flow.variable.load.indirect %189 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1312 = flow.variable.load.indirect %190 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1313 = flow.variable.load.indirect %191 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1314 = flow.variable.load.indirect %192 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1315 = flow.variable.load.indirect %193 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1316 = flow.variable.load.indirect %194 : !util.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> + %1317 = flow.variable.load.indirect %195 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1318 = flow.variable.load.indirect %196 : !util.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> + %1319 = flow.variable.load.indirect %197 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1320 = flow.variable.load.indirect %198 : !util.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> + %1321 = flow.variable.load.indirect %199 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1322 = flow.variable.load.indirect %200 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1323 = flow.variable.load.indirect %201 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1324 = flow.variable.load.indirect %202 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1325 = flow.variable.load.indirect %203 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1326 = flow.variable.load.indirect %204 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1327 = flow.variable.load.indirect %205 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1328 = flow.variable.load.indirect %206 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1329 = flow.variable.load.indirect %207 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1330 = flow.variable.load.indirect %208 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1331 = flow.variable.load.indirect %209 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1332 = 
flow.variable.load.indirect %210 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1333 = flow.variable.load.indirect %211 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1334 = flow.variable.load.indirect %212 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1335 = flow.variable.load.indirect %213 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1336 = flow.variable.load.indirect %214 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1337 = flow.variable.load.indirect %215 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1338 = flow.variable.load.indirect %216 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1339 = flow.variable.load.indirect %217 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1340 = flow.variable.load.indirect %218 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1341 = flow.variable.load.indirect %219 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1342 = flow.variable.load.indirect %220 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1343 = flow.variable.load.indirect %221 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1344 = flow.variable.load.indirect %222 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1345 = flow.variable.load.indirect %223 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1346 = flow.variable.load.indirect %224 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1347 = flow.variable.load.indirect %225 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1348 = flow.variable.load.indirect %226 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1349 = flow.variable.load.indirect %227 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1350 = flow.variable.load.indirect %228 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1351 = flow.variable.load.indirect %229 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1352 = flow.variable.load.indirect %230 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1353 = flow.variable.load.indirect %231 
: !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1354 = flow.variable.load.indirect %232 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1355 = flow.variable.load.indirect %233 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1356 = flow.variable.load.indirect %234 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1357 = flow.variable.load.indirect %235 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1358 = flow.variable.load.indirect %236 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1359 = flow.variable.load.indirect %237 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1360 = flow.variable.load.indirect %238 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1361 = flow.variable.load.indirect %239 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1362 = flow.variable.load.indirect %240 : !util.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> + %1363 = flow.variable.load.indirect %241 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1364 = flow.variable.load.indirect %242 : !util.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> + %1365 = flow.variable.load.indirect %243 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1366 = flow.variable.load.indirect %244 : !util.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> + %1367 = flow.variable.load.indirect %245 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1368 = flow.variable.load.indirect %246 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1369 = flow.variable.load.indirect %247 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1370 = flow.variable.load.indirect %248 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1371 = flow.variable.load.indirect %249 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1372 = flow.variable.load.indirect %250 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1373 = flow.variable.load.indirect %251 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1374 = flow.variable.load.indirect %252 : !util.ptr<tensor<128xf32>> -> 
tensor<128xf32> + %1375 = flow.variable.load.indirect %253 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1376 = flow.variable.load.indirect %254 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1377 = flow.variable.load.indirect %255 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1378 = flow.variable.load.indirect %256 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1379 = flow.variable.load.indirect %257 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1380 = flow.variable.load.indirect %258 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1381 = flow.variable.load.indirect %259 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1382 = flow.variable.load.indirect %260 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1383 = flow.variable.load.indirect %261 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1384 = flow.variable.load.indirect %262 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1385 = flow.variable.load.indirect %263 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1386 = flow.variable.load.indirect %264 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1387 = flow.variable.load.indirect %265 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1388 = flow.variable.load.indirect %266 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1389 = flow.variable.load.indirect %267 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1390 = flow.variable.load.indirect %268 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1391 = flow.variable.load.indirect %269 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1392 = flow.variable.load.indirect %270 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1393 = flow.variable.load.indirect %271 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1394 = flow.variable.load.indirect %272 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1395 = flow.variable.load.indirect %273 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1396 = 
flow.variable.load.indirect %274 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1397 = flow.variable.load.indirect %275 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1398 = flow.variable.load.indirect %276 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1399 = flow.variable.load.indirect %277 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1400 = flow.variable.load.indirect %278 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1401 = flow.variable.load.indirect %279 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1402 = flow.variable.load.indirect %280 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1403 = flow.variable.load.indirect %281 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1404 = flow.variable.load.indirect %282 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1405 = flow.variable.load.indirect %283 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1406 = flow.variable.load.indirect %284 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1407 = flow.variable.load.indirect %285 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1408 = flow.variable.load.indirect %286 : !util.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> + %1409 = flow.variable.load.indirect %287 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1410 = flow.variable.load.indirect %288 : !util.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> + %1411 = flow.variable.load.indirect %289 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1412 = flow.variable.load.indirect %290 : !util.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> + %1413 = flow.variable.load.indirect %291 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1414 = flow.variable.load.indirect %292 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1415 = flow.variable.load.indirect %293 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1416 = flow.variable.load.indirect %294 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1417 = flow.variable.load.indirect %295 
: !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1418 = flow.variable.load.indirect %296 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1419 = flow.variable.load.indirect %297 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1420 = flow.variable.load.indirect %298 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1421 = flow.variable.load.indirect %299 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1422 = flow.variable.load.indirect %300 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1423 = flow.variable.load.indirect %301 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1424 = flow.variable.load.indirect %302 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1425 = flow.variable.load.indirect %303 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1426 = flow.variable.load.indirect %304 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1427 = flow.variable.load.indirect %305 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1428 = flow.variable.load.indirect %306 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1429 = flow.variable.load.indirect %307 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1430 = flow.variable.load.indirect %308 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1431 = flow.variable.load.indirect %309 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1432 = flow.variable.load.indirect %310 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1433 = flow.variable.load.indirect %311 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1434 = flow.variable.load.indirect %312 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1435 = flow.variable.load.indirect %313 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1436 = flow.variable.load.indirect %314 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1437 = flow.variable.load.indirect %315 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1438 = flow.variable.load.indirect %316 : !util.ptr<tensor<128xf32>> -> 
tensor<128xf32> + %1439 = flow.variable.load.indirect %317 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1440 = flow.variable.load.indirect %318 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1441 = flow.variable.load.indirect %319 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1442 = flow.variable.load.indirect %320 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1443 = flow.variable.load.indirect %321 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1444 = flow.variable.load.indirect %322 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1445 = flow.variable.load.indirect %323 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1446 = flow.variable.load.indirect %324 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1447 = flow.variable.load.indirect %325 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1448 = flow.variable.load.indirect %326 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1449 = flow.variable.load.indirect %327 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1450 = flow.variable.load.indirect %328 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1451 = flow.variable.load.indirect %329 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1452 = flow.variable.load.indirect %330 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1453 = flow.variable.load.indirect %331 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1454 = flow.variable.load.indirect %332 : !util.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> + %1455 = flow.variable.load.indirect %333 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1456 = flow.variable.load.indirect %334 : !util.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> + %1457 = flow.variable.load.indirect %335 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1458 = flow.variable.load.indirect %336 : !util.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> + %1459 = flow.variable.load.indirect %337 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1460 = 
flow.variable.load.indirect %338 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1461 = flow.variable.load.indirect %339 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1462 = flow.variable.load.indirect %340 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1463 = flow.variable.load.indirect %341 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1464 = flow.variable.load.indirect %342 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1465 = flow.variable.load.indirect %343 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1466 = flow.variable.load.indirect %344 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1467 = flow.variable.load.indirect %345 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1468 = flow.variable.load.indirect %346 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1469 = flow.variable.load.indirect %347 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1470 = flow.variable.load.indirect %348 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1471 = flow.variable.load.indirect %349 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1472 = flow.variable.load.indirect %350 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1473 = flow.variable.load.indirect %351 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1474 = flow.variable.load.indirect %352 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1475 = flow.variable.load.indirect %353 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1476 = flow.variable.load.indirect %354 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1477 = flow.variable.load.indirect %355 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1478 = flow.variable.load.indirect %356 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1479 = flow.variable.load.indirect %357 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1480 = flow.variable.load.indirect %358 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1481 = flow.variable.load.indirect %359 
: !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1482 = flow.variable.load.indirect %360 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1483 = flow.variable.load.indirect %361 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1484 = flow.variable.load.indirect %362 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1485 = flow.variable.load.indirect %363 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1486 = flow.variable.load.indirect %364 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1487 = flow.variable.load.indirect %365 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1488 = flow.variable.load.indirect %366 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1489 = flow.variable.load.indirect %367 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1490 = flow.variable.load.indirect %368 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1491 = flow.variable.load.indirect %369 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1492 = flow.variable.load.indirect %370 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1493 = flow.variable.load.indirect %371 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1494 = flow.variable.load.indirect %372 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1495 = flow.variable.load.indirect %373 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1496 = flow.variable.load.indirect %374 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1497 = flow.variable.load.indirect %375 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1498 = flow.variable.load.indirect %376 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1499 = flow.variable.load.indirect %377 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1500 = flow.variable.load.indirect %378 : !util.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> + %1501 = flow.variable.load.indirect %379 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1502 = flow.variable.load.indirect %380 : !util.ptr<tensor<128x128xf32>> -> 
tensor<128x128xf32> + %1503 = flow.variable.load.indirect %381 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1504 = flow.variable.load.indirect %382 : !util.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> + %1505 = flow.variable.load.indirect %383 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1506 = flow.variable.load.indirect %384 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1507 = flow.variable.load.indirect %385 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1508 = flow.variable.load.indirect %386 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1509 = flow.variable.load.indirect %387 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1510 = flow.variable.load.indirect %388 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1511 = flow.variable.load.indirect %389 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1512 = flow.variable.load.indirect %390 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1513 = flow.variable.load.indirect %391 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1514 = flow.variable.load.indirect %392 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1515 = flow.variable.load.indirect %393 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1516 = flow.variable.load.indirect %394 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1517 = flow.variable.load.indirect %395 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1518 = flow.variable.load.indirect %396 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1519 = flow.variable.load.indirect %397 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1520 = flow.variable.load.indirect %398 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1521 = flow.variable.load.indirect %399 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1522 = flow.variable.load.indirect %400 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1523 = flow.variable.load.indirect %401 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1524 = 
flow.variable.load.indirect %402 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1525 = flow.variable.load.indirect %403 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1526 = flow.variable.load.indirect %404 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1527 = flow.variable.load.indirect %405 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1528 = flow.variable.load.indirect %406 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1529 = flow.variable.load.indirect %407 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1530 = flow.variable.load.indirect %408 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1531 = flow.variable.load.indirect %409 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1532 = flow.variable.load.indirect %410 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1533 = flow.variable.load.indirect %411 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1534 = flow.variable.load.indirect %412 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1535 = flow.variable.load.indirect %413 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1536 = flow.variable.load.indirect %414 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1537 = flow.variable.load.indirect %415 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1538 = flow.variable.load.indirect %416 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1539 = flow.variable.load.indirect %417 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1540 = flow.variable.load.indirect %418 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1541 = flow.variable.load.indirect %419 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1542 = flow.variable.load.indirect %420 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1543 = flow.variable.load.indirect %421 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1544 = flow.variable.load.indirect %422 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1545 = flow.variable.load.indirect %423 : 
!util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1546 = flow.variable.load.indirect %424 : !util.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> + %1547 = flow.variable.load.indirect %425 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1548 = flow.variable.load.indirect %426 : !util.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> + %1549 = flow.variable.load.indirect %427 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1550 = flow.variable.load.indirect %428 : !util.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> + %1551 = flow.variable.load.indirect %429 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1552 = flow.variable.load.indirect %430 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1553 = flow.variable.load.indirect %431 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1554 = flow.variable.load.indirect %432 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1555 = flow.variable.load.indirect %433 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1556 = flow.variable.load.indirect %434 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1557 = flow.variable.load.indirect %435 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1558 = flow.variable.load.indirect %436 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1559 = flow.variable.load.indirect %437 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1560 = flow.variable.load.indirect %438 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1561 = flow.variable.load.indirect %439 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1562 = flow.variable.load.indirect %440 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1563 = flow.variable.load.indirect %441 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1564 = flow.variable.load.indirect %442 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1565 = flow.variable.load.indirect %443 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1566 = flow.variable.load.indirect %444 : !util.ptr<tensor<512x128xf32>> 
-> tensor<512x128xf32> + %1567 = flow.variable.load.indirect %445 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1568 = flow.variable.load.indirect %446 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1569 = flow.variable.load.indirect %447 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1570 = flow.variable.load.indirect %448 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1571 = flow.variable.load.indirect %449 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1572 = flow.variable.load.indirect %450 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1573 = flow.variable.load.indirect %451 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1574 = flow.variable.load.indirect %452 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1575 = flow.variable.load.indirect %453 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1576 = flow.variable.load.indirect %454 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1577 = flow.variable.load.indirect %455 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1578 = flow.variable.load.indirect %456 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1579 = flow.variable.load.indirect %457 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1580 = flow.variable.load.indirect %458 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1581 = flow.variable.load.indirect %459 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1582 = flow.variable.load.indirect %460 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1583 = flow.variable.load.indirect %461 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1584 = flow.variable.load.indirect %462 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1585 = flow.variable.load.indirect %463 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1586 = flow.variable.load.indirect %464 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1587 = flow.variable.load.indirect %465 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1588 = 
flow.variable.load.indirect %466 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1589 = flow.variable.load.indirect %467 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1590 = flow.variable.load.indirect %468 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1591 = flow.variable.load.indirect %469 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1592 = flow.variable.load.indirect %470 : !util.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> + %1593 = flow.variable.load.indirect %471 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1594 = flow.variable.load.indirect %472 : !util.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> + %1595 = flow.variable.load.indirect %473 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1596 = flow.variable.load.indirect %474 : !util.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> + %1597 = flow.variable.load.indirect %475 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1598 = flow.variable.load.indirect %476 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1599 = flow.variable.load.indirect %477 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1600 = flow.variable.load.indirect %478 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1601 = flow.variable.load.indirect %479 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1602 = flow.variable.load.indirect %480 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1603 = flow.variable.load.indirect %481 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1604 = flow.variable.load.indirect %482 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1605 = flow.variable.load.indirect %483 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1606 = flow.variable.load.indirect %484 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1607 = flow.variable.load.indirect %485 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1608 = flow.variable.load.indirect %486 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1609 = 
flow.variable.load.indirect %487 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1610 = flow.variable.load.indirect %488 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1611 = flow.variable.load.indirect %489 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1612 = flow.variable.load.indirect %490 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1613 = flow.variable.load.indirect %491 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1614 = flow.variable.load.indirect %492 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1615 = flow.variable.load.indirect %493 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1616 = flow.variable.load.indirect %494 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1617 = flow.variable.load.indirect %495 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1618 = flow.variable.load.indirect %496 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1619 = flow.variable.load.indirect %497 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1620 = flow.variable.load.indirect %498 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1621 = flow.variable.load.indirect %499 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1622 = flow.variable.load.indirect %500 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1623 = flow.variable.load.indirect %501 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1624 = flow.variable.load.indirect %502 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1625 = flow.variable.load.indirect %503 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1626 = flow.variable.load.indirect %504 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1627 = flow.variable.load.indirect %505 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1628 = flow.variable.load.indirect %506 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1629 = flow.variable.load.indirect %507 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1630 = flow.variable.load.indirect %508 : 
!util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1631 = flow.variable.load.indirect %509 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1632 = flow.variable.load.indirect %510 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1633 = flow.variable.load.indirect %511 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1634 = flow.variable.load.indirect %512 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1635 = flow.variable.load.indirect %513 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1636 = flow.variable.load.indirect %514 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1637 = flow.variable.load.indirect %515 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1638 = flow.variable.load.indirect %516 : !util.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> + %1639 = flow.variable.load.indirect %517 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1640 = flow.variable.load.indirect %518 : !util.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> + %1641 = flow.variable.load.indirect %519 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1642 = flow.variable.load.indirect %520 : !util.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> + %1643 = flow.variable.load.indirect %521 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1644 = flow.variable.load.indirect %522 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1645 = flow.variable.load.indirect %523 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1646 = flow.variable.load.indirect %524 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1647 = flow.variable.load.indirect %525 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1648 = flow.variable.load.indirect %526 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1649 = flow.variable.load.indirect %527 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1650 = flow.variable.load.indirect %528 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1651 = flow.variable.load.indirect %529 : !util.ptr<tensor<128xf32>> -> 
tensor<128xf32> + %1652 = flow.variable.load.indirect %530 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1653 = flow.variable.load.indirect %531 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1654 = flow.variable.load.indirect %532 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1655 = flow.variable.load.indirect %533 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1656 = flow.variable.load.indirect %534 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1657 = flow.variable.load.indirect %535 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1658 = flow.variable.load.indirect %536 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1659 = flow.variable.load.indirect %537 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1660 = flow.variable.load.indirect %538 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1661 = flow.variable.load.indirect %539 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1662 = flow.variable.load.indirect %540 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1663 = flow.variable.load.indirect %541 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1664 = flow.variable.load.indirect %542 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1665 = flow.variable.load.indirect %543 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1666 = flow.variable.load.indirect %544 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1667 = flow.variable.load.indirect %545 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1668 = flow.variable.load.indirect %546 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1669 = flow.variable.load.indirect %547 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1670 = flow.variable.load.indirect %548 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1671 = flow.variable.load.indirect %549 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1672 = flow.variable.load.indirect %550 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1673 
= flow.variable.load.indirect %551 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1674 = flow.variable.load.indirect %552 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1675 = flow.variable.load.indirect %553 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1676 = flow.variable.load.indirect %554 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1677 = flow.variable.load.indirect %555 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1678 = flow.variable.load.indirect %556 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1679 = flow.variable.load.indirect %557 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1680 = flow.variable.load.indirect %558 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1681 = flow.variable.load.indirect %559 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1682 = flow.variable.load.indirect %560 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1683 = flow.variable.load.indirect %561 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1684 = flow.variable.load.indirect %562 : !util.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> + %1685 = flow.variable.load.indirect %563 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1686 = flow.variable.load.indirect %564 : !util.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> + %1687 = flow.variable.load.indirect %565 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1688 = flow.variable.load.indirect %566 : !util.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> + %1689 = flow.variable.load.indirect %567 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1690 = flow.variable.load.indirect %568 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1691 = flow.variable.load.indirect %569 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1692 = flow.variable.load.indirect %570 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1693 = flow.variable.load.indirect %571 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1694 = flow.variable.load.indirect %572 : 
!util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1695 = flow.variable.load.indirect %573 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1696 = flow.variable.load.indirect %574 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1697 = flow.variable.load.indirect %575 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1698 = flow.variable.load.indirect %576 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1699 = flow.variable.load.indirect %577 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1700 = flow.variable.load.indirect %578 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1701 = flow.variable.load.indirect %579 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1702 = flow.variable.load.indirect %580 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1703 = flow.variable.load.indirect %581 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1704 = flow.variable.load.indirect %582 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1705 = flow.variable.load.indirect %583 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1706 = flow.variable.load.indirect %584 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1707 = flow.variable.load.indirect %585 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1708 = flow.variable.load.indirect %586 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1709 = flow.variable.load.indirect %587 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1710 = flow.variable.load.indirect %588 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1711 = flow.variable.load.indirect %589 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1712 = flow.variable.load.indirect %590 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1713 = flow.variable.load.indirect %591 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1714 = flow.variable.load.indirect %592 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1715 = flow.variable.load.indirect %593 : !util.ptr<tensor<128xf32>> -> 
tensor<128xf32> + %1716 = flow.variable.load.indirect %594 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1717 = flow.variable.load.indirect %595 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1718 = flow.variable.load.indirect %596 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1719 = flow.variable.load.indirect %597 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1720 = flow.variable.load.indirect %598 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1721 = flow.variable.load.indirect %599 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1722 = flow.variable.load.indirect %600 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1723 = flow.variable.load.indirect %601 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1724 = flow.variable.load.indirect %602 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1725 = flow.variable.load.indirect %603 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1726 = flow.variable.load.indirect %604 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1727 = flow.variable.load.indirect %605 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1728 = flow.variable.load.indirect %606 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1729 = flow.variable.load.indirect %607 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1730 = flow.variable.load.indirect %608 : !util.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> + %1731 = flow.variable.load.indirect %609 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1732 = flow.variable.load.indirect %610 : !util.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> + %1733 = flow.variable.load.indirect %611 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1734 = flow.variable.load.indirect %612 : !util.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> + %1735 = flow.variable.load.indirect %613 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1736 = flow.variable.load.indirect %614 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1737 
= flow.variable.load.indirect %615 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1738 = flow.variable.load.indirect %616 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1739 = flow.variable.load.indirect %617 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1740 = flow.variable.load.indirect %618 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1741 = flow.variable.load.indirect %619 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1742 = flow.variable.load.indirect %620 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1743 = flow.variable.load.indirect %621 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1744 = flow.variable.load.indirect %622 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1745 = flow.variable.load.indirect %623 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1746 = flow.variable.load.indirect %624 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1747 = flow.variable.load.indirect %625 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1748 = flow.variable.load.indirect %626 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1749 = flow.variable.load.indirect %627 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1750 = flow.variable.load.indirect %628 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1751 = flow.variable.load.indirect %629 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1752 = flow.variable.load.indirect %630 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1753 = flow.variable.load.indirect %631 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1754 = flow.variable.load.indirect %632 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1755 = flow.variable.load.indirect %633 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1756 = flow.variable.load.indirect %634 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1757 = flow.variable.load.indirect %635 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1758 = flow.variable.load.indirect %636 : 
!util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1759 = flow.variable.load.indirect %637 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1760 = flow.variable.load.indirect %638 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1761 = flow.variable.load.indirect %639 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1762 = flow.variable.load.indirect %640 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1763 = flow.variable.load.indirect %641 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1764 = flow.variable.load.indirect %642 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1765 = flow.variable.load.indirect %643 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1766 = flow.variable.load.indirect %644 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1767 = flow.variable.load.indirect %645 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1768 = flow.variable.load.indirect %646 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1769 = flow.variable.load.indirect %647 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1770 = flow.variable.load.indirect %648 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1771 = flow.variable.load.indirect %649 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1772 = flow.variable.load.indirect %650 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1773 = flow.variable.load.indirect %651 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1774 = flow.variable.load.indirect %652 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1775 = flow.variable.load.indirect %653 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1776 = flow.variable.load.indirect %654 : !util.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> + %1777 = flow.variable.load.indirect %655 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1778 = flow.variable.load.indirect %656 : !util.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> + %1779 = flow.variable.load.indirect %657 : !util.ptr<tensor<128xf32>> -> 
tensor<128xf32> + %1780 = flow.variable.load.indirect %658 : !util.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> + %1781 = flow.variable.load.indirect %659 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1782 = flow.variable.load.indirect %660 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1783 = flow.variable.load.indirect %661 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1784 = flow.variable.load.indirect %662 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1785 = flow.variable.load.indirect %663 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1786 = flow.variable.load.indirect %664 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1787 = flow.variable.load.indirect %665 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1788 = flow.variable.load.indirect %666 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1789 = flow.variable.load.indirect %667 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1790 = flow.variable.load.indirect %668 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1791 = flow.variable.load.indirect %669 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1792 = flow.variable.load.indirect %670 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1793 = flow.variable.load.indirect %671 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1794 = flow.variable.load.indirect %672 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1795 = flow.variable.load.indirect %673 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1796 = flow.variable.load.indirect %674 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1797 = flow.variable.load.indirect %675 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1798 = flow.variable.load.indirect %676 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1799 = flow.variable.load.indirect %677 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1800 = flow.variable.load.indirect %678 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1801 = 
flow.variable.load.indirect %679 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1802 = flow.variable.load.indirect %680 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1803 = flow.variable.load.indirect %681 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1804 = flow.variable.load.indirect %682 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1805 = flow.variable.load.indirect %683 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1806 = flow.variable.load.indirect %684 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1807 = flow.variable.load.indirect %685 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1808 = flow.variable.load.indirect %686 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1809 = flow.variable.load.indirect %687 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1810 = flow.variable.load.indirect %688 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1811 = flow.variable.load.indirect %689 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1812 = flow.variable.load.indirect %690 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1813 = flow.variable.load.indirect %691 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1814 = flow.variable.load.indirect %692 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1815 = flow.variable.load.indirect %693 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1816 = flow.variable.load.indirect %694 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1817 = flow.variable.load.indirect %695 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1818 = flow.variable.load.indirect %696 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1819 = flow.variable.load.indirect %697 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1820 = flow.variable.load.indirect %698 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1821 = flow.variable.load.indirect %699 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1822 = flow.variable.load.indirect %700 : 
!util.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> + %1823 = flow.variable.load.indirect %701 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1824 = flow.variable.load.indirect %702 : !util.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> + %1825 = flow.variable.load.indirect %703 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1826 = flow.variable.load.indirect %704 : !util.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> + %1827 = flow.variable.load.indirect %705 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1828 = flow.variable.load.indirect %706 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1829 = flow.variable.load.indirect %707 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1830 = flow.variable.load.indirect %708 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1831 = flow.variable.load.indirect %709 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1832 = flow.variable.load.indirect %710 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1833 = flow.variable.load.indirect %711 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1834 = flow.variable.load.indirect %712 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1835 = flow.variable.load.indirect %713 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1836 = flow.variable.load.indirect %714 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1837 = flow.variable.load.indirect %715 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1838 = flow.variable.load.indirect %716 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1839 = flow.variable.load.indirect %717 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1840 = flow.variable.load.indirect %718 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1841 = flow.variable.load.indirect %719 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1842 = flow.variable.load.indirect %720 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1843 = flow.variable.load.indirect %721 : 
!util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1844 = flow.variable.load.indirect %722 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1845 = flow.variable.load.indirect %723 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1846 = flow.variable.load.indirect %724 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1847 = flow.variable.load.indirect %725 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1848 = flow.variable.load.indirect %726 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1849 = flow.variable.load.indirect %727 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1850 = flow.variable.load.indirect %728 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1851 = flow.variable.load.indirect %729 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1852 = flow.variable.load.indirect %730 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1853 = flow.variable.load.indirect %731 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1854 = flow.variable.load.indirect %732 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1855 = flow.variable.load.indirect %733 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1856 = flow.variable.load.indirect %734 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1857 = flow.variable.load.indirect %735 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1858 = flow.variable.load.indirect %736 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1859 = flow.variable.load.indirect %737 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1860 = flow.variable.load.indirect %738 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1861 = flow.variable.load.indirect %739 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1862 = flow.variable.load.indirect %740 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1863 = flow.variable.load.indirect %741 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1864 = flow.variable.load.indirect %742 : !util.ptr<tensor<512x128xf32>> -> 
tensor<512x128xf32> + %1865 = flow.variable.load.indirect %743 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1866 = flow.variable.load.indirect %744 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1867 = flow.variable.load.indirect %745 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1868 = flow.variable.load.indirect %746 : !util.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> + %1869 = flow.variable.load.indirect %747 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1870 = flow.variable.load.indirect %748 : !util.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> + %1871 = flow.variable.load.indirect %749 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1872 = flow.variable.load.indirect %750 : !util.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> + %1873 = flow.variable.load.indirect %751 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1874 = flow.variable.load.indirect %752 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1875 = flow.variable.load.indirect %753 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1876 = flow.variable.load.indirect %754 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1877 = flow.variable.load.indirect %755 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1878 = flow.variable.load.indirect %756 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1879 = flow.variable.load.indirect %757 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1880 = flow.variable.load.indirect %758 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1881 = flow.variable.load.indirect %759 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1882 = flow.variable.load.indirect %760 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1883 = flow.variable.load.indirect %761 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1884 = flow.variable.load.indirect %762 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1885 = flow.variable.load.indirect %763 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1886 = 
flow.variable.load.indirect %764 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1887 = flow.variable.load.indirect %765 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1888 = flow.variable.load.indirect %766 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1889 = flow.variable.load.indirect %767 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1890 = flow.variable.load.indirect %768 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1891 = flow.variable.load.indirect %769 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1892 = flow.variable.load.indirect %770 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1893 = flow.variable.load.indirect %771 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1894 = flow.variable.load.indirect %772 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1895 = flow.variable.load.indirect %773 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1896 = flow.variable.load.indirect %774 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1897 = flow.variable.load.indirect %775 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1898 = flow.variable.load.indirect %776 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1899 = flow.variable.load.indirect %777 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1900 = flow.variable.load.indirect %778 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1901 = flow.variable.load.indirect %779 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1902 = flow.variable.load.indirect %780 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1903 = flow.variable.load.indirect %781 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1904 = flow.variable.load.indirect %782 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1905 = flow.variable.load.indirect %783 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1906 = flow.variable.load.indirect %784 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1907 = flow.variable.load.indirect %785 : 
!util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1908 = flow.variable.load.indirect %786 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1909 = flow.variable.load.indirect %787 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1910 = flow.variable.load.indirect %788 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1911 = flow.variable.load.indirect %789 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1912 = flow.variable.load.indirect %790 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1913 = flow.variable.load.indirect %791 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1914 = flow.variable.load.indirect %792 : !util.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> + %1915 = flow.variable.load.indirect %793 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1916 = flow.variable.load.indirect %794 : !util.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> + %1917 = flow.variable.load.indirect %795 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1918 = flow.variable.load.indirect %796 : !util.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> + %1919 = flow.variable.load.indirect %797 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1920 = flow.variable.load.indirect %798 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1921 = flow.variable.load.indirect %799 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1922 = flow.variable.load.indirect %800 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1923 = flow.variable.load.indirect %801 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1924 = flow.variable.load.indirect %802 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1925 = flow.variable.load.indirect %803 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1926 = flow.variable.load.indirect %804 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1927 = flow.variable.load.indirect %805 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1928 = flow.variable.load.indirect %806 : !util.ptr<tensor<512x128xf32>> 
-> tensor<512x128xf32> + %1929 = flow.variable.load.indirect %807 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1930 = flow.variable.load.indirect %808 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1931 = flow.variable.load.indirect %809 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1932 = flow.variable.load.indirect %810 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1933 = flow.variable.load.indirect %811 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1934 = flow.variable.load.indirect %812 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1935 = flow.variable.load.indirect %813 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1936 = flow.variable.load.indirect %814 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1937 = flow.variable.load.indirect %815 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1938 = flow.variable.load.indirect %816 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1939 = flow.variable.load.indirect %817 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1940 = flow.variable.load.indirect %818 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1941 = flow.variable.load.indirect %819 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1942 = flow.variable.load.indirect %820 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1943 = flow.variable.load.indirect %821 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1944 = flow.variable.load.indirect %822 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1945 = flow.variable.load.indirect %823 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1946 = flow.variable.load.indirect %824 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1947 = flow.variable.load.indirect %825 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1948 = flow.variable.load.indirect %826 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1949 = flow.variable.load.indirect %827 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1950 = 
flow.variable.load.indirect %828 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1951 = flow.variable.load.indirect %829 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1952 = flow.variable.load.indirect %830 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1953 = flow.variable.load.indirect %831 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1954 = flow.variable.load.indirect %832 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1955 = flow.variable.load.indirect %833 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1956 = flow.variable.load.indirect %834 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1957 = flow.variable.load.indirect %835 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1958 = flow.variable.load.indirect %836 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1959 = flow.variable.load.indirect %837 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1960 = flow.variable.load.indirect %838 : !util.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> + %1961 = flow.variable.load.indirect %839 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1962 = flow.variable.load.indirect %840 : !util.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> + %1963 = flow.variable.load.indirect %841 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1964 = flow.variable.load.indirect %842 : !util.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> + %1965 = flow.variable.load.indirect %843 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1966 = flow.variable.load.indirect %844 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1967 = flow.variable.load.indirect %845 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1968 = flow.variable.load.indirect %846 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1969 = flow.variable.load.indirect %847 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1970 = flow.variable.load.indirect %848 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1971 = flow.variable.load.indirect %849 
: !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1972 = flow.variable.load.indirect %850 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1973 = flow.variable.load.indirect %851 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1974 = flow.variable.load.indirect %852 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1975 = flow.variable.load.indirect %853 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1976 = flow.variable.load.indirect %854 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1977 = flow.variable.load.indirect %855 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1978 = flow.variable.load.indirect %856 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1979 = flow.variable.load.indirect %857 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1980 = flow.variable.load.indirect %858 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1981 = flow.variable.load.indirect %859 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1982 = flow.variable.load.indirect %860 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1983 = flow.variable.load.indirect %861 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1984 = flow.variable.load.indirect %862 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1985 = flow.variable.load.indirect %863 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1986 = flow.variable.load.indirect %864 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %1987 = flow.variable.load.indirect %865 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1988 = flow.variable.load.indirect %866 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1989 = flow.variable.load.indirect %867 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1990 = flow.variable.load.indirect %868 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1991 = flow.variable.load.indirect %869 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1992 = flow.variable.load.indirect %870 : !util.ptr<tensor<512x128xf32>> -> 
tensor<512x128xf32> + %1993 = flow.variable.load.indirect %871 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1994 = flow.variable.load.indirect %872 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %1995 = flow.variable.load.indirect %873 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1996 = flow.variable.load.indirect %874 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %1997 = flow.variable.load.indirect %875 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1998 = flow.variable.load.indirect %876 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %1999 = flow.variable.load.indirect %877 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %2000 = flow.variable.load.indirect %878 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %2001 = flow.variable.load.indirect %879 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2002 = flow.variable.load.indirect %880 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %2003 = flow.variable.load.indirect %881 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2004 = flow.variable.load.indirect %882 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2005 = flow.variable.load.indirect %883 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2006 = flow.variable.load.indirect %884 : !util.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> + %2007 = flow.variable.load.indirect %885 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2008 = flow.variable.load.indirect %886 : !util.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> + %2009 = flow.variable.load.indirect %887 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2010 = flow.variable.load.indirect %888 : !util.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> + %2011 = flow.variable.load.indirect %889 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2012 = flow.variable.load.indirect %890 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %2013 = flow.variable.load.indirect %891 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2014 = 
flow.variable.load.indirect %892 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2015 = flow.variable.load.indirect %893 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2016 = flow.variable.load.indirect %894 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %2017 = flow.variable.load.indirect %895 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2018 = flow.variable.load.indirect %896 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2019 = flow.variable.load.indirect %897 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2020 = flow.variable.load.indirect %898 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %2021 = flow.variable.load.indirect %899 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %2022 = flow.variable.load.indirect %900 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %2023 = flow.variable.load.indirect %901 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2024 = flow.variable.load.indirect %902 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2025 = flow.variable.load.indirect %903 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2026 = flow.variable.load.indirect %904 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %2027 = flow.variable.load.indirect %905 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %2028 = flow.variable.load.indirect %906 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %2029 = flow.variable.load.indirect %907 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2030 = flow.variable.load.indirect %908 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2031 = flow.variable.load.indirect %909 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2032 = flow.variable.load.indirect %910 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %2033 = flow.variable.load.indirect %911 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %2034 = flow.variable.load.indirect %912 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %2035 = flow.variable.load.indirect %913 
: !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2036 = flow.variable.load.indirect %914 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2037 = flow.variable.load.indirect %915 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2038 = flow.variable.load.indirect %916 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %2039 = flow.variable.load.indirect %917 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %2040 = flow.variable.load.indirect %918 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %2041 = flow.variable.load.indirect %919 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2042 = flow.variable.load.indirect %920 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2043 = flow.variable.load.indirect %921 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %2044 = flow.variable.load.indirect %922 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %2045 = flow.variable.load.indirect %923 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %2046 = flow.variable.load.indirect %924 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %2047 = flow.variable.load.indirect %925 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2048 = flow.variable.load.indirect %926 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %2049 = flow.variable.load.indirect %927 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2050 = flow.variable.load.indirect %928 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2051 = flow.variable.load.indirect %929 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2052 = flow.variable.load.indirect %930 : !util.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> + %2053 = flow.variable.load.indirect %931 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2054 = flow.variable.load.indirect %932 : !util.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> + %2055 = flow.variable.load.indirect %933 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2056 = flow.variable.load.indirect %934 : !util.ptr<tensor<128x128xf32>> -> 
tensor<128x128xf32> + %2057 = flow.variable.load.indirect %935 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2058 = flow.variable.load.indirect %936 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %2059 = flow.variable.load.indirect %937 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2060 = flow.variable.load.indirect %938 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2061 = flow.variable.load.indirect %939 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2062 = flow.variable.load.indirect %940 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %2063 = flow.variable.load.indirect %941 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2064 = flow.variable.load.indirect %942 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2065 = flow.variable.load.indirect %943 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2066 = flow.variable.load.indirect %944 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %2067 = flow.variable.load.indirect %945 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %2068 = flow.variable.load.indirect %946 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %2069 = flow.variable.load.indirect %947 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2070 = flow.variable.load.indirect %948 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2071 = flow.variable.load.indirect %949 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2072 = flow.variable.load.indirect %950 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %2073 = flow.variable.load.indirect %951 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %2074 = flow.variable.load.indirect %952 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %2075 = flow.variable.load.indirect %953 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2076 = flow.variable.load.indirect %954 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2077 = flow.variable.load.indirect %955 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2078 = 
flow.variable.load.indirect %956 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %2079 = flow.variable.load.indirect %957 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %2080 = flow.variable.load.indirect %958 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %2081 = flow.variable.load.indirect %959 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2082 = flow.variable.load.indirect %960 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2083 = flow.variable.load.indirect %961 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2084 = flow.variable.load.indirect %962 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %2085 = flow.variable.load.indirect %963 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %2086 = flow.variable.load.indirect %964 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %2087 = flow.variable.load.indirect %965 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2088 = flow.variable.load.indirect %966 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2089 = flow.variable.load.indirect %967 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %2090 = flow.variable.load.indirect %968 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %2091 = flow.variable.load.indirect %969 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %2092 = flow.variable.load.indirect %970 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %2093 = flow.variable.load.indirect %971 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2094 = flow.variable.load.indirect %972 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %2095 = flow.variable.load.indirect %973 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2096 = flow.variable.load.indirect %974 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2097 = flow.variable.load.indirect %975 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2098 = flow.variable.load.indirect %976 : !util.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> + %2099 = flow.variable.load.indirect %977 
: !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2100 = flow.variable.load.indirect %978 : !util.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> + %2101 = flow.variable.load.indirect %979 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2102 = flow.variable.load.indirect %980 : !util.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> + %2103 = flow.variable.load.indirect %981 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2104 = flow.variable.load.indirect %982 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %2105 = flow.variable.load.indirect %983 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2106 = flow.variable.load.indirect %984 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2107 = flow.variable.load.indirect %985 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2108 = flow.variable.load.indirect %986 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %2109 = flow.variable.load.indirect %987 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2110 = flow.variable.load.indirect %988 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2111 = flow.variable.load.indirect %989 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2112 = flow.variable.load.indirect %990 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %2113 = flow.variable.load.indirect %991 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %2114 = flow.variable.load.indirect %992 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %2115 = flow.variable.load.indirect %993 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2116 = flow.variable.load.indirect %994 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2117 = flow.variable.load.indirect %995 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2118 = flow.variable.load.indirect %996 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %2119 = flow.variable.load.indirect %997 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %2120 = flow.variable.load.indirect %998 : !util.ptr<tensor<128x512xf32>> 
-> tensor<128x512xf32> + %2121 = flow.variable.load.indirect %999 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2122 = flow.variable.load.indirect %1000 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2123 = flow.variable.load.indirect %1001 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2124 = flow.variable.load.indirect %1002 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %2125 = flow.variable.load.indirect %1003 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %2126 = flow.variable.load.indirect %1004 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %2127 = flow.variable.load.indirect %1005 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2128 = flow.variable.load.indirect %1006 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2129 = flow.variable.load.indirect %1007 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2130 = flow.variable.load.indirect %1008 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %2131 = flow.variable.load.indirect %1009 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %2132 = flow.variable.load.indirect %1010 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %2133 = flow.variable.load.indirect %1011 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2134 = flow.variable.load.indirect %1012 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2135 = flow.variable.load.indirect %1013 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %2136 = flow.variable.load.indirect %1014 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %2137 = flow.variable.load.indirect %1015 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %2138 = flow.variable.load.indirect %1016 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %2139 = flow.variable.load.indirect %1017 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2140 = flow.variable.load.indirect %1018 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %2141 = flow.variable.load.indirect %1019 : !util.ptr<tensor<128xf32>> -> 
tensor<128xf32> + %2142 = flow.variable.load.indirect %1020 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2143 = flow.variable.load.indirect %1021 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2144 = flow.variable.load.indirect %1022 : !util.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> + %2145 = flow.variable.load.indirect %1023 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2146 = flow.variable.load.indirect %1024 : !util.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> + %2147 = flow.variable.load.indirect %1025 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2148 = flow.variable.load.indirect %1026 : !util.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> + %2149 = flow.variable.load.indirect %1027 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2150 = flow.variable.load.indirect %1028 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %2151 = flow.variable.load.indirect %1029 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2152 = flow.variable.load.indirect %1030 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2153 = flow.variable.load.indirect %1031 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2154 = flow.variable.load.indirect %1032 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %2155 = flow.variable.load.indirect %1033 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2156 = flow.variable.load.indirect %1034 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2157 = flow.variable.load.indirect %1035 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2158 = flow.variable.load.indirect %1036 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %2159 = flow.variable.load.indirect %1037 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %2160 = flow.variable.load.indirect %1038 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %2161 = flow.variable.load.indirect %1039 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2162 = flow.variable.load.indirect %1040 : !util.ptr<tensor<128xf32>> -> 
tensor<128xf32> + %2163 = flow.variable.load.indirect %1041 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2164 = flow.variable.load.indirect %1042 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %2165 = flow.variable.load.indirect %1043 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %2166 = flow.variable.load.indirect %1044 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %2167 = flow.variable.load.indirect %1045 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2168 = flow.variable.load.indirect %1046 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2169 = flow.variable.load.indirect %1047 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2170 = flow.variable.load.indirect %1048 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %2171 = flow.variable.load.indirect %1049 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %2172 = flow.variable.load.indirect %1050 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %2173 = flow.variable.load.indirect %1051 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2174 = flow.variable.load.indirect %1052 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2175 = flow.variable.load.indirect %1053 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2176 = flow.variable.load.indirect %1054 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %2177 = flow.variable.load.indirect %1055 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %2178 = flow.variable.load.indirect %1056 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %2179 = flow.variable.load.indirect %1057 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2180 = flow.variable.load.indirect %1058 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2181 = flow.variable.load.indirect %1059 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %2182 = flow.variable.load.indirect %1060 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %2183 = flow.variable.load.indirect %1061 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + 
%2184 = flow.variable.load.indirect %1062 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %2185 = flow.variable.load.indirect %1063 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2186 = flow.variable.load.indirect %1064 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %2187 = flow.variable.load.indirect %1065 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2188 = flow.variable.load.indirect %1066 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2189 = flow.variable.load.indirect %1067 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2190 = flow.variable.load.indirect %1068 : !util.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> + %2191 = flow.variable.load.indirect %1069 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2192 = flow.variable.load.indirect %1070 : !util.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> + %2193 = flow.variable.load.indirect %1071 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2194 = flow.variable.load.indirect %1072 : !util.ptr<tensor<128x128xf32>> -> tensor<128x128xf32> + %2195 = flow.variable.load.indirect %1073 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2196 = flow.variable.load.indirect %1074 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %2197 = flow.variable.load.indirect %1075 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2198 = flow.variable.load.indirect %1076 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2199 = flow.variable.load.indirect %1077 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2200 = flow.variable.load.indirect %1078 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %2201 = flow.variable.load.indirect %1079 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2202 = flow.variable.load.indirect %1080 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2203 = flow.variable.load.indirect %1081 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2204 = flow.variable.load.indirect %1082 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + 
%2205 = flow.variable.load.indirect %1083 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %2206 = flow.variable.load.indirect %1084 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %2207 = flow.variable.load.indirect %1085 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2208 = flow.variable.load.indirect %1086 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2209 = flow.variable.load.indirect %1087 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2210 = flow.variable.load.indirect %1088 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %2211 = flow.variable.load.indirect %1089 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %2212 = flow.variable.load.indirect %1090 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %2213 = flow.variable.load.indirect %1091 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2214 = flow.variable.load.indirect %1092 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2215 = flow.variable.load.indirect %1093 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2216 = flow.variable.load.indirect %1094 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %2217 = flow.variable.load.indirect %1095 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %2218 = flow.variable.load.indirect %1096 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %2219 = flow.variable.load.indirect %1097 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2220 = flow.variable.load.indirect %1098 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2221 = flow.variable.load.indirect %1099 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2222 = flow.variable.load.indirect %1100 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %2223 = flow.variable.load.indirect %1101 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %2224 = flow.variable.load.indirect %1102 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %2225 = flow.variable.load.indirect %1103 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2226 = 
flow.variable.load.indirect %1104 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2227 = flow.variable.load.indirect %1105 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %2228 = flow.variable.load.indirect %1106 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %2229 = flow.variable.load.indirect %1107 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %2230 = flow.variable.load.indirect %1108 : !util.ptr<tensor<128x512xf32>> -> tensor<128x512xf32> + %2231 = flow.variable.load.indirect %1109 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %2232 = flow.variable.load.indirect %1110 : !util.ptr<tensor<512x128xf32>> -> tensor<512x128xf32> + %2233 = flow.variable.load.indirect %1111 : !util.ptr<tensor<2xf32>> -> tensor<2xf32> + %2234 = flow.variable.load.indirect %1112 : !util.ptr<tensor<2x512xf32>> -> tensor<2x512xf32> %2235 = "mhlo.reshape"(%arg1) : (tensor<1x384xi32>) -> tensor<1x384x1xi32> %2236 = "mhlo.torch_index_select"(%1128, %2235) {batch_dims = 0 : i64, dim = 0 : i64} : (tensor<30522x128xf32>, tensor<1x384x1xi32>) -> tensor<1x384x1x128xf32> %2237 = "mhlo.reshape"(%2236) : (tensor<1x384x1x128xf32>) -> tensor<1x384x128xf32>
diff --git a/iree/test/e2e/models/collatz.mlir b/iree/test/e2e/models/collatz.mlir index 04f625c..feffff7 100644 --- a/iree/test/e2e/models/collatz.mlir +++ b/iree/test/e2e/models/collatz.mlir
@@ -2,7 +2,7 @@ // CHECK-LABEL: EXEC @collatz func @collatz() -> tensor<f32> { - %arg0 = iree.unfoldable_constant dense<178.0> : tensor<f32> + %arg0 = util.unfoldable_constant dense<178.0> : tensor<f32> %0 = mhlo.constant dense<1.0> : tensor<f32> %1 = mhlo.constant dense<3.0> : tensor<f32> %2 = mhlo.constant dense<2.0> : tensor<f32>
diff --git a/iree/test/e2e/models/mnist_fake_weights.mlir b/iree/test/e2e/models/mnist_fake_weights.mlir index 2edf78b..cff4402 100644 --- a/iree/test/e2e/models/mnist_fake_weights.mlir +++ b/iree/test/e2e/models/mnist_fake_weights.mlir
@@ -11,17 +11,17 @@ flow.variable @"__iree_flow___sm_node25__model.layer-2.bias" dense<2.500000e-01> : tensor<10xf32> attributes {noinline, sym_visibility = "private"} // CHECK-LABEL: EXEC @predict func @predict(%arg0: tensor<1x28x28x1xf32>) -> tensor<1x10xf32> attributes {iree.module.export, iree.reflection = {abi = "sip", abiv = 1 : i32, sip = "I8!S5!k0_0R3!_0"}} { - %0 = flow.variable.address @"__iree_flow___sm_node17__model.layer-1.kernel" : !iree.ptr<tensor<784x128xf32>> - %1 = flow.variable.address @"__iree_flow___sm_node18__model.layer-1.bias" : !iree.ptr<tensor<128xf32>> - %2 = flow.variable.address @"__iree_flow___sm_node24__model.layer-2.kernel" : !iree.ptr<tensor<128x10xf32>> - %3 = flow.variable.address @"__iree_flow___sm_node25__model.layer-2.bias" : !iree.ptr<tensor<10xf32>> + %0 = flow.variable.address @"__iree_flow___sm_node17__model.layer-1.kernel" : !util.ptr<tensor<784x128xf32>> + %1 = flow.variable.address @"__iree_flow___sm_node18__model.layer-1.bias" : !util.ptr<tensor<128xf32>> + %2 = flow.variable.address @"__iree_flow___sm_node24__model.layer-2.kernel" : !util.ptr<tensor<128x10xf32>> + %3 = flow.variable.address @"__iree_flow___sm_node25__model.layer-2.bias" : !util.ptr<tensor<10xf32>> %4 = mhlo.constant dense<0.000000e+00> : tensor<1x128xf32> %5 = mhlo.constant dense<0xFF800000> : tensor<f32> %6 = mhlo.constant dense<0.000000e+00> : tensor<f32> - %7 = flow.variable.load.indirect %3 : !iree.ptr<tensor<10xf32>> -> tensor<10xf32> - %8 = flow.variable.load.indirect %2 : !iree.ptr<tensor<128x10xf32>> -> tensor<128x10xf32> - %9 = flow.variable.load.indirect %1 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %10 = flow.variable.load.indirect %0 : !iree.ptr<tensor<784x128xf32>> -> tensor<784x128xf32> + %7 = flow.variable.load.indirect %3 : !util.ptr<tensor<10xf32>> -> tensor<10xf32> + %8 = flow.variable.load.indirect %2 : !util.ptr<tensor<128x10xf32>> -> tensor<128x10xf32> + %9 = flow.variable.load.indirect %1 : !util.ptr<tensor<128xf32>> -> 
tensor<128xf32> + %10 = flow.variable.load.indirect %0 : !util.ptr<tensor<784x128xf32>> -> tensor<784x128xf32> %11 = "mhlo.reshape"(%arg0) : (tensor<1x28x28x1xf32>) -> tensor<1x784xf32> %12 = "mhlo.dot"(%11, %10) : (tensor<1x784xf32>, tensor<784x128xf32>) -> tensor<1x128xf32> %13 = "mhlo.broadcast_in_dim"(%9) {broadcast_dimensions = dense<1> : tensor<1xi64>} : (tensor<128xf32>) -> tensor<1x128xf32>
diff --git a/iree/test/e2e/models/mobilenetv3_fake_weights.mlir b/iree/test/e2e/models/mobilenetv3_fake_weights.mlir index 4e571d8..19cb7f7 100644 --- a/iree/test/e2e/models/mobilenetv3_fake_weights.mlir +++ b/iree/test/e2e/models/mobilenetv3_fake_weights.mlir
@@ -210,217 +210,217 @@ flow.variable @"__iree_flow___sm_node1310__m.layer-246.kernel" dense<0.00478468882> : tensor<1x1x1024x1000xf32> attributes {noinline, sym_visibility = "private"} flow.variable @"__iree_flow___sm_node1311__m.layer-246.bias" dense<0.00476190494> : tensor<1000xf32> attributes {noinline, sym_visibility = "private"} func @call() { - %arg0 = iree.unfoldable_constant dense<1.5> : tensor<1x224x224x3xf32> - %0 = flow.variable.address @"__iree_flow___sm_node260__m.layer-2.kernel" : !iree.ptr<tensor<3x3x3x16xf32>> - %1 = flow.variable.address @"__iree_flow___sm_node266__m.layer-3.gamma" : !iree.ptr<tensor<16xf32>> - %2 = flow.variable.address @"__iree_flow___sm_node267__m.layer-3.beta" : !iree.ptr<tensor<16xf32>> - %3 = flow.variable.address @"__iree_flow___sm_node268__m.layer-3.moving_mean" : !iree.ptr<tensor<16xf32>> - %4 = flow.variable.address @"__iree_flow___sm_node269__m.layer-3.moving_variance" : !iree.ptr<tensor<16xf32>> - %5 = flow.variable.address @"__iree_flow___sm_node288__m.layer-9.depthwise_kernel" : !iree.ptr<tensor<3x3x16x1xf32>> - %6 = flow.variable.address @"__iree_flow___sm_node294__m.layer-10.gamma" : !iree.ptr<tensor<16xf32>> - %7 = flow.variable.address @"__iree_flow___sm_node295__m.layer-10.beta" : !iree.ptr<tensor<16xf32>> - %8 = flow.variable.address @"__iree_flow___sm_node296__m.layer-10.moving_mean" : !iree.ptr<tensor<16xf32>> - %9 = flow.variable.address @"__iree_flow___sm_node297__m.layer-10.moving_variance" : !iree.ptr<tensor<16xf32>> - %10 = flow.variable.address @"__iree_flow___sm_node314__m.layer-14.kernel" : !iree.ptr<tensor<1x1x16x8xf32>> - %11 = flow.variable.address @"__iree_flow___sm_node315__m.layer-14.bias" : !iree.ptr<tensor<8xf32>> - %12 = flow.variable.address @"__iree_flow___sm_node324__m.layer-16.kernel" : !iree.ptr<tensor<1x1x8x16xf32>> - %13 = flow.variable.address @"__iree_flow___sm_node325__m.layer-16.bias" : !iree.ptr<tensor<16xf32>> - %14 = flow.variable.address 
@"__iree_flow___sm_node340__m.layer-21.kernel" : !iree.ptr<tensor<1x1x16x16xf32>> - %15 = flow.variable.address @"__iree_flow___sm_node346__m.layer-22.gamma" : !iree.ptr<tensor<16xf32>> - %16 = flow.variable.address @"__iree_flow___sm_node347__m.layer-22.beta" : !iree.ptr<tensor<16xf32>> - %17 = flow.variable.address @"__iree_flow___sm_node348__m.layer-22.moving_mean" : !iree.ptr<tensor<16xf32>> - %18 = flow.variable.address @"__iree_flow___sm_node349__m.layer-22.moving_variance" : !iree.ptr<tensor<16xf32>> - %19 = flow.variable.address @"__iree_flow___sm_node354__m.layer-23.kernel" : !iree.ptr<tensor<1x1x16x72xf32>> - %20 = flow.variable.address @"__iree_flow___sm_node360__m.layer-24.gamma" : !iree.ptr<tensor<72xf32>> - %21 = flow.variable.address @"__iree_flow___sm_node361__m.layer-24.beta" : !iree.ptr<tensor<72xf32>> - %22 = flow.variable.address @"__iree_flow___sm_node362__m.layer-24.moving_mean" : !iree.ptr<tensor<72xf32>> - %23 = flow.variable.address @"__iree_flow___sm_node363__m.layer-24.moving_variance" : !iree.ptr<tensor<72xf32>> - %24 = flow.variable.address @"__iree_flow___sm_node376__m.layer-27.depthwise_kernel" : !iree.ptr<tensor<3x3x72x1xf32>> - %25 = flow.variable.address @"__iree_flow___sm_node382__m.layer-28.gamma" : !iree.ptr<tensor<72xf32>> - %26 = flow.variable.address @"__iree_flow___sm_node383__m.layer-28.beta" : !iree.ptr<tensor<72xf32>> - %27 = flow.variable.address @"__iree_flow___sm_node384__m.layer-28.moving_mean" : !iree.ptr<tensor<72xf32>> - %28 = flow.variable.address @"__iree_flow___sm_node385__m.layer-28.moving_variance" : !iree.ptr<tensor<72xf32>> - %29 = flow.variable.address @"__iree_flow___sm_node394__m.layer-30.kernel" : !iree.ptr<tensor<1x1x72x24xf32>> - %30 = flow.variable.address @"__iree_flow___sm_node400__m.layer-31.gamma" : !iree.ptr<tensor<24xf32>> - %31 = flow.variable.address @"__iree_flow___sm_node401__m.layer-31.beta" : !iree.ptr<tensor<24xf32>> - %32 = flow.variable.address 
@"__iree_flow___sm_node402__m.layer-31.moving_mean" : !iree.ptr<tensor<24xf32>> - %33 = flow.variable.address @"__iree_flow___sm_node403__m.layer-31.moving_variance" : !iree.ptr<tensor<24xf32>> - %34 = flow.variable.address @"__iree_flow___sm_node408__m.layer-32.kernel" : !iree.ptr<tensor<1x1x24x88xf32>> - %35 = flow.variable.address @"__iree_flow___sm_node414__m.layer-33.gamma" : !iree.ptr<tensor<88xf32>> - %36 = flow.variable.address @"__iree_flow___sm_node415__m.layer-33.beta" : !iree.ptr<tensor<88xf32>> - %37 = flow.variable.address @"__iree_flow___sm_node416__m.layer-33.moving_mean" : !iree.ptr<tensor<88xf32>> - %38 = flow.variable.address @"__iree_flow___sm_node417__m.layer-33.moving_variance" : !iree.ptr<tensor<88xf32>> - %39 = flow.variable.address @"__iree_flow___sm_node426__m.layer-35.depthwise_kernel" : !iree.ptr<tensor<3x3x88x1xf32>> - %40 = flow.variable.address @"__iree_flow___sm_node432__m.layer-36.gamma" : !iree.ptr<tensor<88xf32>> - %41 = flow.variable.address @"__iree_flow___sm_node433__m.layer-36.beta" : !iree.ptr<tensor<88xf32>> - %42 = flow.variable.address @"__iree_flow___sm_node434__m.layer-36.moving_mean" : !iree.ptr<tensor<88xf32>> - %43 = flow.variable.address @"__iree_flow___sm_node435__m.layer-36.moving_variance" : !iree.ptr<tensor<88xf32>> - %44 = flow.variable.address @"__iree_flow___sm_node444__m.layer-38.kernel" : !iree.ptr<tensor<1x1x88x24xf32>> - %45 = flow.variable.address @"__iree_flow___sm_node450__m.layer-39.gamma" : !iree.ptr<tensor<24xf32>> - %46 = flow.variable.address @"__iree_flow___sm_node451__m.layer-39.beta" : !iree.ptr<tensor<24xf32>> - %47 = flow.variable.address @"__iree_flow___sm_node452__m.layer-39.moving_mean" : !iree.ptr<tensor<24xf32>> - %48 = flow.variable.address @"__iree_flow___sm_node453__m.layer-39.moving_variance" : !iree.ptr<tensor<24xf32>> - %49 = flow.variable.address @"__iree_flow___sm_node462__m.layer-41.kernel" : !iree.ptr<tensor<1x1x24x96xf32>> - %50 = flow.variable.address 
@"__iree_flow___sm_node468__m.layer-42.gamma" : !iree.ptr<tensor<96xf32>> - %51 = flow.variable.address @"__iree_flow___sm_node469__m.layer-42.beta" : !iree.ptr<tensor<96xf32>> - %52 = flow.variable.address @"__iree_flow___sm_node470__m.layer-42.moving_mean" : !iree.ptr<tensor<96xf32>> - %53 = flow.variable.address @"__iree_flow___sm_node471__m.layer-42.moving_variance" : !iree.ptr<tensor<96xf32>> - %54 = flow.variable.address @"__iree_flow___sm_node490__m.layer-48.depthwise_kernel" : !iree.ptr<tensor<5x5x96x1xf32>> - %55 = flow.variable.address @"__iree_flow___sm_node496__m.layer-49.gamma" : !iree.ptr<tensor<96xf32>> - %56 = flow.variable.address @"__iree_flow___sm_node497__m.layer-49.beta" : !iree.ptr<tensor<96xf32>> - %57 = flow.variable.address @"__iree_flow___sm_node498__m.layer-49.moving_mean" : !iree.ptr<tensor<96xf32>> - %58 = flow.variable.address @"__iree_flow___sm_node499__m.layer-49.moving_variance" : !iree.ptr<tensor<96xf32>> - %59 = flow.variable.address @"__iree_flow___sm_node522__m.layer-56.kernel" : !iree.ptr<tensor<1x1x96x24xf32>> - %60 = flow.variable.address @"__iree_flow___sm_node523__m.layer-56.bias" : !iree.ptr<tensor<24xf32>> - %61 = flow.variable.address @"__iree_flow___sm_node532__m.layer-58.kernel" : !iree.ptr<tensor<1x1x24x96xf32>> - %62 = flow.variable.address @"__iree_flow___sm_node533__m.layer-58.bias" : !iree.ptr<tensor<96xf32>> - %63 = flow.variable.address @"__iree_flow___sm_node548__m.layer-63.kernel" : !iree.ptr<tensor<1x1x96x40xf32>> - %64 = flow.variable.address @"__iree_flow___sm_node554__m.layer-64.gamma" : !iree.ptr<tensor<40xf32>> - %65 = flow.variable.address @"__iree_flow___sm_node555__m.layer-64.beta" : !iree.ptr<tensor<40xf32>> - %66 = flow.variable.address @"__iree_flow___sm_node556__m.layer-64.moving_mean" : !iree.ptr<tensor<40xf32>> - %67 = flow.variable.address @"__iree_flow___sm_node557__m.layer-64.moving_variance" : !iree.ptr<tensor<40xf32>> - %68 = flow.variable.address 
@"__iree_flow___sm_node562__m.layer-65.kernel" : !iree.ptr<tensor<1x1x40x240xf32>> - %69 = flow.variable.address @"__iree_flow___sm_node568__m.layer-66.gamma" : !iree.ptr<tensor<240xf32>> - %70 = flow.variable.address @"__iree_flow___sm_node569__m.layer-66.beta" : !iree.ptr<tensor<240xf32>> - %71 = flow.variable.address @"__iree_flow___sm_node570__m.layer-66.moving_mean" : !iree.ptr<tensor<240xf32>> - %72 = flow.variable.address @"__iree_flow___sm_node571__m.layer-66.moving_variance" : !iree.ptr<tensor<240xf32>> - %73 = flow.variable.address @"__iree_flow___sm_node586__m.layer-71.depthwise_kernel" : !iree.ptr<tensor<5x5x240x1xf32>> - %74 = flow.variable.address @"__iree_flow___sm_node592__m.layer-72.gamma" : !iree.ptr<tensor<240xf32>> - %75 = flow.variable.address @"__iree_flow___sm_node593__m.layer-72.beta" : !iree.ptr<tensor<240xf32>> - %76 = flow.variable.address @"__iree_flow___sm_node594__m.layer-72.moving_mean" : !iree.ptr<tensor<240xf32>> - %77 = flow.variable.address @"__iree_flow___sm_node595__m.layer-72.moving_variance" : !iree.ptr<tensor<240xf32>> - %78 = flow.variable.address @"__iree_flow___sm_node618__m.layer-79.kernel" : !iree.ptr<tensor<1x1x240x64xf32>> - %79 = flow.variable.address @"__iree_flow___sm_node619__m.layer-79.bias" : !iree.ptr<tensor<64xf32>> - %80 = flow.variable.address @"__iree_flow___sm_node628__m.layer-81.kernel" : !iree.ptr<tensor<1x1x64x240xf32>> - %81 = flow.variable.address @"__iree_flow___sm_node629__m.layer-81.bias" : !iree.ptr<tensor<240xf32>> - %82 = flow.variable.address @"__iree_flow___sm_node644__m.layer-86.kernel" : !iree.ptr<tensor<1x1x240x40xf32>> - %83 = flow.variable.address @"__iree_flow___sm_node650__m.layer-87.gamma" : !iree.ptr<tensor<40xf32>> - %84 = flow.variable.address @"__iree_flow___sm_node651__m.layer-87.beta" : !iree.ptr<tensor<40xf32>> - %85 = flow.variable.address @"__iree_flow___sm_node652__m.layer-87.moving_mean" : !iree.ptr<tensor<40xf32>> - %86 = flow.variable.address 
@"__iree_flow___sm_node653__m.layer-87.moving_variance" : !iree.ptr<tensor<40xf32>> - %87 = flow.variable.address @"__iree_flow___sm_node662__m.layer-89.kernel" : !iree.ptr<tensor<1x1x40x240xf32>> - %88 = flow.variable.address @"__iree_flow___sm_node668__m.layer-90.gamma" : !iree.ptr<tensor<240xf32>> - %89 = flow.variable.address @"__iree_flow___sm_node669__m.layer-90.beta" : !iree.ptr<tensor<240xf32>> - %90 = flow.variable.address @"__iree_flow___sm_node670__m.layer-90.moving_mean" : !iree.ptr<tensor<240xf32>> - %91 = flow.variable.address @"__iree_flow___sm_node671__m.layer-90.moving_variance" : !iree.ptr<tensor<240xf32>> - %92 = flow.variable.address @"__iree_flow___sm_node686__m.layer-95.depthwise_kernel" : !iree.ptr<tensor<5x5x240x1xf32>> - %93 = flow.variable.address @"__iree_flow___sm_node692__m.layer-96.gamma" : !iree.ptr<tensor<240xf32>> - %94 = flow.variable.address @"__iree_flow___sm_node693__m.layer-96.beta" : !iree.ptr<tensor<240xf32>> - %95 = flow.variable.address @"__iree_flow___sm_node694__m.layer-96.moving_mean" : !iree.ptr<tensor<240xf32>> - %96 = flow.variable.address @"__iree_flow___sm_node695__m.layer-96.moving_variance" : !iree.ptr<tensor<240xf32>> - %97 = flow.variable.address @"__iree_flow___sm_node718__m.layer-103.kernel" : !iree.ptr<tensor<1x1x240x64xf32>> - %98 = flow.variable.address @"__iree_flow___sm_node719__m.layer-103.bias" : !iree.ptr<tensor<64xf32>> - %99 = flow.variable.address @"__iree_flow___sm_node728__m.layer-105.kernel" : !iree.ptr<tensor<1x1x64x240xf32>> - %100 = flow.variable.address @"__iree_flow___sm_node729__m.layer-105.bias" : !iree.ptr<tensor<240xf32>> - %101 = flow.variable.address @"__iree_flow___sm_node744__m.layer-110.kernel" : !iree.ptr<tensor<1x1x240x40xf32>> - %102 = flow.variable.address @"__iree_flow___sm_node750__m.layer-111.gamma" : !iree.ptr<tensor<40xf32>> - %103 = flow.variable.address @"__iree_flow___sm_node751__m.layer-111.beta" : !iree.ptr<tensor<40xf32>> - %104 = flow.variable.address 
@"__iree_flow___sm_node752__m.layer-111.moving_mean" : !iree.ptr<tensor<40xf32>> - %105 = flow.variable.address @"__iree_flow___sm_node753__m.layer-111.moving_variance" : !iree.ptr<tensor<40xf32>> - %106 = flow.variable.address @"__iree_flow___sm_node762__m.layer-113.kernel" : !iree.ptr<tensor<1x1x40x120xf32>> - %107 = flow.variable.address @"__iree_flow___sm_node768__m.layer-114.gamma" : !iree.ptr<tensor<120xf32>> - %108 = flow.variable.address @"__iree_flow___sm_node769__m.layer-114.beta" : !iree.ptr<tensor<120xf32>> - %109 = flow.variable.address @"__iree_flow___sm_node770__m.layer-114.moving_mean" : !iree.ptr<tensor<120xf32>> - %110 = flow.variable.address @"__iree_flow___sm_node771__m.layer-114.moving_variance" : !iree.ptr<tensor<120xf32>> - %111 = flow.variable.address @"__iree_flow___sm_node786__m.layer-119.depthwise_kernel" : !iree.ptr<tensor<5x5x120x1xf32>> - %112 = flow.variable.address @"__iree_flow___sm_node792__m.layer-120.gamma" : !iree.ptr<tensor<120xf32>> - %113 = flow.variable.address @"__iree_flow___sm_node793__m.layer-120.beta" : !iree.ptr<tensor<120xf32>> - %114 = flow.variable.address @"__iree_flow___sm_node794__m.layer-120.moving_mean" : !iree.ptr<tensor<120xf32>> - %115 = flow.variable.address @"__iree_flow___sm_node795__m.layer-120.moving_variance" : !iree.ptr<tensor<120xf32>> - %116 = flow.variable.address @"__iree_flow___sm_node818__m.layer-127.kernel" : !iree.ptr<tensor<1x1x120x32xf32>> - %117 = flow.variable.address @"__iree_flow___sm_node819__m.layer-127.bias" : !iree.ptr<tensor<32xf32>> - %118 = flow.variable.address @"__iree_flow___sm_node828__m.layer-129.kernel" : !iree.ptr<tensor<1x1x32x120xf32>> - %119 = flow.variable.address @"__iree_flow___sm_node829__m.layer-129.bias" : !iree.ptr<tensor<120xf32>> - %120 = flow.variable.address @"__iree_flow___sm_node844__m.layer-134.kernel" : !iree.ptr<tensor<1x1x120x48xf32>> - %121 = flow.variable.address @"__iree_flow___sm_node850__m.layer-135.gamma" : !iree.ptr<tensor<48xf32>> - %122 = 
flow.variable.address @"__iree_flow___sm_node851__m.layer-135.beta" : !iree.ptr<tensor<48xf32>> - %123 = flow.variable.address @"__iree_flow___sm_node852__m.layer-135.moving_mean" : !iree.ptr<tensor<48xf32>> - %124 = flow.variable.address @"__iree_flow___sm_node853__m.layer-135.moving_variance" : !iree.ptr<tensor<48xf32>> - %125 = flow.variable.address @"__iree_flow___sm_node858__m.layer-136.kernel" : !iree.ptr<tensor<1x1x48x144xf32>> - %126 = flow.variable.address @"__iree_flow___sm_node864__m.layer-137.gamma" : !iree.ptr<tensor<144xf32>> - %127 = flow.variable.address @"__iree_flow___sm_node865__m.layer-137.beta" : !iree.ptr<tensor<144xf32>> - %128 = flow.variable.address @"__iree_flow___sm_node866__m.layer-137.moving_mean" : !iree.ptr<tensor<144xf32>> - %129 = flow.variable.address @"__iree_flow___sm_node867__m.layer-137.moving_variance" : !iree.ptr<tensor<144xf32>> - %130 = flow.variable.address @"__iree_flow___sm_node882__m.layer-142.depthwise_kernel" : !iree.ptr<tensor<5x5x144x1xf32>> - %131 = flow.variable.address @"__iree_flow___sm_node888__m.layer-143.gamma" : !iree.ptr<tensor<144xf32>> - %132 = flow.variable.address @"__iree_flow___sm_node889__m.layer-143.beta" : !iree.ptr<tensor<144xf32>> - %133 = flow.variable.address @"__iree_flow___sm_node890__m.layer-143.moving_mean" : !iree.ptr<tensor<144xf32>> - %134 = flow.variable.address @"__iree_flow___sm_node891__m.layer-143.moving_variance" : !iree.ptr<tensor<144xf32>> - %135 = flow.variable.address @"__iree_flow___sm_node914__m.layer-150.kernel" : !iree.ptr<tensor<1x1x144x40xf32>> - %136 = flow.variable.address @"__iree_flow___sm_node915__m.layer-150.bias" : !iree.ptr<tensor<40xf32>> - %137 = flow.variable.address @"__iree_flow___sm_node924__m.layer-152.kernel" : !iree.ptr<tensor<1x1x40x144xf32>> - %138 = flow.variable.address @"__iree_flow___sm_node925__m.layer-152.bias" : !iree.ptr<tensor<144xf32>> - %139 = flow.variable.address @"__iree_flow___sm_node940__m.layer-157.kernel" : 
!iree.ptr<tensor<1x1x144x48xf32>> - %140 = flow.variable.address @"__iree_flow___sm_node946__m.layer-158.gamma" : !iree.ptr<tensor<48xf32>> - %141 = flow.variable.address @"__iree_flow___sm_node947__m.layer-158.beta" : !iree.ptr<tensor<48xf32>> - %142 = flow.variable.address @"__iree_flow___sm_node948__m.layer-158.moving_mean" : !iree.ptr<tensor<48xf32>> - %143 = flow.variable.address @"__iree_flow___sm_node949__m.layer-158.moving_variance" : !iree.ptr<tensor<48xf32>> - %144 = flow.variable.address @"__iree_flow___sm_node958__m.layer-160.kernel" : !iree.ptr<tensor<1x1x48x288xf32>> - %145 = flow.variable.address @"__iree_flow___sm_node964__m.layer-161.gamma" : !iree.ptr<tensor<288xf32>> - %146 = flow.variable.address @"__iree_flow___sm_node965__m.layer-161.beta" : !iree.ptr<tensor<288xf32>> - %147 = flow.variable.address @"__iree_flow___sm_node966__m.layer-161.moving_mean" : !iree.ptr<tensor<288xf32>> - %148 = flow.variable.address @"__iree_flow___sm_node967__m.layer-161.moving_variance" : !iree.ptr<tensor<288xf32>> - %149 = flow.variable.address @"__iree_flow___sm_node986__m.layer-167.depthwise_kernel" : !iree.ptr<tensor<5x5x288x1xf32>> - %150 = flow.variable.address @"__iree_flow___sm_node992__m.layer-168.gamma" : !iree.ptr<tensor<288xf32>> - %151 = flow.variable.address @"__iree_flow___sm_node993__m.layer-168.beta" : !iree.ptr<tensor<288xf32>> - %152 = flow.variable.address @"__iree_flow___sm_node994__m.layer-168.moving_mean" : !iree.ptr<tensor<288xf32>> - %153 = flow.variable.address @"__iree_flow___sm_node995__m.layer-168.moving_variance" : !iree.ptr<tensor<288xf32>> - %154 = flow.variable.address @"__iree_flow___sm_node1018__m.layer-175.kernel" : !iree.ptr<tensor<1x1x288x72xf32>> - %155 = flow.variable.address @"__iree_flow___sm_node1019__m.layer-175.bias" : !iree.ptr<tensor<72xf32>> - %156 = flow.variable.address @"__iree_flow___sm_node1028__m.layer-177.kernel" : !iree.ptr<tensor<1x1x72x288xf32>> - %157 = flow.variable.address 
@"__iree_flow___sm_node1029__m.layer-177.bias" : !iree.ptr<tensor<288xf32>> - %158 = flow.variable.address @"__iree_flow___sm_node1044__m.layer-182.kernel" : !iree.ptr<tensor<1x1x288x96xf32>> - %159 = flow.variable.address @"__iree_flow___sm_node1050__m.layer-183.gamma" : !iree.ptr<tensor<96xf32>> - %160 = flow.variable.address @"__iree_flow___sm_node1051__m.layer-183.beta" : !iree.ptr<tensor<96xf32>> - %161 = flow.variable.address @"__iree_flow___sm_node1052__m.layer-183.moving_mean" : !iree.ptr<tensor<96xf32>> - %162 = flow.variable.address @"__iree_flow___sm_node1053__m.layer-183.moving_variance" : !iree.ptr<tensor<96xf32>> - %163 = flow.variable.address @"__iree_flow___sm_node1058__m.layer-184.kernel" : !iree.ptr<tensor<1x1x96x576xf32>> - %164 = flow.variable.address @"__iree_flow___sm_node1064__m.layer-185.gamma" : !iree.ptr<tensor<576xf32>> - %165 = flow.variable.address @"__iree_flow___sm_node1065__m.layer-185.beta" : !iree.ptr<tensor<576xf32>> - %166 = flow.variable.address @"__iree_flow___sm_node1066__m.layer-185.moving_mean" : !iree.ptr<tensor<576xf32>> - %167 = flow.variable.address @"__iree_flow___sm_node1067__m.layer-185.moving_variance" : !iree.ptr<tensor<576xf32>> - %168 = flow.variable.address @"__iree_flow___sm_node1082__m.layer-190.depthwise_kernel" : !iree.ptr<tensor<5x5x576x1xf32>> - %169 = flow.variable.address @"__iree_flow___sm_node1088__m.layer-191.gamma" : !iree.ptr<tensor<576xf32>> - %170 = flow.variable.address @"__iree_flow___sm_node1089__m.layer-191.beta" : !iree.ptr<tensor<576xf32>> - %171 = flow.variable.address @"__iree_flow___sm_node1090__m.layer-191.moving_mean" : !iree.ptr<tensor<576xf32>> - %172 = flow.variable.address @"__iree_flow___sm_node1091__m.layer-191.moving_variance" : !iree.ptr<tensor<576xf32>> - %173 = flow.variable.address @"__iree_flow___sm_node1114__m.layer-198.kernel" : !iree.ptr<tensor<1x1x576x144xf32>> - %174 = flow.variable.address @"__iree_flow___sm_node1115__m.layer-198.bias" : !iree.ptr<tensor<144xf32>> - 
%175 = flow.variable.address @"__iree_flow___sm_node1124__m.layer-200.kernel" : !iree.ptr<tensor<1x1x144x576xf32>> - %176 = flow.variable.address @"__iree_flow___sm_node1125__m.layer-200.bias" : !iree.ptr<tensor<576xf32>> - %177 = flow.variable.address @"__iree_flow___sm_node1140__m.layer-205.kernel" : !iree.ptr<tensor<1x1x576x96xf32>> - %178 = flow.variable.address @"__iree_flow___sm_node1146__m.layer-206.gamma" : !iree.ptr<tensor<96xf32>> - %179 = flow.variable.address @"__iree_flow___sm_node1147__m.layer-206.beta" : !iree.ptr<tensor<96xf32>> - %180 = flow.variable.address @"__iree_flow___sm_node1148__m.layer-206.moving_mean" : !iree.ptr<tensor<96xf32>> - %181 = flow.variable.address @"__iree_flow___sm_node1149__m.layer-206.moving_variance" : !iree.ptr<tensor<96xf32>> - %182 = flow.variable.address @"__iree_flow___sm_node1158__m.layer-208.kernel" : !iree.ptr<tensor<1x1x96x576xf32>> - %183 = flow.variable.address @"__iree_flow___sm_node1164__m.layer-209.gamma" : !iree.ptr<tensor<576xf32>> - %184 = flow.variable.address @"__iree_flow___sm_node1165__m.layer-209.beta" : !iree.ptr<tensor<576xf32>> - %185 = flow.variable.address @"__iree_flow___sm_node1166__m.layer-209.moving_mean" : !iree.ptr<tensor<576xf32>> - %186 = flow.variable.address @"__iree_flow___sm_node1167__m.layer-209.moving_variance" : !iree.ptr<tensor<576xf32>> - %187 = flow.variable.address @"__iree_flow___sm_node1182__m.layer-214.depthwise_kernel" : !iree.ptr<tensor<5x5x576x1xf32>> - %188 = flow.variable.address @"__iree_flow___sm_node1188__m.layer-215.gamma" : !iree.ptr<tensor<576xf32>> - %189 = flow.variable.address @"__iree_flow___sm_node1189__m.layer-215.beta" : !iree.ptr<tensor<576xf32>> - %190 = flow.variable.address @"__iree_flow___sm_node1190__m.layer-215.moving_mean" : !iree.ptr<tensor<576xf32>> - %191 = flow.variable.address @"__iree_flow___sm_node1191__m.layer-215.moving_variance" : !iree.ptr<tensor<576xf32>> - %192 = flow.variable.address @"__iree_flow___sm_node1214__m.layer-222.kernel" : 
!iree.ptr<tensor<1x1x576x144xf32>> - %193 = flow.variable.address @"__iree_flow___sm_node1215__m.layer-222.bias" : !iree.ptr<tensor<144xf32>> - %194 = flow.variable.address @"__iree_flow___sm_node1224__m.layer-224.kernel" : !iree.ptr<tensor<1x1x144x576xf32>> - %195 = flow.variable.address @"__iree_flow___sm_node1225__m.layer-224.bias" : !iree.ptr<tensor<576xf32>> - %196 = flow.variable.address @"__iree_flow___sm_node1240__m.layer-229.kernel" : !iree.ptr<tensor<1x1x576x96xf32>> - %197 = flow.variable.address @"__iree_flow___sm_node1246__m.layer-230.gamma" : !iree.ptr<tensor<96xf32>> - %198 = flow.variable.address @"__iree_flow___sm_node1247__m.layer-230.beta" : !iree.ptr<tensor<96xf32>> - %199 = flow.variable.address @"__iree_flow___sm_node1248__m.layer-230.moving_mean" : !iree.ptr<tensor<96xf32>> - %200 = flow.variable.address @"__iree_flow___sm_node1249__m.layer-230.moving_variance" : !iree.ptr<tensor<96xf32>> - %201 = flow.variable.address @"__iree_flow___sm_node1258__m.layer-232.kernel" : !iree.ptr<tensor<1x1x96x576xf32>> - %202 = flow.variable.address @"__iree_flow___sm_node1264__m.layer-233.gamma" : !iree.ptr<tensor<576xf32>> - %203 = flow.variable.address @"__iree_flow___sm_node1265__m.layer-233.beta" : !iree.ptr<tensor<576xf32>> - %204 = flow.variable.address @"__iree_flow___sm_node1266__m.layer-233.moving_mean" : !iree.ptr<tensor<576xf32>> - %205 = flow.variable.address @"__iree_flow___sm_node1267__m.layer-233.moving_variance" : !iree.ptr<tensor<576xf32>> - %206 = flow.variable.address @"__iree_flow___sm_node1290__m.layer-240.kernel" : !iree.ptr<tensor<1x1x576x1024xf32>> - %207 = flow.variable.address @"__iree_flow___sm_node1291__m.layer-240.bias" : !iree.ptr<tensor<1024xf32>> - %208 = flow.variable.address @"__iree_flow___sm_node1310__m.layer-246.kernel" : !iree.ptr<tensor<1x1x1024x1000xf32>> - %209 = flow.variable.address @"__iree_flow___sm_node1311__m.layer-246.bias" : !iree.ptr<tensor<1000xf32>> + %arg0 = util.unfoldable_constant dense<1.5> : 
tensor<1x224x224x3xf32> + %0 = flow.variable.address @"__iree_flow___sm_node260__m.layer-2.kernel" : !util.ptr<tensor<3x3x3x16xf32>> + %1 = flow.variable.address @"__iree_flow___sm_node266__m.layer-3.gamma" : !util.ptr<tensor<16xf32>> + %2 = flow.variable.address @"__iree_flow___sm_node267__m.layer-3.beta" : !util.ptr<tensor<16xf32>> + %3 = flow.variable.address @"__iree_flow___sm_node268__m.layer-3.moving_mean" : !util.ptr<tensor<16xf32>> + %4 = flow.variable.address @"__iree_flow___sm_node269__m.layer-3.moving_variance" : !util.ptr<tensor<16xf32>> + %5 = flow.variable.address @"__iree_flow___sm_node288__m.layer-9.depthwise_kernel" : !util.ptr<tensor<3x3x16x1xf32>> + %6 = flow.variable.address @"__iree_flow___sm_node294__m.layer-10.gamma" : !util.ptr<tensor<16xf32>> + %7 = flow.variable.address @"__iree_flow___sm_node295__m.layer-10.beta" : !util.ptr<tensor<16xf32>> + %8 = flow.variable.address @"__iree_flow___sm_node296__m.layer-10.moving_mean" : !util.ptr<tensor<16xf32>> + %9 = flow.variable.address @"__iree_flow___sm_node297__m.layer-10.moving_variance" : !util.ptr<tensor<16xf32>> + %10 = flow.variable.address @"__iree_flow___sm_node314__m.layer-14.kernel" : !util.ptr<tensor<1x1x16x8xf32>> + %11 = flow.variable.address @"__iree_flow___sm_node315__m.layer-14.bias" : !util.ptr<tensor<8xf32>> + %12 = flow.variable.address @"__iree_flow___sm_node324__m.layer-16.kernel" : !util.ptr<tensor<1x1x8x16xf32>> + %13 = flow.variable.address @"__iree_flow___sm_node325__m.layer-16.bias" : !util.ptr<tensor<16xf32>> + %14 = flow.variable.address @"__iree_flow___sm_node340__m.layer-21.kernel" : !util.ptr<tensor<1x1x16x16xf32>> + %15 = flow.variable.address @"__iree_flow___sm_node346__m.layer-22.gamma" : !util.ptr<tensor<16xf32>> + %16 = flow.variable.address @"__iree_flow___sm_node347__m.layer-22.beta" : !util.ptr<tensor<16xf32>> + %17 = flow.variable.address @"__iree_flow___sm_node348__m.layer-22.moving_mean" : !util.ptr<tensor<16xf32>> + %18 = flow.variable.address 
@"__iree_flow___sm_node349__m.layer-22.moving_variance" : !util.ptr<tensor<16xf32>> + %19 = flow.variable.address @"__iree_flow___sm_node354__m.layer-23.kernel" : !util.ptr<tensor<1x1x16x72xf32>> + %20 = flow.variable.address @"__iree_flow___sm_node360__m.layer-24.gamma" : !util.ptr<tensor<72xf32>> + %21 = flow.variable.address @"__iree_flow___sm_node361__m.layer-24.beta" : !util.ptr<tensor<72xf32>> + %22 = flow.variable.address @"__iree_flow___sm_node362__m.layer-24.moving_mean" : !util.ptr<tensor<72xf32>> + %23 = flow.variable.address @"__iree_flow___sm_node363__m.layer-24.moving_variance" : !util.ptr<tensor<72xf32>> + %24 = flow.variable.address @"__iree_flow___sm_node376__m.layer-27.depthwise_kernel" : !util.ptr<tensor<3x3x72x1xf32>> + %25 = flow.variable.address @"__iree_flow___sm_node382__m.layer-28.gamma" : !util.ptr<tensor<72xf32>> + %26 = flow.variable.address @"__iree_flow___sm_node383__m.layer-28.beta" : !util.ptr<tensor<72xf32>> + %27 = flow.variable.address @"__iree_flow___sm_node384__m.layer-28.moving_mean" : !util.ptr<tensor<72xf32>> + %28 = flow.variable.address @"__iree_flow___sm_node385__m.layer-28.moving_variance" : !util.ptr<tensor<72xf32>> + %29 = flow.variable.address @"__iree_flow___sm_node394__m.layer-30.kernel" : !util.ptr<tensor<1x1x72x24xf32>> + %30 = flow.variable.address @"__iree_flow___sm_node400__m.layer-31.gamma" : !util.ptr<tensor<24xf32>> + %31 = flow.variable.address @"__iree_flow___sm_node401__m.layer-31.beta" : !util.ptr<tensor<24xf32>> + %32 = flow.variable.address @"__iree_flow___sm_node402__m.layer-31.moving_mean" : !util.ptr<tensor<24xf32>> + %33 = flow.variable.address @"__iree_flow___sm_node403__m.layer-31.moving_variance" : !util.ptr<tensor<24xf32>> + %34 = flow.variable.address @"__iree_flow___sm_node408__m.layer-32.kernel" : !util.ptr<tensor<1x1x24x88xf32>> + %35 = flow.variable.address @"__iree_flow___sm_node414__m.layer-33.gamma" : !util.ptr<tensor<88xf32>> + %36 = flow.variable.address 
@"__iree_flow___sm_node415__m.layer-33.beta" : !util.ptr<tensor<88xf32>> + %37 = flow.variable.address @"__iree_flow___sm_node416__m.layer-33.moving_mean" : !util.ptr<tensor<88xf32>> + %38 = flow.variable.address @"__iree_flow___sm_node417__m.layer-33.moving_variance" : !util.ptr<tensor<88xf32>> + %39 = flow.variable.address @"__iree_flow___sm_node426__m.layer-35.depthwise_kernel" : !util.ptr<tensor<3x3x88x1xf32>> + %40 = flow.variable.address @"__iree_flow___sm_node432__m.layer-36.gamma" : !util.ptr<tensor<88xf32>> + %41 = flow.variable.address @"__iree_flow___sm_node433__m.layer-36.beta" : !util.ptr<tensor<88xf32>> + %42 = flow.variable.address @"__iree_flow___sm_node434__m.layer-36.moving_mean" : !util.ptr<tensor<88xf32>> + %43 = flow.variable.address @"__iree_flow___sm_node435__m.layer-36.moving_variance" : !util.ptr<tensor<88xf32>> + %44 = flow.variable.address @"__iree_flow___sm_node444__m.layer-38.kernel" : !util.ptr<tensor<1x1x88x24xf32>> + %45 = flow.variable.address @"__iree_flow___sm_node450__m.layer-39.gamma" : !util.ptr<tensor<24xf32>> + %46 = flow.variable.address @"__iree_flow___sm_node451__m.layer-39.beta" : !util.ptr<tensor<24xf32>> + %47 = flow.variable.address @"__iree_flow___sm_node452__m.layer-39.moving_mean" : !util.ptr<tensor<24xf32>> + %48 = flow.variable.address @"__iree_flow___sm_node453__m.layer-39.moving_variance" : !util.ptr<tensor<24xf32>> + %49 = flow.variable.address @"__iree_flow___sm_node462__m.layer-41.kernel" : !util.ptr<tensor<1x1x24x96xf32>> + %50 = flow.variable.address @"__iree_flow___sm_node468__m.layer-42.gamma" : !util.ptr<tensor<96xf32>> + %51 = flow.variable.address @"__iree_flow___sm_node469__m.layer-42.beta" : !util.ptr<tensor<96xf32>> + %52 = flow.variable.address @"__iree_flow___sm_node470__m.layer-42.moving_mean" : !util.ptr<tensor<96xf32>> + %53 = flow.variable.address @"__iree_flow___sm_node471__m.layer-42.moving_variance" : !util.ptr<tensor<96xf32>> + %54 = flow.variable.address 
@"__iree_flow___sm_node490__m.layer-48.depthwise_kernel" : !util.ptr<tensor<5x5x96x1xf32>> + %55 = flow.variable.address @"__iree_flow___sm_node496__m.layer-49.gamma" : !util.ptr<tensor<96xf32>> + %56 = flow.variable.address @"__iree_flow___sm_node497__m.layer-49.beta" : !util.ptr<tensor<96xf32>> + %57 = flow.variable.address @"__iree_flow___sm_node498__m.layer-49.moving_mean" : !util.ptr<tensor<96xf32>> + %58 = flow.variable.address @"__iree_flow___sm_node499__m.layer-49.moving_variance" : !util.ptr<tensor<96xf32>> + %59 = flow.variable.address @"__iree_flow___sm_node522__m.layer-56.kernel" : !util.ptr<tensor<1x1x96x24xf32>> + %60 = flow.variable.address @"__iree_flow___sm_node523__m.layer-56.bias" : !util.ptr<tensor<24xf32>> + %61 = flow.variable.address @"__iree_flow___sm_node532__m.layer-58.kernel" : !util.ptr<tensor<1x1x24x96xf32>> + %62 = flow.variable.address @"__iree_flow___sm_node533__m.layer-58.bias" : !util.ptr<tensor<96xf32>> + %63 = flow.variable.address @"__iree_flow___sm_node548__m.layer-63.kernel" : !util.ptr<tensor<1x1x96x40xf32>> + %64 = flow.variable.address @"__iree_flow___sm_node554__m.layer-64.gamma" : !util.ptr<tensor<40xf32>> + %65 = flow.variable.address @"__iree_flow___sm_node555__m.layer-64.beta" : !util.ptr<tensor<40xf32>> + %66 = flow.variable.address @"__iree_flow___sm_node556__m.layer-64.moving_mean" : !util.ptr<tensor<40xf32>> + %67 = flow.variable.address @"__iree_flow___sm_node557__m.layer-64.moving_variance" : !util.ptr<tensor<40xf32>> + %68 = flow.variable.address @"__iree_flow___sm_node562__m.layer-65.kernel" : !util.ptr<tensor<1x1x40x240xf32>> + %69 = flow.variable.address @"__iree_flow___sm_node568__m.layer-66.gamma" : !util.ptr<tensor<240xf32>> + %70 = flow.variable.address @"__iree_flow___sm_node569__m.layer-66.beta" : !util.ptr<tensor<240xf32>> + %71 = flow.variable.address @"__iree_flow___sm_node570__m.layer-66.moving_mean" : !util.ptr<tensor<240xf32>> + %72 = flow.variable.address 
@"__iree_flow___sm_node571__m.layer-66.moving_variance" : !util.ptr<tensor<240xf32>> + %73 = flow.variable.address @"__iree_flow___sm_node586__m.layer-71.depthwise_kernel" : !util.ptr<tensor<5x5x240x1xf32>> + %74 = flow.variable.address @"__iree_flow___sm_node592__m.layer-72.gamma" : !util.ptr<tensor<240xf32>> + %75 = flow.variable.address @"__iree_flow___sm_node593__m.layer-72.beta" : !util.ptr<tensor<240xf32>> + %76 = flow.variable.address @"__iree_flow___sm_node594__m.layer-72.moving_mean" : !util.ptr<tensor<240xf32>> + %77 = flow.variable.address @"__iree_flow___sm_node595__m.layer-72.moving_variance" : !util.ptr<tensor<240xf32>> + %78 = flow.variable.address @"__iree_flow___sm_node618__m.layer-79.kernel" : !util.ptr<tensor<1x1x240x64xf32>> + %79 = flow.variable.address @"__iree_flow___sm_node619__m.layer-79.bias" : !util.ptr<tensor<64xf32>> + %80 = flow.variable.address @"__iree_flow___sm_node628__m.layer-81.kernel" : !util.ptr<tensor<1x1x64x240xf32>> + %81 = flow.variable.address @"__iree_flow___sm_node629__m.layer-81.bias" : !util.ptr<tensor<240xf32>> + %82 = flow.variable.address @"__iree_flow___sm_node644__m.layer-86.kernel" : !util.ptr<tensor<1x1x240x40xf32>> + %83 = flow.variable.address @"__iree_flow___sm_node650__m.layer-87.gamma" : !util.ptr<tensor<40xf32>> + %84 = flow.variable.address @"__iree_flow___sm_node651__m.layer-87.beta" : !util.ptr<tensor<40xf32>> + %85 = flow.variable.address @"__iree_flow___sm_node652__m.layer-87.moving_mean" : !util.ptr<tensor<40xf32>> + %86 = flow.variable.address @"__iree_flow___sm_node653__m.layer-87.moving_variance" : !util.ptr<tensor<40xf32>> + %87 = flow.variable.address @"__iree_flow___sm_node662__m.layer-89.kernel" : !util.ptr<tensor<1x1x40x240xf32>> + %88 = flow.variable.address @"__iree_flow___sm_node668__m.layer-90.gamma" : !util.ptr<tensor<240xf32>> + %89 = flow.variable.address @"__iree_flow___sm_node669__m.layer-90.beta" : !util.ptr<tensor<240xf32>> + %90 = flow.variable.address 
@"__iree_flow___sm_node670__m.layer-90.moving_mean" : !util.ptr<tensor<240xf32>> + %91 = flow.variable.address @"__iree_flow___sm_node671__m.layer-90.moving_variance" : !util.ptr<tensor<240xf32>> + %92 = flow.variable.address @"__iree_flow___sm_node686__m.layer-95.depthwise_kernel" : !util.ptr<tensor<5x5x240x1xf32>> + %93 = flow.variable.address @"__iree_flow___sm_node692__m.layer-96.gamma" : !util.ptr<tensor<240xf32>> + %94 = flow.variable.address @"__iree_flow___sm_node693__m.layer-96.beta" : !util.ptr<tensor<240xf32>> + %95 = flow.variable.address @"__iree_flow___sm_node694__m.layer-96.moving_mean" : !util.ptr<tensor<240xf32>> + %96 = flow.variable.address @"__iree_flow___sm_node695__m.layer-96.moving_variance" : !util.ptr<tensor<240xf32>> + %97 = flow.variable.address @"__iree_flow___sm_node718__m.layer-103.kernel" : !util.ptr<tensor<1x1x240x64xf32>> + %98 = flow.variable.address @"__iree_flow___sm_node719__m.layer-103.bias" : !util.ptr<tensor<64xf32>> + %99 = flow.variable.address @"__iree_flow___sm_node728__m.layer-105.kernel" : !util.ptr<tensor<1x1x64x240xf32>> + %100 = flow.variable.address @"__iree_flow___sm_node729__m.layer-105.bias" : !util.ptr<tensor<240xf32>> + %101 = flow.variable.address @"__iree_flow___sm_node744__m.layer-110.kernel" : !util.ptr<tensor<1x1x240x40xf32>> + %102 = flow.variable.address @"__iree_flow___sm_node750__m.layer-111.gamma" : !util.ptr<tensor<40xf32>> + %103 = flow.variable.address @"__iree_flow___sm_node751__m.layer-111.beta" : !util.ptr<tensor<40xf32>> + %104 = flow.variable.address @"__iree_flow___sm_node752__m.layer-111.moving_mean" : !util.ptr<tensor<40xf32>> + %105 = flow.variable.address @"__iree_flow___sm_node753__m.layer-111.moving_variance" : !util.ptr<tensor<40xf32>> + %106 = flow.variable.address @"__iree_flow___sm_node762__m.layer-113.kernel" : !util.ptr<tensor<1x1x40x120xf32>> + %107 = flow.variable.address @"__iree_flow___sm_node768__m.layer-114.gamma" : !util.ptr<tensor<120xf32>> + %108 = flow.variable.address 
@"__iree_flow___sm_node769__m.layer-114.beta" : !util.ptr<tensor<120xf32>> + %109 = flow.variable.address @"__iree_flow___sm_node770__m.layer-114.moving_mean" : !util.ptr<tensor<120xf32>> + %110 = flow.variable.address @"__iree_flow___sm_node771__m.layer-114.moving_variance" : !util.ptr<tensor<120xf32>> + %111 = flow.variable.address @"__iree_flow___sm_node786__m.layer-119.depthwise_kernel" : !util.ptr<tensor<5x5x120x1xf32>> + %112 = flow.variable.address @"__iree_flow___sm_node792__m.layer-120.gamma" : !util.ptr<tensor<120xf32>> + %113 = flow.variable.address @"__iree_flow___sm_node793__m.layer-120.beta" : !util.ptr<tensor<120xf32>> + %114 = flow.variable.address @"__iree_flow___sm_node794__m.layer-120.moving_mean" : !util.ptr<tensor<120xf32>> + %115 = flow.variable.address @"__iree_flow___sm_node795__m.layer-120.moving_variance" : !util.ptr<tensor<120xf32>> + %116 = flow.variable.address @"__iree_flow___sm_node818__m.layer-127.kernel" : !util.ptr<tensor<1x1x120x32xf32>> + %117 = flow.variable.address @"__iree_flow___sm_node819__m.layer-127.bias" : !util.ptr<tensor<32xf32>> + %118 = flow.variable.address @"__iree_flow___sm_node828__m.layer-129.kernel" : !util.ptr<tensor<1x1x32x120xf32>> + %119 = flow.variable.address @"__iree_flow___sm_node829__m.layer-129.bias" : !util.ptr<tensor<120xf32>> + %120 = flow.variable.address @"__iree_flow___sm_node844__m.layer-134.kernel" : !util.ptr<tensor<1x1x120x48xf32>> + %121 = flow.variable.address @"__iree_flow___sm_node850__m.layer-135.gamma" : !util.ptr<tensor<48xf32>> + %122 = flow.variable.address @"__iree_flow___sm_node851__m.layer-135.beta" : !util.ptr<tensor<48xf32>> + %123 = flow.variable.address @"__iree_flow___sm_node852__m.layer-135.moving_mean" : !util.ptr<tensor<48xf32>> + %124 = flow.variable.address @"__iree_flow___sm_node853__m.layer-135.moving_variance" : !util.ptr<tensor<48xf32>> + %125 = flow.variable.address @"__iree_flow___sm_node858__m.layer-136.kernel" : !util.ptr<tensor<1x1x48x144xf32>> + %126 = 
flow.variable.address @"__iree_flow___sm_node864__m.layer-137.gamma" : !util.ptr<tensor<144xf32>> + %127 = flow.variable.address @"__iree_flow___sm_node865__m.layer-137.beta" : !util.ptr<tensor<144xf32>> + %128 = flow.variable.address @"__iree_flow___sm_node866__m.layer-137.moving_mean" : !util.ptr<tensor<144xf32>> + %129 = flow.variable.address @"__iree_flow___sm_node867__m.layer-137.moving_variance" : !util.ptr<tensor<144xf32>> + %130 = flow.variable.address @"__iree_flow___sm_node882__m.layer-142.depthwise_kernel" : !util.ptr<tensor<5x5x144x1xf32>> + %131 = flow.variable.address @"__iree_flow___sm_node888__m.layer-143.gamma" : !util.ptr<tensor<144xf32>> + %132 = flow.variable.address @"__iree_flow___sm_node889__m.layer-143.beta" : !util.ptr<tensor<144xf32>> + %133 = flow.variable.address @"__iree_flow___sm_node890__m.layer-143.moving_mean" : !util.ptr<tensor<144xf32>> + %134 = flow.variable.address @"__iree_flow___sm_node891__m.layer-143.moving_variance" : !util.ptr<tensor<144xf32>> + %135 = flow.variable.address @"__iree_flow___sm_node914__m.layer-150.kernel" : !util.ptr<tensor<1x1x144x40xf32>> + %136 = flow.variable.address @"__iree_flow___sm_node915__m.layer-150.bias" : !util.ptr<tensor<40xf32>> + %137 = flow.variable.address @"__iree_flow___sm_node924__m.layer-152.kernel" : !util.ptr<tensor<1x1x40x144xf32>> + %138 = flow.variable.address @"__iree_flow___sm_node925__m.layer-152.bias" : !util.ptr<tensor<144xf32>> + %139 = flow.variable.address @"__iree_flow___sm_node940__m.layer-157.kernel" : !util.ptr<tensor<1x1x144x48xf32>> + %140 = flow.variable.address @"__iree_flow___sm_node946__m.layer-158.gamma" : !util.ptr<tensor<48xf32>> + %141 = flow.variable.address @"__iree_flow___sm_node947__m.layer-158.beta" : !util.ptr<tensor<48xf32>> + %142 = flow.variable.address @"__iree_flow___sm_node948__m.layer-158.moving_mean" : !util.ptr<tensor<48xf32>> + %143 = flow.variable.address @"__iree_flow___sm_node949__m.layer-158.moving_variance" : !util.ptr<tensor<48xf32>> + 
%144 = flow.variable.address @"__iree_flow___sm_node958__m.layer-160.kernel" : !util.ptr<tensor<1x1x48x288xf32>> + %145 = flow.variable.address @"__iree_flow___sm_node964__m.layer-161.gamma" : !util.ptr<tensor<288xf32>> + %146 = flow.variable.address @"__iree_flow___sm_node965__m.layer-161.beta" : !util.ptr<tensor<288xf32>> + %147 = flow.variable.address @"__iree_flow___sm_node966__m.layer-161.moving_mean" : !util.ptr<tensor<288xf32>> + %148 = flow.variable.address @"__iree_flow___sm_node967__m.layer-161.moving_variance" : !util.ptr<tensor<288xf32>> + %149 = flow.variable.address @"__iree_flow___sm_node986__m.layer-167.depthwise_kernel" : !util.ptr<tensor<5x5x288x1xf32>> + %150 = flow.variable.address @"__iree_flow___sm_node992__m.layer-168.gamma" : !util.ptr<tensor<288xf32>> + %151 = flow.variable.address @"__iree_flow___sm_node993__m.layer-168.beta" : !util.ptr<tensor<288xf32>> + %152 = flow.variable.address @"__iree_flow___sm_node994__m.layer-168.moving_mean" : !util.ptr<tensor<288xf32>> + %153 = flow.variable.address @"__iree_flow___sm_node995__m.layer-168.moving_variance" : !util.ptr<tensor<288xf32>> + %154 = flow.variable.address @"__iree_flow___sm_node1018__m.layer-175.kernel" : !util.ptr<tensor<1x1x288x72xf32>> + %155 = flow.variable.address @"__iree_flow___sm_node1019__m.layer-175.bias" : !util.ptr<tensor<72xf32>> + %156 = flow.variable.address @"__iree_flow___sm_node1028__m.layer-177.kernel" : !util.ptr<tensor<1x1x72x288xf32>> + %157 = flow.variable.address @"__iree_flow___sm_node1029__m.layer-177.bias" : !util.ptr<tensor<288xf32>> + %158 = flow.variable.address @"__iree_flow___sm_node1044__m.layer-182.kernel" : !util.ptr<tensor<1x1x288x96xf32>> + %159 = flow.variable.address @"__iree_flow___sm_node1050__m.layer-183.gamma" : !util.ptr<tensor<96xf32>> + %160 = flow.variable.address @"__iree_flow___sm_node1051__m.layer-183.beta" : !util.ptr<tensor<96xf32>> + %161 = flow.variable.address @"__iree_flow___sm_node1052__m.layer-183.moving_mean" : 
!util.ptr<tensor<96xf32>> + %162 = flow.variable.address @"__iree_flow___sm_node1053__m.layer-183.moving_variance" : !util.ptr<tensor<96xf32>> + %163 = flow.variable.address @"__iree_flow___sm_node1058__m.layer-184.kernel" : !util.ptr<tensor<1x1x96x576xf32>> + %164 = flow.variable.address @"__iree_flow___sm_node1064__m.layer-185.gamma" : !util.ptr<tensor<576xf32>> + %165 = flow.variable.address @"__iree_flow___sm_node1065__m.layer-185.beta" : !util.ptr<tensor<576xf32>> + %166 = flow.variable.address @"__iree_flow___sm_node1066__m.layer-185.moving_mean" : !util.ptr<tensor<576xf32>> + %167 = flow.variable.address @"__iree_flow___sm_node1067__m.layer-185.moving_variance" : !util.ptr<tensor<576xf32>> + %168 = flow.variable.address @"__iree_flow___sm_node1082__m.layer-190.depthwise_kernel" : !util.ptr<tensor<5x5x576x1xf32>> + %169 = flow.variable.address @"__iree_flow___sm_node1088__m.layer-191.gamma" : !util.ptr<tensor<576xf32>> + %170 = flow.variable.address @"__iree_flow___sm_node1089__m.layer-191.beta" : !util.ptr<tensor<576xf32>> + %171 = flow.variable.address @"__iree_flow___sm_node1090__m.layer-191.moving_mean" : !util.ptr<tensor<576xf32>> + %172 = flow.variable.address @"__iree_flow___sm_node1091__m.layer-191.moving_variance" : !util.ptr<tensor<576xf32>> + %173 = flow.variable.address @"__iree_flow___sm_node1114__m.layer-198.kernel" : !util.ptr<tensor<1x1x576x144xf32>> + %174 = flow.variable.address @"__iree_flow___sm_node1115__m.layer-198.bias" : !util.ptr<tensor<144xf32>> + %175 = flow.variable.address @"__iree_flow___sm_node1124__m.layer-200.kernel" : !util.ptr<tensor<1x1x144x576xf32>> + %176 = flow.variable.address @"__iree_flow___sm_node1125__m.layer-200.bias" : !util.ptr<tensor<576xf32>> + %177 = flow.variable.address @"__iree_flow___sm_node1140__m.layer-205.kernel" : !util.ptr<tensor<1x1x576x96xf32>> + %178 = flow.variable.address @"__iree_flow___sm_node1146__m.layer-206.gamma" : !util.ptr<tensor<96xf32>> + %179 = flow.variable.address 
@"__iree_flow___sm_node1147__m.layer-206.beta" : !util.ptr<tensor<96xf32>> + %180 = flow.variable.address @"__iree_flow___sm_node1148__m.layer-206.moving_mean" : !util.ptr<tensor<96xf32>> + %181 = flow.variable.address @"__iree_flow___sm_node1149__m.layer-206.moving_variance" : !util.ptr<tensor<96xf32>> + %182 = flow.variable.address @"__iree_flow___sm_node1158__m.layer-208.kernel" : !util.ptr<tensor<1x1x96x576xf32>> + %183 = flow.variable.address @"__iree_flow___sm_node1164__m.layer-209.gamma" : !util.ptr<tensor<576xf32>> + %184 = flow.variable.address @"__iree_flow___sm_node1165__m.layer-209.beta" : !util.ptr<tensor<576xf32>> + %185 = flow.variable.address @"__iree_flow___sm_node1166__m.layer-209.moving_mean" : !util.ptr<tensor<576xf32>> + %186 = flow.variable.address @"__iree_flow___sm_node1167__m.layer-209.moving_variance" : !util.ptr<tensor<576xf32>> + %187 = flow.variable.address @"__iree_flow___sm_node1182__m.layer-214.depthwise_kernel" : !util.ptr<tensor<5x5x576x1xf32>> + %188 = flow.variable.address @"__iree_flow___sm_node1188__m.layer-215.gamma" : !util.ptr<tensor<576xf32>> + %189 = flow.variable.address @"__iree_flow___sm_node1189__m.layer-215.beta" : !util.ptr<tensor<576xf32>> + %190 = flow.variable.address @"__iree_flow___sm_node1190__m.layer-215.moving_mean" : !util.ptr<tensor<576xf32>> + %191 = flow.variable.address @"__iree_flow___sm_node1191__m.layer-215.moving_variance" : !util.ptr<tensor<576xf32>> + %192 = flow.variable.address @"__iree_flow___sm_node1214__m.layer-222.kernel" : !util.ptr<tensor<1x1x576x144xf32>> + %193 = flow.variable.address @"__iree_flow___sm_node1215__m.layer-222.bias" : !util.ptr<tensor<144xf32>> + %194 = flow.variable.address @"__iree_flow___sm_node1224__m.layer-224.kernel" : !util.ptr<tensor<1x1x144x576xf32>> + %195 = flow.variable.address @"__iree_flow___sm_node1225__m.layer-224.bias" : !util.ptr<tensor<576xf32>> + %196 = flow.variable.address @"__iree_flow___sm_node1240__m.layer-229.kernel" : 
!util.ptr<tensor<1x1x576x96xf32>> + %197 = flow.variable.address @"__iree_flow___sm_node1246__m.layer-230.gamma" : !util.ptr<tensor<96xf32>> + %198 = flow.variable.address @"__iree_flow___sm_node1247__m.layer-230.beta" : !util.ptr<tensor<96xf32>> + %199 = flow.variable.address @"__iree_flow___sm_node1248__m.layer-230.moving_mean" : !util.ptr<tensor<96xf32>> + %200 = flow.variable.address @"__iree_flow___sm_node1249__m.layer-230.moving_variance" : !util.ptr<tensor<96xf32>> + %201 = flow.variable.address @"__iree_flow___sm_node1258__m.layer-232.kernel" : !util.ptr<tensor<1x1x96x576xf32>> + %202 = flow.variable.address @"__iree_flow___sm_node1264__m.layer-233.gamma" : !util.ptr<tensor<576xf32>> + %203 = flow.variable.address @"__iree_flow___sm_node1265__m.layer-233.beta" : !util.ptr<tensor<576xf32>> + %204 = flow.variable.address @"__iree_flow___sm_node1266__m.layer-233.moving_mean" : !util.ptr<tensor<576xf32>> + %205 = flow.variable.address @"__iree_flow___sm_node1267__m.layer-233.moving_variance" : !util.ptr<tensor<576xf32>> + %206 = flow.variable.address @"__iree_flow___sm_node1290__m.layer-240.kernel" : !util.ptr<tensor<1x1x576x1024xf32>> + %207 = flow.variable.address @"__iree_flow___sm_node1291__m.layer-240.bias" : !util.ptr<tensor<1024xf32>> + %208 = flow.variable.address @"__iree_flow___sm_node1310__m.layer-246.kernel" : !util.ptr<tensor<1x1x1024x1000xf32>> + %209 = flow.variable.address @"__iree_flow___sm_node1311__m.layer-246.bias" : !util.ptr<tensor<1000xf32>> %210 = mhlo.constant dense<0.00784313772> : tensor<1x224x224x3xf32> %211 = mhlo.constant dense<-1.000000e+00> : tensor<1x224x224x3xf32> %212 = mhlo.constant dense<3.000000e+00> : tensor<1x112x112x16xf32> @@ -478,216 +478,216 @@ %264 = mhlo.constant dense<6.000000e+00> : tensor<f32> %265 = mhlo.constant dense<0xFF800000> : tensor<f32> %266 = mhlo.constant dense<0.000000e+00> : tensor<f32> - %267 = flow.variable.load.indirect %205 : !iree.ptr<tensor<576xf32>> -> tensor<576xf32> - %268 = 
flow.variable.load.indirect %204 : !iree.ptr<tensor<576xf32>> -> tensor<576xf32> - %269 = flow.variable.load.indirect %203 : !iree.ptr<tensor<576xf32>> -> tensor<576xf32> - %270 = flow.variable.load.indirect %202 : !iree.ptr<tensor<576xf32>> -> tensor<576xf32> - %271 = flow.variable.load.indirect %201 : !iree.ptr<tensor<1x1x96x576xf32>> -> tensor<1x1x96x576xf32> - %272 = flow.variable.load.indirect %207 : !iree.ptr<tensor<1024xf32>> -> tensor<1024xf32> - %273 = flow.variable.load.indirect %206 : !iree.ptr<tensor<1x1x576x1024xf32>> -> tensor<1x1x576x1024xf32> - %274 = flow.variable.load.indirect %4 : !iree.ptr<tensor<16xf32>> -> tensor<16xf32> - %275 = flow.variable.load.indirect %3 : !iree.ptr<tensor<16xf32>> -> tensor<16xf32> - %276 = flow.variable.load.indirect %2 : !iree.ptr<tensor<16xf32>> -> tensor<16xf32> - %277 = flow.variable.load.indirect %1 : !iree.ptr<tensor<16xf32>> -> tensor<16xf32> - %278 = flow.variable.load.indirect %0 : !iree.ptr<tensor<3x3x3x16xf32>> -> tensor<3x3x3x16xf32> - %279 = flow.variable.load.indirect %191 : !iree.ptr<tensor<576xf32>> -> tensor<576xf32> - %280 = flow.variable.load.indirect %190 : !iree.ptr<tensor<576xf32>> -> tensor<576xf32> - %281 = flow.variable.load.indirect %189 : !iree.ptr<tensor<576xf32>> -> tensor<576xf32> - %282 = flow.variable.load.indirect %188 : !iree.ptr<tensor<576xf32>> -> tensor<576xf32> - %283 = flow.variable.load.indirect %187 : !iree.ptr<tensor<5x5x576x1xf32>> -> tensor<5x5x576x1xf32> - %284 = flow.variable.load.indirect %186 : !iree.ptr<tensor<576xf32>> -> tensor<576xf32> - %285 = flow.variable.load.indirect %185 : !iree.ptr<tensor<576xf32>> -> tensor<576xf32> - %286 = flow.variable.load.indirect %184 : !iree.ptr<tensor<576xf32>> -> tensor<576xf32> - %287 = flow.variable.load.indirect %183 : !iree.ptr<tensor<576xf32>> -> tensor<576xf32> - %288 = flow.variable.load.indirect %182 : !iree.ptr<tensor<1x1x96x576xf32>> -> tensor<1x1x96x576xf32> - %289 = flow.variable.load.indirect %200 : 
!iree.ptr<tensor<96xf32>> -> tensor<96xf32> - %290 = flow.variable.load.indirect %199 : !iree.ptr<tensor<96xf32>> -> tensor<96xf32> - %291 = flow.variable.load.indirect %198 : !iree.ptr<tensor<96xf32>> -> tensor<96xf32> - %292 = flow.variable.load.indirect %197 : !iree.ptr<tensor<96xf32>> -> tensor<96xf32> - %293 = flow.variable.load.indirect %196 : !iree.ptr<tensor<1x1x576x96xf32>> -> tensor<1x1x576x96xf32> - %294 = flow.variable.load.indirect %195 : !iree.ptr<tensor<576xf32>> -> tensor<576xf32> - %295 = flow.variable.load.indirect %194 : !iree.ptr<tensor<1x1x144x576xf32>> -> tensor<1x1x144x576xf32> - %296 = flow.variable.load.indirect %193 : !iree.ptr<tensor<144xf32>> -> tensor<144xf32> - %297 = flow.variable.load.indirect %192 : !iree.ptr<tensor<1x1x576x144xf32>> -> tensor<1x1x576x144xf32> - %298 = flow.variable.load.indirect %28 : !iree.ptr<tensor<72xf32>> -> tensor<72xf32> - %299 = flow.variable.load.indirect %27 : !iree.ptr<tensor<72xf32>> -> tensor<72xf32> - %300 = flow.variable.load.indirect %26 : !iree.ptr<tensor<72xf32>> -> tensor<72xf32> - %301 = flow.variable.load.indirect %25 : !iree.ptr<tensor<72xf32>> -> tensor<72xf32> - %302 = flow.variable.load.indirect %24 : !iree.ptr<tensor<3x3x72x1xf32>> -> tensor<3x3x72x1xf32> - %303 = flow.variable.load.indirect %23 : !iree.ptr<tensor<72xf32>> -> tensor<72xf32> - %304 = flow.variable.load.indirect %22 : !iree.ptr<tensor<72xf32>> -> tensor<72xf32> - %305 = flow.variable.load.indirect %21 : !iree.ptr<tensor<72xf32>> -> tensor<72xf32> - %306 = flow.variable.load.indirect %20 : !iree.ptr<tensor<72xf32>> -> tensor<72xf32> - %307 = flow.variable.load.indirect %19 : !iree.ptr<tensor<1x1x16x72xf32>> -> tensor<1x1x16x72xf32> - %308 = flow.variable.load.indirect %33 : !iree.ptr<tensor<24xf32>> -> tensor<24xf32> - %309 = flow.variable.load.indirect %32 : !iree.ptr<tensor<24xf32>> -> tensor<24xf32> - %310 = flow.variable.load.indirect %31 : !iree.ptr<tensor<24xf32>> -> tensor<24xf32> - %311 = flow.variable.load.indirect 
%30 : !iree.ptr<tensor<24xf32>> -> tensor<24xf32> - %312 = flow.variable.load.indirect %29 : !iree.ptr<tensor<1x1x72x24xf32>> -> tensor<1x1x72x24xf32> - %313 = flow.variable.load.indirect %43 : !iree.ptr<tensor<88xf32>> -> tensor<88xf32> - %314 = flow.variable.load.indirect %42 : !iree.ptr<tensor<88xf32>> -> tensor<88xf32> - %315 = flow.variable.load.indirect %41 : !iree.ptr<tensor<88xf32>> -> tensor<88xf32> - %316 = flow.variable.load.indirect %40 : !iree.ptr<tensor<88xf32>> -> tensor<88xf32> - %317 = flow.variable.load.indirect %39 : !iree.ptr<tensor<3x3x88x1xf32>> -> tensor<3x3x88x1xf32> - %318 = flow.variable.load.indirect %38 : !iree.ptr<tensor<88xf32>> -> tensor<88xf32> - %319 = flow.variable.load.indirect %37 : !iree.ptr<tensor<88xf32>> -> tensor<88xf32> - %320 = flow.variable.load.indirect %36 : !iree.ptr<tensor<88xf32>> -> tensor<88xf32> - %321 = flow.variable.load.indirect %35 : !iree.ptr<tensor<88xf32>> -> tensor<88xf32> - %322 = flow.variable.load.indirect %34 : !iree.ptr<tensor<1x1x24x88xf32>> -> tensor<1x1x24x88xf32> - %323 = flow.variable.load.indirect %48 : !iree.ptr<tensor<24xf32>> -> tensor<24xf32> - %324 = flow.variable.load.indirect %47 : !iree.ptr<tensor<24xf32>> -> tensor<24xf32> - %325 = flow.variable.load.indirect %46 : !iree.ptr<tensor<24xf32>> -> tensor<24xf32> - %326 = flow.variable.load.indirect %45 : !iree.ptr<tensor<24xf32>> -> tensor<24xf32> - %327 = flow.variable.load.indirect %44 : !iree.ptr<tensor<1x1x88x24xf32>> -> tensor<1x1x88x24xf32> - %328 = flow.variable.load.indirect %58 : !iree.ptr<tensor<96xf32>> -> tensor<96xf32> - %329 = flow.variable.load.indirect %57 : !iree.ptr<tensor<96xf32>> -> tensor<96xf32> - %330 = flow.variable.load.indirect %56 : !iree.ptr<tensor<96xf32>> -> tensor<96xf32> - %331 = flow.variable.load.indirect %55 : !iree.ptr<tensor<96xf32>> -> tensor<96xf32> - %332 = flow.variable.load.indirect %54 : !iree.ptr<tensor<5x5x96x1xf32>> -> tensor<5x5x96x1xf32> - %333 = flow.variable.load.indirect %53 : 
!iree.ptr<tensor<96xf32>> -> tensor<96xf32> - %334 = flow.variable.load.indirect %52 : !iree.ptr<tensor<96xf32>> -> tensor<96xf32> - %335 = flow.variable.load.indirect %51 : !iree.ptr<tensor<96xf32>> -> tensor<96xf32> - %336 = flow.variable.load.indirect %50 : !iree.ptr<tensor<96xf32>> -> tensor<96xf32> - %337 = flow.variable.load.indirect %49 : !iree.ptr<tensor<1x1x24x96xf32>> -> tensor<1x1x24x96xf32> - %338 = flow.variable.load.indirect %67 : !iree.ptr<tensor<40xf32>> -> tensor<40xf32> - %339 = flow.variable.load.indirect %66 : !iree.ptr<tensor<40xf32>> -> tensor<40xf32> - %340 = flow.variable.load.indirect %65 : !iree.ptr<tensor<40xf32>> -> tensor<40xf32> - %341 = flow.variable.load.indirect %64 : !iree.ptr<tensor<40xf32>> -> tensor<40xf32> - %342 = flow.variable.load.indirect %63 : !iree.ptr<tensor<1x1x96x40xf32>> -> tensor<1x1x96x40xf32> - %343 = flow.variable.load.indirect %62 : !iree.ptr<tensor<96xf32>> -> tensor<96xf32> - %344 = flow.variable.load.indirect %61 : !iree.ptr<tensor<1x1x24x96xf32>> -> tensor<1x1x24x96xf32> - %345 = flow.variable.load.indirect %60 : !iree.ptr<tensor<24xf32>> -> tensor<24xf32> - %346 = flow.variable.load.indirect %59 : !iree.ptr<tensor<1x1x96x24xf32>> -> tensor<1x1x96x24xf32> - %347 = flow.variable.load.indirect %77 : !iree.ptr<tensor<240xf32>> -> tensor<240xf32> - %348 = flow.variable.load.indirect %76 : !iree.ptr<tensor<240xf32>> -> tensor<240xf32> - %349 = flow.variable.load.indirect %75 : !iree.ptr<tensor<240xf32>> -> tensor<240xf32> - %350 = flow.variable.load.indirect %74 : !iree.ptr<tensor<240xf32>> -> tensor<240xf32> - %351 = flow.variable.load.indirect %73 : !iree.ptr<tensor<5x5x240x1xf32>> -> tensor<5x5x240x1xf32> - %352 = flow.variable.load.indirect %72 : !iree.ptr<tensor<240xf32>> -> tensor<240xf32> - %353 = flow.variable.load.indirect %71 : !iree.ptr<tensor<240xf32>> -> tensor<240xf32> - %354 = flow.variable.load.indirect %70 : !iree.ptr<tensor<240xf32>> -> tensor<240xf32> - %355 = flow.variable.load.indirect %69 : 
!iree.ptr<tensor<240xf32>> -> tensor<240xf32> - %356 = flow.variable.load.indirect %68 : !iree.ptr<tensor<1x1x40x240xf32>> -> tensor<1x1x40x240xf32> - %357 = flow.variable.load.indirect %86 : !iree.ptr<tensor<40xf32>> -> tensor<40xf32> - %358 = flow.variable.load.indirect %85 : !iree.ptr<tensor<40xf32>> -> tensor<40xf32> - %359 = flow.variable.load.indirect %84 : !iree.ptr<tensor<40xf32>> -> tensor<40xf32> - %360 = flow.variable.load.indirect %83 : !iree.ptr<tensor<40xf32>> -> tensor<40xf32> - %361 = flow.variable.load.indirect %82 : !iree.ptr<tensor<1x1x240x40xf32>> -> tensor<1x1x240x40xf32> - %362 = flow.variable.load.indirect %81 : !iree.ptr<tensor<240xf32>> -> tensor<240xf32> - %363 = flow.variable.load.indirect %80 : !iree.ptr<tensor<1x1x64x240xf32>> -> tensor<1x1x64x240xf32> - %364 = flow.variable.load.indirect %79 : !iree.ptr<tensor<64xf32>> -> tensor<64xf32> - %365 = flow.variable.load.indirect %78 : !iree.ptr<tensor<1x1x240x64xf32>> -> tensor<1x1x240x64xf32> - %366 = flow.variable.load.indirect %96 : !iree.ptr<tensor<240xf32>> -> tensor<240xf32> - %367 = flow.variable.load.indirect %95 : !iree.ptr<tensor<240xf32>> -> tensor<240xf32> - %368 = flow.variable.load.indirect %94 : !iree.ptr<tensor<240xf32>> -> tensor<240xf32> - %369 = flow.variable.load.indirect %93 : !iree.ptr<tensor<240xf32>> -> tensor<240xf32> - %370 = flow.variable.load.indirect %92 : !iree.ptr<tensor<5x5x240x1xf32>> -> tensor<5x5x240x1xf32> - %371 = flow.variable.load.indirect %91 : !iree.ptr<tensor<240xf32>> -> tensor<240xf32> - %372 = flow.variable.load.indirect %90 : !iree.ptr<tensor<240xf32>> -> tensor<240xf32> - %373 = flow.variable.load.indirect %89 : !iree.ptr<tensor<240xf32>> -> tensor<240xf32> - %374 = flow.variable.load.indirect %88 : !iree.ptr<tensor<240xf32>> -> tensor<240xf32> - %375 = flow.variable.load.indirect %87 : !iree.ptr<tensor<1x1x40x240xf32>> -> tensor<1x1x40x240xf32> - %376 = flow.variable.load.indirect %105 : !iree.ptr<tensor<40xf32>> -> tensor<40xf32> - %377 = 
flow.variable.load.indirect %104 : !iree.ptr<tensor<40xf32>> -> tensor<40xf32> - %378 = flow.variable.load.indirect %103 : !iree.ptr<tensor<40xf32>> -> tensor<40xf32> - %379 = flow.variable.load.indirect %102 : !iree.ptr<tensor<40xf32>> -> tensor<40xf32> - %380 = flow.variable.load.indirect %101 : !iree.ptr<tensor<1x1x240x40xf32>> -> tensor<1x1x240x40xf32> - %381 = flow.variable.load.indirect %100 : !iree.ptr<tensor<240xf32>> -> tensor<240xf32> - %382 = flow.variable.load.indirect %99 : !iree.ptr<tensor<1x1x64x240xf32>> -> tensor<1x1x64x240xf32> - %383 = flow.variable.load.indirect %98 : !iree.ptr<tensor<64xf32>> -> tensor<64xf32> - %384 = flow.variable.load.indirect %97 : !iree.ptr<tensor<1x1x240x64xf32>> -> tensor<1x1x240x64xf32> - %385 = flow.variable.load.indirect %115 : !iree.ptr<tensor<120xf32>> -> tensor<120xf32> - %386 = flow.variable.load.indirect %114 : !iree.ptr<tensor<120xf32>> -> tensor<120xf32> - %387 = flow.variable.load.indirect %113 : !iree.ptr<tensor<120xf32>> -> tensor<120xf32> - %388 = flow.variable.load.indirect %112 : !iree.ptr<tensor<120xf32>> -> tensor<120xf32> - %389 = flow.variable.load.indirect %111 : !iree.ptr<tensor<5x5x120x1xf32>> -> tensor<5x5x120x1xf32> - %390 = flow.variable.load.indirect %110 : !iree.ptr<tensor<120xf32>> -> tensor<120xf32> - %391 = flow.variable.load.indirect %109 : !iree.ptr<tensor<120xf32>> -> tensor<120xf32> - %392 = flow.variable.load.indirect %108 : !iree.ptr<tensor<120xf32>> -> tensor<120xf32> - %393 = flow.variable.load.indirect %107 : !iree.ptr<tensor<120xf32>> -> tensor<120xf32> - %394 = flow.variable.load.indirect %106 : !iree.ptr<tensor<1x1x40x120xf32>> -> tensor<1x1x40x120xf32> - %395 = flow.variable.load.indirect %124 : !iree.ptr<tensor<48xf32>> -> tensor<48xf32> - %396 = flow.variable.load.indirect %123 : !iree.ptr<tensor<48xf32>> -> tensor<48xf32> - %397 = flow.variable.load.indirect %122 : !iree.ptr<tensor<48xf32>> -> tensor<48xf32> - %398 = flow.variable.load.indirect %121 : 
!iree.ptr<tensor<48xf32>> -> tensor<48xf32> - %399 = flow.variable.load.indirect %120 : !iree.ptr<tensor<1x1x120x48xf32>> -> tensor<1x1x120x48xf32> - %400 = flow.variable.load.indirect %119 : !iree.ptr<tensor<120xf32>> -> tensor<120xf32> - %401 = flow.variable.load.indirect %118 : !iree.ptr<tensor<1x1x32x120xf32>> -> tensor<1x1x32x120xf32> - %402 = flow.variable.load.indirect %117 : !iree.ptr<tensor<32xf32>> -> tensor<32xf32> - %403 = flow.variable.load.indirect %116 : !iree.ptr<tensor<1x1x120x32xf32>> -> tensor<1x1x120x32xf32> - %404 = flow.variable.load.indirect %134 : !iree.ptr<tensor<144xf32>> -> tensor<144xf32> - %405 = flow.variable.load.indirect %133 : !iree.ptr<tensor<144xf32>> -> tensor<144xf32> - %406 = flow.variable.load.indirect %132 : !iree.ptr<tensor<144xf32>> -> tensor<144xf32> - %407 = flow.variable.load.indirect %131 : !iree.ptr<tensor<144xf32>> -> tensor<144xf32> - %408 = flow.variable.load.indirect %130 : !iree.ptr<tensor<5x5x144x1xf32>> -> tensor<5x5x144x1xf32> - %409 = flow.variable.load.indirect %129 : !iree.ptr<tensor<144xf32>> -> tensor<144xf32> - %410 = flow.variable.load.indirect %128 : !iree.ptr<tensor<144xf32>> -> tensor<144xf32> - %411 = flow.variable.load.indirect %127 : !iree.ptr<tensor<144xf32>> -> tensor<144xf32> - %412 = flow.variable.load.indirect %126 : !iree.ptr<tensor<144xf32>> -> tensor<144xf32> - %413 = flow.variable.load.indirect %125 : !iree.ptr<tensor<1x1x48x144xf32>> -> tensor<1x1x48x144xf32> - %414 = flow.variable.load.indirect %143 : !iree.ptr<tensor<48xf32>> -> tensor<48xf32> - %415 = flow.variable.load.indirect %142 : !iree.ptr<tensor<48xf32>> -> tensor<48xf32> - %416 = flow.variable.load.indirect %141 : !iree.ptr<tensor<48xf32>> -> tensor<48xf32> - %417 = flow.variable.load.indirect %140 : !iree.ptr<tensor<48xf32>> -> tensor<48xf32> - %418 = flow.variable.load.indirect %139 : !iree.ptr<tensor<1x1x144x48xf32>> -> tensor<1x1x144x48xf32> - %419 = flow.variable.load.indirect %138 : !iree.ptr<tensor<144xf32>> -> 
tensor<144xf32> - %420 = flow.variable.load.indirect %137 : !iree.ptr<tensor<1x1x40x144xf32>> -> tensor<1x1x40x144xf32> - %421 = flow.variable.load.indirect %136 : !iree.ptr<tensor<40xf32>> -> tensor<40xf32> - %422 = flow.variable.load.indirect %135 : !iree.ptr<tensor<1x1x144x40xf32>> -> tensor<1x1x144x40xf32> - %423 = flow.variable.load.indirect %153 : !iree.ptr<tensor<288xf32>> -> tensor<288xf32> - %424 = flow.variable.load.indirect %152 : !iree.ptr<tensor<288xf32>> -> tensor<288xf32> - %425 = flow.variable.load.indirect %151 : !iree.ptr<tensor<288xf32>> -> tensor<288xf32> - %426 = flow.variable.load.indirect %150 : !iree.ptr<tensor<288xf32>> -> tensor<288xf32> - %427 = flow.variable.load.indirect %149 : !iree.ptr<tensor<5x5x288x1xf32>> -> tensor<5x5x288x1xf32> - %428 = flow.variable.load.indirect %148 : !iree.ptr<tensor<288xf32>> -> tensor<288xf32> - %429 = flow.variable.load.indirect %147 : !iree.ptr<tensor<288xf32>> -> tensor<288xf32> - %430 = flow.variable.load.indirect %146 : !iree.ptr<tensor<288xf32>> -> tensor<288xf32> - %431 = flow.variable.load.indirect %145 : !iree.ptr<tensor<288xf32>> -> tensor<288xf32> - %432 = flow.variable.load.indirect %144 : !iree.ptr<tensor<1x1x48x288xf32>> -> tensor<1x1x48x288xf32> - %433 = flow.variable.load.indirect %162 : !iree.ptr<tensor<96xf32>> -> tensor<96xf32> - %434 = flow.variable.load.indirect %161 : !iree.ptr<tensor<96xf32>> -> tensor<96xf32> - %435 = flow.variable.load.indirect %160 : !iree.ptr<tensor<96xf32>> -> tensor<96xf32> - %436 = flow.variable.load.indirect %159 : !iree.ptr<tensor<96xf32>> -> tensor<96xf32> - %437 = flow.variable.load.indirect %158 : !iree.ptr<tensor<1x1x288x96xf32>> -> tensor<1x1x288x96xf32> - %438 = flow.variable.load.indirect %157 : !iree.ptr<tensor<288xf32>> -> tensor<288xf32> - %439 = flow.variable.load.indirect %156 : !iree.ptr<tensor<1x1x72x288xf32>> -> tensor<1x1x72x288xf32> - %440 = flow.variable.load.indirect %155 : !iree.ptr<tensor<72xf32>> -> tensor<72xf32> - %441 = 
flow.variable.load.indirect %154 : !iree.ptr<tensor<1x1x288x72xf32>> -> tensor<1x1x288x72xf32> - %442 = flow.variable.load.indirect %172 : !iree.ptr<tensor<576xf32>> -> tensor<576xf32> - %443 = flow.variable.load.indirect %171 : !iree.ptr<tensor<576xf32>> -> tensor<576xf32> - %444 = flow.variable.load.indirect %170 : !iree.ptr<tensor<576xf32>> -> tensor<576xf32> - %445 = flow.variable.load.indirect %169 : !iree.ptr<tensor<576xf32>> -> tensor<576xf32> - %446 = flow.variable.load.indirect %168 : !iree.ptr<tensor<5x5x576x1xf32>> -> tensor<5x5x576x1xf32> - %447 = flow.variable.load.indirect %167 : !iree.ptr<tensor<576xf32>> -> tensor<576xf32> - %448 = flow.variable.load.indirect %166 : !iree.ptr<tensor<576xf32>> -> tensor<576xf32> - %449 = flow.variable.load.indirect %165 : !iree.ptr<tensor<576xf32>> -> tensor<576xf32> - %450 = flow.variable.load.indirect %164 : !iree.ptr<tensor<576xf32>> -> tensor<576xf32> - %451 = flow.variable.load.indirect %163 : !iree.ptr<tensor<1x1x96x576xf32>> -> tensor<1x1x96x576xf32> - %452 = flow.variable.load.indirect %181 : !iree.ptr<tensor<96xf32>> -> tensor<96xf32> - %453 = flow.variable.load.indirect %180 : !iree.ptr<tensor<96xf32>> -> tensor<96xf32> - %454 = flow.variable.load.indirect %179 : !iree.ptr<tensor<96xf32>> -> tensor<96xf32> - %455 = flow.variable.load.indirect %178 : !iree.ptr<tensor<96xf32>> -> tensor<96xf32> - %456 = flow.variable.load.indirect %177 : !iree.ptr<tensor<1x1x576x96xf32>> -> tensor<1x1x576x96xf32> - %457 = flow.variable.load.indirect %176 : !iree.ptr<tensor<576xf32>> -> tensor<576xf32> - %458 = flow.variable.load.indirect %175 : !iree.ptr<tensor<1x1x144x576xf32>> -> tensor<1x1x144x576xf32> - %459 = flow.variable.load.indirect %174 : !iree.ptr<tensor<144xf32>> -> tensor<144xf32> - %460 = flow.variable.load.indirect %173 : !iree.ptr<tensor<1x1x576x144xf32>> -> tensor<1x1x576x144xf32> - %461 = flow.variable.load.indirect %9 : !iree.ptr<tensor<16xf32>> -> tensor<16xf32> - %462 = flow.variable.load.indirect %8 : 
!iree.ptr<tensor<16xf32>> -> tensor<16xf32> - %463 = flow.variable.load.indirect %7 : !iree.ptr<tensor<16xf32>> -> tensor<16xf32> - %464 = flow.variable.load.indirect %6 : !iree.ptr<tensor<16xf32>> -> tensor<16xf32> - %465 = flow.variable.load.indirect %5 : !iree.ptr<tensor<3x3x16x1xf32>> -> tensor<3x3x16x1xf32> - %466 = flow.variable.load.indirect %18 : !iree.ptr<tensor<16xf32>> -> tensor<16xf32> - %467 = flow.variable.load.indirect %17 : !iree.ptr<tensor<16xf32>> -> tensor<16xf32> - %468 = flow.variable.load.indirect %16 : !iree.ptr<tensor<16xf32>> -> tensor<16xf32> - %469 = flow.variable.load.indirect %15 : !iree.ptr<tensor<16xf32>> -> tensor<16xf32> - %470 = flow.variable.load.indirect %14 : !iree.ptr<tensor<1x1x16x16xf32>> -> tensor<1x1x16x16xf32> - %471 = flow.variable.load.indirect %13 : !iree.ptr<tensor<16xf32>> -> tensor<16xf32> - %472 = flow.variable.load.indirect %12 : !iree.ptr<tensor<1x1x8x16xf32>> -> tensor<1x1x8x16xf32> - %473 = flow.variable.load.indirect %11 : !iree.ptr<tensor<8xf32>> -> tensor<8xf32> - %474 = flow.variable.load.indirect %10 : !iree.ptr<tensor<1x1x16x8xf32>> -> tensor<1x1x16x8xf32> - %475 = flow.variable.load.indirect %209 : !iree.ptr<tensor<1000xf32>> -> tensor<1000xf32> - %476 = flow.variable.load.indirect %208 : !iree.ptr<tensor<1x1x1024x1000xf32>> -> tensor<1x1x1024x1000xf32> + %267 = flow.variable.load.indirect %205 : !util.ptr<tensor<576xf32>> -> tensor<576xf32> + %268 = flow.variable.load.indirect %204 : !util.ptr<tensor<576xf32>> -> tensor<576xf32> + %269 = flow.variable.load.indirect %203 : !util.ptr<tensor<576xf32>> -> tensor<576xf32> + %270 = flow.variable.load.indirect %202 : !util.ptr<tensor<576xf32>> -> tensor<576xf32> + %271 = flow.variable.load.indirect %201 : !util.ptr<tensor<1x1x96x576xf32>> -> tensor<1x1x96x576xf32> + %272 = flow.variable.load.indirect %207 : !util.ptr<tensor<1024xf32>> -> tensor<1024xf32> + %273 = flow.variable.load.indirect %206 : !util.ptr<tensor<1x1x576x1024xf32>> -> tensor<1x1x576x1024xf32> 
+ %274 = flow.variable.load.indirect %4 : !util.ptr<tensor<16xf32>> -> tensor<16xf32> + %275 = flow.variable.load.indirect %3 : !util.ptr<tensor<16xf32>> -> tensor<16xf32> + %276 = flow.variable.load.indirect %2 : !util.ptr<tensor<16xf32>> -> tensor<16xf32> + %277 = flow.variable.load.indirect %1 : !util.ptr<tensor<16xf32>> -> tensor<16xf32> + %278 = flow.variable.load.indirect %0 : !util.ptr<tensor<3x3x3x16xf32>> -> tensor<3x3x3x16xf32> + %279 = flow.variable.load.indirect %191 : !util.ptr<tensor<576xf32>> -> tensor<576xf32> + %280 = flow.variable.load.indirect %190 : !util.ptr<tensor<576xf32>> -> tensor<576xf32> + %281 = flow.variable.load.indirect %189 : !util.ptr<tensor<576xf32>> -> tensor<576xf32> + %282 = flow.variable.load.indirect %188 : !util.ptr<tensor<576xf32>> -> tensor<576xf32> + %283 = flow.variable.load.indirect %187 : !util.ptr<tensor<5x5x576x1xf32>> -> tensor<5x5x576x1xf32> + %284 = flow.variable.load.indirect %186 : !util.ptr<tensor<576xf32>> -> tensor<576xf32> + %285 = flow.variable.load.indirect %185 : !util.ptr<tensor<576xf32>> -> tensor<576xf32> + %286 = flow.variable.load.indirect %184 : !util.ptr<tensor<576xf32>> -> tensor<576xf32> + %287 = flow.variable.load.indirect %183 : !util.ptr<tensor<576xf32>> -> tensor<576xf32> + %288 = flow.variable.load.indirect %182 : !util.ptr<tensor<1x1x96x576xf32>> -> tensor<1x1x96x576xf32> + %289 = flow.variable.load.indirect %200 : !util.ptr<tensor<96xf32>> -> tensor<96xf32> + %290 = flow.variable.load.indirect %199 : !util.ptr<tensor<96xf32>> -> tensor<96xf32> + %291 = flow.variable.load.indirect %198 : !util.ptr<tensor<96xf32>> -> tensor<96xf32> + %292 = flow.variable.load.indirect %197 : !util.ptr<tensor<96xf32>> -> tensor<96xf32> + %293 = flow.variable.load.indirect %196 : !util.ptr<tensor<1x1x576x96xf32>> -> tensor<1x1x576x96xf32> + %294 = flow.variable.load.indirect %195 : !util.ptr<tensor<576xf32>> -> tensor<576xf32> + %295 = flow.variable.load.indirect %194 : !util.ptr<tensor<1x1x144x576xf32>> -> 
tensor<1x1x144x576xf32> + %296 = flow.variable.load.indirect %193 : !util.ptr<tensor<144xf32>> -> tensor<144xf32> + %297 = flow.variable.load.indirect %192 : !util.ptr<tensor<1x1x576x144xf32>> -> tensor<1x1x576x144xf32> + %298 = flow.variable.load.indirect %28 : !util.ptr<tensor<72xf32>> -> tensor<72xf32> + %299 = flow.variable.load.indirect %27 : !util.ptr<tensor<72xf32>> -> tensor<72xf32> + %300 = flow.variable.load.indirect %26 : !util.ptr<tensor<72xf32>> -> tensor<72xf32> + %301 = flow.variable.load.indirect %25 : !util.ptr<tensor<72xf32>> -> tensor<72xf32> + %302 = flow.variable.load.indirect %24 : !util.ptr<tensor<3x3x72x1xf32>> -> tensor<3x3x72x1xf32> + %303 = flow.variable.load.indirect %23 : !util.ptr<tensor<72xf32>> -> tensor<72xf32> + %304 = flow.variable.load.indirect %22 : !util.ptr<tensor<72xf32>> -> tensor<72xf32> + %305 = flow.variable.load.indirect %21 : !util.ptr<tensor<72xf32>> -> tensor<72xf32> + %306 = flow.variable.load.indirect %20 : !util.ptr<tensor<72xf32>> -> tensor<72xf32> + %307 = flow.variable.load.indirect %19 : !util.ptr<tensor<1x1x16x72xf32>> -> tensor<1x1x16x72xf32> + %308 = flow.variable.load.indirect %33 : !util.ptr<tensor<24xf32>> -> tensor<24xf32> + %309 = flow.variable.load.indirect %32 : !util.ptr<tensor<24xf32>> -> tensor<24xf32> + %310 = flow.variable.load.indirect %31 : !util.ptr<tensor<24xf32>> -> tensor<24xf32> + %311 = flow.variable.load.indirect %30 : !util.ptr<tensor<24xf32>> -> tensor<24xf32> + %312 = flow.variable.load.indirect %29 : !util.ptr<tensor<1x1x72x24xf32>> -> tensor<1x1x72x24xf32> + %313 = flow.variable.load.indirect %43 : !util.ptr<tensor<88xf32>> -> tensor<88xf32> + %314 = flow.variable.load.indirect %42 : !util.ptr<tensor<88xf32>> -> tensor<88xf32> + %315 = flow.variable.load.indirect %41 : !util.ptr<tensor<88xf32>> -> tensor<88xf32> + %316 = flow.variable.load.indirect %40 : !util.ptr<tensor<88xf32>> -> tensor<88xf32> + %317 = flow.variable.load.indirect %39 : !util.ptr<tensor<3x3x88x1xf32>> -> 
tensor<3x3x88x1xf32> + %318 = flow.variable.load.indirect %38 : !util.ptr<tensor<88xf32>> -> tensor<88xf32> + %319 = flow.variable.load.indirect %37 : !util.ptr<tensor<88xf32>> -> tensor<88xf32> + %320 = flow.variable.load.indirect %36 : !util.ptr<tensor<88xf32>> -> tensor<88xf32> + %321 = flow.variable.load.indirect %35 : !util.ptr<tensor<88xf32>> -> tensor<88xf32> + %322 = flow.variable.load.indirect %34 : !util.ptr<tensor<1x1x24x88xf32>> -> tensor<1x1x24x88xf32> + %323 = flow.variable.load.indirect %48 : !util.ptr<tensor<24xf32>> -> tensor<24xf32> + %324 = flow.variable.load.indirect %47 : !util.ptr<tensor<24xf32>> -> tensor<24xf32> + %325 = flow.variable.load.indirect %46 : !util.ptr<tensor<24xf32>> -> tensor<24xf32> + %326 = flow.variable.load.indirect %45 : !util.ptr<tensor<24xf32>> -> tensor<24xf32> + %327 = flow.variable.load.indirect %44 : !util.ptr<tensor<1x1x88x24xf32>> -> tensor<1x1x88x24xf32> + %328 = flow.variable.load.indirect %58 : !util.ptr<tensor<96xf32>> -> tensor<96xf32> + %329 = flow.variable.load.indirect %57 : !util.ptr<tensor<96xf32>> -> tensor<96xf32> + %330 = flow.variable.load.indirect %56 : !util.ptr<tensor<96xf32>> -> tensor<96xf32> + %331 = flow.variable.load.indirect %55 : !util.ptr<tensor<96xf32>> -> tensor<96xf32> + %332 = flow.variable.load.indirect %54 : !util.ptr<tensor<5x5x96x1xf32>> -> tensor<5x5x96x1xf32> + %333 = flow.variable.load.indirect %53 : !util.ptr<tensor<96xf32>> -> tensor<96xf32> + %334 = flow.variable.load.indirect %52 : !util.ptr<tensor<96xf32>> -> tensor<96xf32> + %335 = flow.variable.load.indirect %51 : !util.ptr<tensor<96xf32>> -> tensor<96xf32> + %336 = flow.variable.load.indirect %50 : !util.ptr<tensor<96xf32>> -> tensor<96xf32> + %337 = flow.variable.load.indirect %49 : !util.ptr<tensor<1x1x24x96xf32>> -> tensor<1x1x24x96xf32> + %338 = flow.variable.load.indirect %67 : !util.ptr<tensor<40xf32>> -> tensor<40xf32> + %339 = flow.variable.load.indirect %66 : !util.ptr<tensor<40xf32>> -> tensor<40xf32> + %340 = 
flow.variable.load.indirect %65 : !util.ptr<tensor<40xf32>> -> tensor<40xf32> + %341 = flow.variable.load.indirect %64 : !util.ptr<tensor<40xf32>> -> tensor<40xf32> + %342 = flow.variable.load.indirect %63 : !util.ptr<tensor<1x1x96x40xf32>> -> tensor<1x1x96x40xf32> + %343 = flow.variable.load.indirect %62 : !util.ptr<tensor<96xf32>> -> tensor<96xf32> + %344 = flow.variable.load.indirect %61 : !util.ptr<tensor<1x1x24x96xf32>> -> tensor<1x1x24x96xf32> + %345 = flow.variable.load.indirect %60 : !util.ptr<tensor<24xf32>> -> tensor<24xf32> + %346 = flow.variable.load.indirect %59 : !util.ptr<tensor<1x1x96x24xf32>> -> tensor<1x1x96x24xf32> + %347 = flow.variable.load.indirect %77 : !util.ptr<tensor<240xf32>> -> tensor<240xf32> + %348 = flow.variable.load.indirect %76 : !util.ptr<tensor<240xf32>> -> tensor<240xf32> + %349 = flow.variable.load.indirect %75 : !util.ptr<tensor<240xf32>> -> tensor<240xf32> + %350 = flow.variable.load.indirect %74 : !util.ptr<tensor<240xf32>> -> tensor<240xf32> + %351 = flow.variable.load.indirect %73 : !util.ptr<tensor<5x5x240x1xf32>> -> tensor<5x5x240x1xf32> + %352 = flow.variable.load.indirect %72 : !util.ptr<tensor<240xf32>> -> tensor<240xf32> + %353 = flow.variable.load.indirect %71 : !util.ptr<tensor<240xf32>> -> tensor<240xf32> + %354 = flow.variable.load.indirect %70 : !util.ptr<tensor<240xf32>> -> tensor<240xf32> + %355 = flow.variable.load.indirect %69 : !util.ptr<tensor<240xf32>> -> tensor<240xf32> + %356 = flow.variable.load.indirect %68 : !util.ptr<tensor<1x1x40x240xf32>> -> tensor<1x1x40x240xf32> + %357 = flow.variable.load.indirect %86 : !util.ptr<tensor<40xf32>> -> tensor<40xf32> + %358 = flow.variable.load.indirect %85 : !util.ptr<tensor<40xf32>> -> tensor<40xf32> + %359 = flow.variable.load.indirect %84 : !util.ptr<tensor<40xf32>> -> tensor<40xf32> + %360 = flow.variable.load.indirect %83 : !util.ptr<tensor<40xf32>> -> tensor<40xf32> + %361 = flow.variable.load.indirect %82 : !util.ptr<tensor<1x1x240x40xf32>> -> 
tensor<1x1x240x40xf32> + %362 = flow.variable.load.indirect %81 : !util.ptr<tensor<240xf32>> -> tensor<240xf32> + %363 = flow.variable.load.indirect %80 : !util.ptr<tensor<1x1x64x240xf32>> -> tensor<1x1x64x240xf32> + %364 = flow.variable.load.indirect %79 : !util.ptr<tensor<64xf32>> -> tensor<64xf32> + %365 = flow.variable.load.indirect %78 : !util.ptr<tensor<1x1x240x64xf32>> -> tensor<1x1x240x64xf32> + %366 = flow.variable.load.indirect %96 : !util.ptr<tensor<240xf32>> -> tensor<240xf32> + %367 = flow.variable.load.indirect %95 : !util.ptr<tensor<240xf32>> -> tensor<240xf32> + %368 = flow.variable.load.indirect %94 : !util.ptr<tensor<240xf32>> -> tensor<240xf32> + %369 = flow.variable.load.indirect %93 : !util.ptr<tensor<240xf32>> -> tensor<240xf32> + %370 = flow.variable.load.indirect %92 : !util.ptr<tensor<5x5x240x1xf32>> -> tensor<5x5x240x1xf32> + %371 = flow.variable.load.indirect %91 : !util.ptr<tensor<240xf32>> -> tensor<240xf32> + %372 = flow.variable.load.indirect %90 : !util.ptr<tensor<240xf32>> -> tensor<240xf32> + %373 = flow.variable.load.indirect %89 : !util.ptr<tensor<240xf32>> -> tensor<240xf32> + %374 = flow.variable.load.indirect %88 : !util.ptr<tensor<240xf32>> -> tensor<240xf32> + %375 = flow.variable.load.indirect %87 : !util.ptr<tensor<1x1x40x240xf32>> -> tensor<1x1x40x240xf32> + %376 = flow.variable.load.indirect %105 : !util.ptr<tensor<40xf32>> -> tensor<40xf32> + %377 = flow.variable.load.indirect %104 : !util.ptr<tensor<40xf32>> -> tensor<40xf32> + %378 = flow.variable.load.indirect %103 : !util.ptr<tensor<40xf32>> -> tensor<40xf32> + %379 = flow.variable.load.indirect %102 : !util.ptr<tensor<40xf32>> -> tensor<40xf32> + %380 = flow.variable.load.indirect %101 : !util.ptr<tensor<1x1x240x40xf32>> -> tensor<1x1x240x40xf32> + %381 = flow.variable.load.indirect %100 : !util.ptr<tensor<240xf32>> -> tensor<240xf32> + %382 = flow.variable.load.indirect %99 : !util.ptr<tensor<1x1x64x240xf32>> -> tensor<1x1x64x240xf32> + %383 = 
flow.variable.load.indirect %98 : !util.ptr<tensor<64xf32>> -> tensor<64xf32> + %384 = flow.variable.load.indirect %97 : !util.ptr<tensor<1x1x240x64xf32>> -> tensor<1x1x240x64xf32> + %385 = flow.variable.load.indirect %115 : !util.ptr<tensor<120xf32>> -> tensor<120xf32> + %386 = flow.variable.load.indirect %114 : !util.ptr<tensor<120xf32>> -> tensor<120xf32> + %387 = flow.variable.load.indirect %113 : !util.ptr<tensor<120xf32>> -> tensor<120xf32> + %388 = flow.variable.load.indirect %112 : !util.ptr<tensor<120xf32>> -> tensor<120xf32> + %389 = flow.variable.load.indirect %111 : !util.ptr<tensor<5x5x120x1xf32>> -> tensor<5x5x120x1xf32> + %390 = flow.variable.load.indirect %110 : !util.ptr<tensor<120xf32>> -> tensor<120xf32> + %391 = flow.variable.load.indirect %109 : !util.ptr<tensor<120xf32>> -> tensor<120xf32> + %392 = flow.variable.load.indirect %108 : !util.ptr<tensor<120xf32>> -> tensor<120xf32> + %393 = flow.variable.load.indirect %107 : !util.ptr<tensor<120xf32>> -> tensor<120xf32> + %394 = flow.variable.load.indirect %106 : !util.ptr<tensor<1x1x40x120xf32>> -> tensor<1x1x40x120xf32> + %395 = flow.variable.load.indirect %124 : !util.ptr<tensor<48xf32>> -> tensor<48xf32> + %396 = flow.variable.load.indirect %123 : !util.ptr<tensor<48xf32>> -> tensor<48xf32> + %397 = flow.variable.load.indirect %122 : !util.ptr<tensor<48xf32>> -> tensor<48xf32> + %398 = flow.variable.load.indirect %121 : !util.ptr<tensor<48xf32>> -> tensor<48xf32> + %399 = flow.variable.load.indirect %120 : !util.ptr<tensor<1x1x120x48xf32>> -> tensor<1x1x120x48xf32> + %400 = flow.variable.load.indirect %119 : !util.ptr<tensor<120xf32>> -> tensor<120xf32> + %401 = flow.variable.load.indirect %118 : !util.ptr<tensor<1x1x32x120xf32>> -> tensor<1x1x32x120xf32> + %402 = flow.variable.load.indirect %117 : !util.ptr<tensor<32xf32>> -> tensor<32xf32> + %403 = flow.variable.load.indirect %116 : !util.ptr<tensor<1x1x120x32xf32>> -> tensor<1x1x120x32xf32> + %404 = flow.variable.load.indirect %134 : 
!util.ptr<tensor<144xf32>> -> tensor<144xf32> + %405 = flow.variable.load.indirect %133 : !util.ptr<tensor<144xf32>> -> tensor<144xf32> + %406 = flow.variable.load.indirect %132 : !util.ptr<tensor<144xf32>> -> tensor<144xf32> + %407 = flow.variable.load.indirect %131 : !util.ptr<tensor<144xf32>> -> tensor<144xf32> + %408 = flow.variable.load.indirect %130 : !util.ptr<tensor<5x5x144x1xf32>> -> tensor<5x5x144x1xf32> + %409 = flow.variable.load.indirect %129 : !util.ptr<tensor<144xf32>> -> tensor<144xf32> + %410 = flow.variable.load.indirect %128 : !util.ptr<tensor<144xf32>> -> tensor<144xf32> + %411 = flow.variable.load.indirect %127 : !util.ptr<tensor<144xf32>> -> tensor<144xf32> + %412 = flow.variable.load.indirect %126 : !util.ptr<tensor<144xf32>> -> tensor<144xf32> + %413 = flow.variable.load.indirect %125 : !util.ptr<tensor<1x1x48x144xf32>> -> tensor<1x1x48x144xf32> + %414 = flow.variable.load.indirect %143 : !util.ptr<tensor<48xf32>> -> tensor<48xf32> + %415 = flow.variable.load.indirect %142 : !util.ptr<tensor<48xf32>> -> tensor<48xf32> + %416 = flow.variable.load.indirect %141 : !util.ptr<tensor<48xf32>> -> tensor<48xf32> + %417 = flow.variable.load.indirect %140 : !util.ptr<tensor<48xf32>> -> tensor<48xf32> + %418 = flow.variable.load.indirect %139 : !util.ptr<tensor<1x1x144x48xf32>> -> tensor<1x1x144x48xf32> + %419 = flow.variable.load.indirect %138 : !util.ptr<tensor<144xf32>> -> tensor<144xf32> + %420 = flow.variable.load.indirect %137 : !util.ptr<tensor<1x1x40x144xf32>> -> tensor<1x1x40x144xf32> + %421 = flow.variable.load.indirect %136 : !util.ptr<tensor<40xf32>> -> tensor<40xf32> + %422 = flow.variable.load.indirect %135 : !util.ptr<tensor<1x1x144x40xf32>> -> tensor<1x1x144x40xf32> + %423 = flow.variable.load.indirect %153 : !util.ptr<tensor<288xf32>> -> tensor<288xf32> + %424 = flow.variable.load.indirect %152 : !util.ptr<tensor<288xf32>> -> tensor<288xf32> + %425 = flow.variable.load.indirect %151 : !util.ptr<tensor<288xf32>> -> tensor<288xf32> + 
%426 = flow.variable.load.indirect %150 : !util.ptr<tensor<288xf32>> -> tensor<288xf32> + %427 = flow.variable.load.indirect %149 : !util.ptr<tensor<5x5x288x1xf32>> -> tensor<5x5x288x1xf32> + %428 = flow.variable.load.indirect %148 : !util.ptr<tensor<288xf32>> -> tensor<288xf32> + %429 = flow.variable.load.indirect %147 : !util.ptr<tensor<288xf32>> -> tensor<288xf32> + %430 = flow.variable.load.indirect %146 : !util.ptr<tensor<288xf32>> -> tensor<288xf32> + %431 = flow.variable.load.indirect %145 : !util.ptr<tensor<288xf32>> -> tensor<288xf32> + %432 = flow.variable.load.indirect %144 : !util.ptr<tensor<1x1x48x288xf32>> -> tensor<1x1x48x288xf32> + %433 = flow.variable.load.indirect %162 : !util.ptr<tensor<96xf32>> -> tensor<96xf32> + %434 = flow.variable.load.indirect %161 : !util.ptr<tensor<96xf32>> -> tensor<96xf32> + %435 = flow.variable.load.indirect %160 : !util.ptr<tensor<96xf32>> -> tensor<96xf32> + %436 = flow.variable.load.indirect %159 : !util.ptr<tensor<96xf32>> -> tensor<96xf32> + %437 = flow.variable.load.indirect %158 : !util.ptr<tensor<1x1x288x96xf32>> -> tensor<1x1x288x96xf32> + %438 = flow.variable.load.indirect %157 : !util.ptr<tensor<288xf32>> -> tensor<288xf32> + %439 = flow.variable.load.indirect %156 : !util.ptr<tensor<1x1x72x288xf32>> -> tensor<1x1x72x288xf32> + %440 = flow.variable.load.indirect %155 : !util.ptr<tensor<72xf32>> -> tensor<72xf32> + %441 = flow.variable.load.indirect %154 : !util.ptr<tensor<1x1x288x72xf32>> -> tensor<1x1x288x72xf32> + %442 = flow.variable.load.indirect %172 : !util.ptr<tensor<576xf32>> -> tensor<576xf32> + %443 = flow.variable.load.indirect %171 : !util.ptr<tensor<576xf32>> -> tensor<576xf32> + %444 = flow.variable.load.indirect %170 : !util.ptr<tensor<576xf32>> -> tensor<576xf32> + %445 = flow.variable.load.indirect %169 : !util.ptr<tensor<576xf32>> -> tensor<576xf32> + %446 = flow.variable.load.indirect %168 : !util.ptr<tensor<5x5x576x1xf32>> -> tensor<5x5x576x1xf32> + %447 = flow.variable.load.indirect %167 
: !util.ptr<tensor<576xf32>> -> tensor<576xf32> + %448 = flow.variable.load.indirect %166 : !util.ptr<tensor<576xf32>> -> tensor<576xf32> + %449 = flow.variable.load.indirect %165 : !util.ptr<tensor<576xf32>> -> tensor<576xf32> + %450 = flow.variable.load.indirect %164 : !util.ptr<tensor<576xf32>> -> tensor<576xf32> + %451 = flow.variable.load.indirect %163 : !util.ptr<tensor<1x1x96x576xf32>> -> tensor<1x1x96x576xf32> + %452 = flow.variable.load.indirect %181 : !util.ptr<tensor<96xf32>> -> tensor<96xf32> + %453 = flow.variable.load.indirect %180 : !util.ptr<tensor<96xf32>> -> tensor<96xf32> + %454 = flow.variable.load.indirect %179 : !util.ptr<tensor<96xf32>> -> tensor<96xf32> + %455 = flow.variable.load.indirect %178 : !util.ptr<tensor<96xf32>> -> tensor<96xf32> + %456 = flow.variable.load.indirect %177 : !util.ptr<tensor<1x1x576x96xf32>> -> tensor<1x1x576x96xf32> + %457 = flow.variable.load.indirect %176 : !util.ptr<tensor<576xf32>> -> tensor<576xf32> + %458 = flow.variable.load.indirect %175 : !util.ptr<tensor<1x1x144x576xf32>> -> tensor<1x1x144x576xf32> + %459 = flow.variable.load.indirect %174 : !util.ptr<tensor<144xf32>> -> tensor<144xf32> + %460 = flow.variable.load.indirect %173 : !util.ptr<tensor<1x1x576x144xf32>> -> tensor<1x1x576x144xf32> + %461 = flow.variable.load.indirect %9 : !util.ptr<tensor<16xf32>> -> tensor<16xf32> + %462 = flow.variable.load.indirect %8 : !util.ptr<tensor<16xf32>> -> tensor<16xf32> + %463 = flow.variable.load.indirect %7 : !util.ptr<tensor<16xf32>> -> tensor<16xf32> + %464 = flow.variable.load.indirect %6 : !util.ptr<tensor<16xf32>> -> tensor<16xf32> + %465 = flow.variable.load.indirect %5 : !util.ptr<tensor<3x3x16x1xf32>> -> tensor<3x3x16x1xf32> + %466 = flow.variable.load.indirect %18 : !util.ptr<tensor<16xf32>> -> tensor<16xf32> + %467 = flow.variable.load.indirect %17 : !util.ptr<tensor<16xf32>> -> tensor<16xf32> + %468 = flow.variable.load.indirect %16 : !util.ptr<tensor<16xf32>> -> tensor<16xf32> + %469 = 
flow.variable.load.indirect %15 : !util.ptr<tensor<16xf32>> -> tensor<16xf32> + %470 = flow.variable.load.indirect %14 : !util.ptr<tensor<1x1x16x16xf32>> -> tensor<1x1x16x16xf32> + %471 = flow.variable.load.indirect %13 : !util.ptr<tensor<16xf32>> -> tensor<16xf32> + %472 = flow.variable.load.indirect %12 : !util.ptr<tensor<1x1x8x16xf32>> -> tensor<1x1x8x16xf32> + %473 = flow.variable.load.indirect %11 : !util.ptr<tensor<8xf32>> -> tensor<8xf32> + %474 = flow.variable.load.indirect %10 : !util.ptr<tensor<1x1x16x8xf32>> -> tensor<1x1x16x8xf32> + %475 = flow.variable.load.indirect %209 : !util.ptr<tensor<1000xf32>> -> tensor<1000xf32> + %476 = flow.variable.load.indirect %208 : !util.ptr<tensor<1x1x1024x1000xf32>> -> tensor<1x1x1024x1000xf32> %477 = mhlo.multiply %arg0, %210 : tensor<1x224x224x3xf32> %478 = mhlo.add %477, %211 : tensor<1x224x224x3xf32> %479 = "mhlo.convolution"(%478, %278) {batch_group_count = 1 : i64, dimension_numbers = {input_batch_dimension = 0 : i64, input_feature_dimension = 3 : i64, input_spatial_dimensions = dense<[1, 2]> : tensor<2xi64>, kernel_input_feature_dimension = 2 : i64, kernel_output_feature_dimension = 3 : i64, kernel_spatial_dimensions = dense<[0, 1]> : tensor<2xi64>, output_batch_dimension = 0 : i64, output_feature_dimension = 3 : i64, output_spatial_dimensions = dense<[1, 2]> : tensor<2xi64>}, feature_group_count = 1 : i64, padding = dense<[[0, 1], [0, 1]]> : tensor<2x2xi64>, rhs_dilation = dense<1> : tensor<2xi64>, window_strides = dense<2> : tensor<2xi64>} : (tensor<1x224x224x3xf32>, tensor<3x3x3x16xf32>) -> tensor<1x112x112x16xf32>
diff --git a/iree/test/e2e/models/resnet50_fake_weights.mlir b/iree/test/e2e/models/resnet50_fake_weights.mlir index 46ab917..14fca46 100644 --- a/iree/test/e2e/models/resnet50_fake_weights.mlir +++ b/iree/test/e2e/models/resnet50_fake_weights.mlir
@@ -327,326 +327,326 @@ flow.variable @"__iree_flow___sm_node1256__m.layer-176.bias" dense<3.125000e-03> : tensor<1000xf32> attributes {noinline, sym_visibility = "private"} // CHECK-LABEL: EXEC @predict func @predict(%arg0: tensor<1x224x224x3xf32>) -> tensor<1x1000xf32> attributes {iree.module.export, iree.reflection = {abi = "sip", abiv = 1 : i32, sip = "I8!S5!k0_0R3!_0"}} { - %0 = flow.variable.address @"__iree_flow___sm_node188__m.layer-2.kernel" : !iree.ptr<tensor<7x7x3x64xf32>> - %1 = flow.variable.address @"__iree_flow___sm_node189__m.layer-2.bias" : !iree.ptr<tensor<64xf32>> - %2 = flow.variable.address @"__iree_flow___sm_node195__m.layer-3.gamma" : !iree.ptr<tensor<64xf32>> - %3 = flow.variable.address @"__iree_flow___sm_node196__m.layer-3.beta" : !iree.ptr<tensor<64xf32>> - %4 = flow.variable.address @"__iree_flow___sm_node197__m.layer-3.moving_mean" : !iree.ptr<tensor<64xf32>> - %5 = flow.variable.address @"__iree_flow___sm_node198__m.layer-3.moving_variance" : !iree.ptr<tensor<64xf32>> - %6 = flow.variable.address @"__iree_flow___sm_node215__m.layer-7.kernel" : !iree.ptr<tensor<1x1x64x64xf32>> - %7 = flow.variable.address @"__iree_flow___sm_node216__m.layer-7.bias" : !iree.ptr<tensor<64xf32>> - %8 = flow.variable.address @"__iree_flow___sm_node222__m.layer-8.gamma" : !iree.ptr<tensor<64xf32>> - %9 = flow.variable.address @"__iree_flow___sm_node223__m.layer-8.beta" : !iree.ptr<tensor<64xf32>> - %10 = flow.variable.address @"__iree_flow___sm_node224__m.layer-8.moving_mean" : !iree.ptr<tensor<64xf32>> - %11 = flow.variable.address @"__iree_flow___sm_node225__m.layer-8.moving_variance" : !iree.ptr<tensor<64xf32>> - %12 = flow.variable.address @"__iree_flow___sm_node234__m.layer-10.kernel" : !iree.ptr<tensor<3x3x64x64xf32>> - %13 = flow.variable.address @"__iree_flow___sm_node235__m.layer-10.bias" : !iree.ptr<tensor<64xf32>> - %14 = flow.variable.address @"__iree_flow___sm_node241__m.layer-11.gamma" : !iree.ptr<tensor<64xf32>> - %15 = flow.variable.address 
@"__iree_flow___sm_node242__m.layer-11.beta" : !iree.ptr<tensor<64xf32>> - %16 = flow.variable.address @"__iree_flow___sm_node243__m.layer-11.moving_mean" : !iree.ptr<tensor<64xf32>> - %17 = flow.variable.address @"__iree_flow___sm_node244__m.layer-11.moving_variance" : !iree.ptr<tensor<64xf32>> - %18 = flow.variable.address @"__iree_flow___sm_node259__m.layer-14.kernel" : !iree.ptr<tensor<1x1x64x256xf32>> - %19 = flow.variable.address @"__iree_flow___sm_node260__m.layer-14.bias" : !iree.ptr<tensor<256xf32>> - %20 = flow.variable.address @"__iree_flow___sm_node253__m.layer-13.kernel" : !iree.ptr<tensor<1x1x64x256xf32>> - %21 = flow.variable.address @"__iree_flow___sm_node254__m.layer-13.bias" : !iree.ptr<tensor<256xf32>> - %22 = flow.variable.address @"__iree_flow___sm_node266__m.layer-15.gamma" : !iree.ptr<tensor<256xf32>> - %23 = flow.variable.address @"__iree_flow___sm_node267__m.layer-15.beta" : !iree.ptr<tensor<256xf32>> - %24 = flow.variable.address @"__iree_flow___sm_node268__m.layer-15.moving_mean" : !iree.ptr<tensor<256xf32>> - %25 = flow.variable.address @"__iree_flow___sm_node269__m.layer-15.moving_variance" : !iree.ptr<tensor<256xf32>> - %26 = flow.variable.address @"__iree_flow___sm_node275__m.layer-16.gamma" : !iree.ptr<tensor<256xf32>> - %27 = flow.variable.address @"__iree_flow___sm_node276__m.layer-16.beta" : !iree.ptr<tensor<256xf32>> - %28 = flow.variable.address @"__iree_flow___sm_node277__m.layer-16.moving_mean" : !iree.ptr<tensor<256xf32>> - %29 = flow.variable.address @"__iree_flow___sm_node278__m.layer-16.moving_variance" : !iree.ptr<tensor<256xf32>> - %30 = flow.variable.address @"__iree_flow___sm_node291__m.layer-19.kernel" : !iree.ptr<tensor<1x1x256x64xf32>> - %31 = flow.variable.address @"__iree_flow___sm_node292__m.layer-19.bias" : !iree.ptr<tensor<64xf32>> - %32 = flow.variable.address @"__iree_flow___sm_node298__m.layer-20.gamma" : !iree.ptr<tensor<64xf32>> - %33 = flow.variable.address @"__iree_flow___sm_node299__m.layer-20.beta" : 
!iree.ptr<tensor<64xf32>> - %34 = flow.variable.address @"__iree_flow___sm_node300__m.layer-20.moving_mean" : !iree.ptr<tensor<64xf32>> - %35 = flow.variable.address @"__iree_flow___sm_node301__m.layer-20.moving_variance" : !iree.ptr<tensor<64xf32>> - %36 = flow.variable.address @"__iree_flow___sm_node310__m.layer-22.kernel" : !iree.ptr<tensor<3x3x64x64xf32>> - %37 = flow.variable.address @"__iree_flow___sm_node311__m.layer-22.bias" : !iree.ptr<tensor<64xf32>> - %38 = flow.variable.address @"__iree_flow___sm_node317__m.layer-23.gamma" : !iree.ptr<tensor<64xf32>> - %39 = flow.variable.address @"__iree_flow___sm_node318__m.layer-23.beta" : !iree.ptr<tensor<64xf32>> - %40 = flow.variable.address @"__iree_flow___sm_node319__m.layer-23.moving_mean" : !iree.ptr<tensor<64xf32>> - %41 = flow.variable.address @"__iree_flow___sm_node320__m.layer-23.moving_variance" : !iree.ptr<tensor<64xf32>> - %42 = flow.variable.address @"__iree_flow___sm_node329__m.layer-25.kernel" : !iree.ptr<tensor<1x1x64x256xf32>> - %43 = flow.variable.address @"__iree_flow___sm_node330__m.layer-25.bias" : !iree.ptr<tensor<256xf32>> - %44 = flow.variable.address @"__iree_flow___sm_node336__m.layer-26.gamma" : !iree.ptr<tensor<256xf32>> - %45 = flow.variable.address @"__iree_flow___sm_node337__m.layer-26.beta" : !iree.ptr<tensor<256xf32>> - %46 = flow.variable.address @"__iree_flow___sm_node338__m.layer-26.moving_mean" : !iree.ptr<tensor<256xf32>> - %47 = flow.variable.address @"__iree_flow___sm_node339__m.layer-26.moving_variance" : !iree.ptr<tensor<256xf32>> - %48 = flow.variable.address @"__iree_flow___sm_node352__m.layer-29.kernel" : !iree.ptr<tensor<1x1x256x64xf32>> - %49 = flow.variable.address @"__iree_flow___sm_node353__m.layer-29.bias" : !iree.ptr<tensor<64xf32>> - %50 = flow.variable.address @"__iree_flow___sm_node359__m.layer-30.gamma" : !iree.ptr<tensor<64xf32>> - %51 = flow.variable.address @"__iree_flow___sm_node360__m.layer-30.beta" : !iree.ptr<tensor<64xf32>> - %52 = 
flow.variable.address @"__iree_flow___sm_node361__m.layer-30.moving_mean" : !iree.ptr<tensor<64xf32>> - %53 = flow.variable.address @"__iree_flow___sm_node362__m.layer-30.moving_variance" : !iree.ptr<tensor<64xf32>> - %54 = flow.variable.address @"__iree_flow___sm_node371__m.layer-32.kernel" : !iree.ptr<tensor<3x3x64x64xf32>> - %55 = flow.variable.address @"__iree_flow___sm_node372__m.layer-32.bias" : !iree.ptr<tensor<64xf32>> - %56 = flow.variable.address @"__iree_flow___sm_node378__m.layer-33.gamma" : !iree.ptr<tensor<64xf32>> - %57 = flow.variable.address @"__iree_flow___sm_node379__m.layer-33.beta" : !iree.ptr<tensor<64xf32>> - %58 = flow.variable.address @"__iree_flow___sm_node380__m.layer-33.moving_mean" : !iree.ptr<tensor<64xf32>> - %59 = flow.variable.address @"__iree_flow___sm_node381__m.layer-33.moving_variance" : !iree.ptr<tensor<64xf32>> - %60 = flow.variable.address @"__iree_flow___sm_node390__m.layer-35.kernel" : !iree.ptr<tensor<1x1x64x256xf32>> - %61 = flow.variable.address @"__iree_flow___sm_node391__m.layer-35.bias" : !iree.ptr<tensor<256xf32>> - %62 = flow.variable.address @"__iree_flow___sm_node397__m.layer-36.gamma" : !iree.ptr<tensor<256xf32>> - %63 = flow.variable.address @"__iree_flow___sm_node398__m.layer-36.beta" : !iree.ptr<tensor<256xf32>> - %64 = flow.variable.address @"__iree_flow___sm_node399__m.layer-36.moving_mean" : !iree.ptr<tensor<256xf32>> - %65 = flow.variable.address @"__iree_flow___sm_node400__m.layer-36.moving_variance" : !iree.ptr<tensor<256xf32>> - %66 = flow.variable.address @"__iree_flow___sm_node413__m.layer-39.kernel" : !iree.ptr<tensor<1x1x256x128xf32>> - %67 = flow.variable.address @"__iree_flow___sm_node414__m.layer-39.bias" : !iree.ptr<tensor<128xf32>> - %68 = flow.variable.address @"__iree_flow___sm_node420__m.layer-40.gamma" : !iree.ptr<tensor<128xf32>> - %69 = flow.variable.address @"__iree_flow___sm_node421__m.layer-40.beta" : !iree.ptr<tensor<128xf32>> - %70 = flow.variable.address 
@"__iree_flow___sm_node422__m.layer-40.moving_mean" : !iree.ptr<tensor<128xf32>> - %71 = flow.variable.address @"__iree_flow___sm_node423__m.layer-40.moving_variance" : !iree.ptr<tensor<128xf32>> - %72 = flow.variable.address @"__iree_flow___sm_node432__m.layer-42.kernel" : !iree.ptr<tensor<3x3x128x128xf32>> - %73 = flow.variable.address @"__iree_flow___sm_node433__m.layer-42.bias" : !iree.ptr<tensor<128xf32>> - %74 = flow.variable.address @"__iree_flow___sm_node439__m.layer-43.gamma" : !iree.ptr<tensor<128xf32>> - %75 = flow.variable.address @"__iree_flow___sm_node440__m.layer-43.beta" : !iree.ptr<tensor<128xf32>> - %76 = flow.variable.address @"__iree_flow___sm_node441__m.layer-43.moving_mean" : !iree.ptr<tensor<128xf32>> - %77 = flow.variable.address @"__iree_flow___sm_node442__m.layer-43.moving_variance" : !iree.ptr<tensor<128xf32>> - %78 = flow.variable.address @"__iree_flow___sm_node457__m.layer-46.kernel" : !iree.ptr<tensor<1x1x128x512xf32>> - %79 = flow.variable.address @"__iree_flow___sm_node458__m.layer-46.bias" : !iree.ptr<tensor<512xf32>> - %80 = flow.variable.address @"__iree_flow___sm_node451__m.layer-45.kernel" : !iree.ptr<tensor<1x1x256x512xf32>> - %81 = flow.variable.address @"__iree_flow___sm_node452__m.layer-45.bias" : !iree.ptr<tensor<512xf32>> - %82 = flow.variable.address @"__iree_flow___sm_node464__m.layer-47.gamma" : !iree.ptr<tensor<512xf32>> - %83 = flow.variable.address @"__iree_flow___sm_node465__m.layer-47.beta" : !iree.ptr<tensor<512xf32>> - %84 = flow.variable.address @"__iree_flow___sm_node466__m.layer-47.moving_mean" : !iree.ptr<tensor<512xf32>> - %85 = flow.variable.address @"__iree_flow___sm_node467__m.layer-47.moving_variance" : !iree.ptr<tensor<512xf32>> - %86 = flow.variable.address @"__iree_flow___sm_node473__m.layer-48.gamma" : !iree.ptr<tensor<512xf32>> - %87 = flow.variable.address @"__iree_flow___sm_node474__m.layer-48.beta" : !iree.ptr<tensor<512xf32>> - %88 = flow.variable.address 
@"__iree_flow___sm_node475__m.layer-48.moving_mean" : !iree.ptr<tensor<512xf32>> - %89 = flow.variable.address @"__iree_flow___sm_node476__m.layer-48.moving_variance" : !iree.ptr<tensor<512xf32>> - %90 = flow.variable.address @"__iree_flow___sm_node489__m.layer-51.kernel" : !iree.ptr<tensor<1x1x512x128xf32>> - %91 = flow.variable.address @"__iree_flow___sm_node490__m.layer-51.bias" : !iree.ptr<tensor<128xf32>> - %92 = flow.variable.address @"__iree_flow___sm_node496__m.layer-52.gamma" : !iree.ptr<tensor<128xf32>> - %93 = flow.variable.address @"__iree_flow___sm_node497__m.layer-52.beta" : !iree.ptr<tensor<128xf32>> - %94 = flow.variable.address @"__iree_flow___sm_node498__m.layer-52.moving_mean" : !iree.ptr<tensor<128xf32>> - %95 = flow.variable.address @"__iree_flow___sm_node499__m.layer-52.moving_variance" : !iree.ptr<tensor<128xf32>> - %96 = flow.variable.address @"__iree_flow___sm_node508__m.layer-54.kernel" : !iree.ptr<tensor<3x3x128x128xf32>> - %97 = flow.variable.address @"__iree_flow___sm_node509__m.layer-54.bias" : !iree.ptr<tensor<128xf32>> - %98 = flow.variable.address @"__iree_flow___sm_node515__m.layer-55.gamma" : !iree.ptr<tensor<128xf32>> - %99 = flow.variable.address @"__iree_flow___sm_node516__m.layer-55.beta" : !iree.ptr<tensor<128xf32>> - %100 = flow.variable.address @"__iree_flow___sm_node517__m.layer-55.moving_mean" : !iree.ptr<tensor<128xf32>> - %101 = flow.variable.address @"__iree_flow___sm_node518__m.layer-55.moving_variance" : !iree.ptr<tensor<128xf32>> - %102 = flow.variable.address @"__iree_flow___sm_node527__m.layer-57.kernel" : !iree.ptr<tensor<1x1x128x512xf32>> - %103 = flow.variable.address @"__iree_flow___sm_node528__m.layer-57.bias" : !iree.ptr<tensor<512xf32>> - %104 = flow.variable.address @"__iree_flow___sm_node534__m.layer-58.gamma" : !iree.ptr<tensor<512xf32>> - %105 = flow.variable.address @"__iree_flow___sm_node535__m.layer-58.beta" : !iree.ptr<tensor<512xf32>> - %106 = flow.variable.address 
@"__iree_flow___sm_node536__m.layer-58.moving_mean" : !iree.ptr<tensor<512xf32>> - %107 = flow.variable.address @"__iree_flow___sm_node537__m.layer-58.moving_variance" : !iree.ptr<tensor<512xf32>> - %108 = flow.variable.address @"__iree_flow___sm_node550__m.layer-61.kernel" : !iree.ptr<tensor<1x1x512x128xf32>> - %109 = flow.variable.address @"__iree_flow___sm_node551__m.layer-61.bias" : !iree.ptr<tensor<128xf32>> - %110 = flow.variable.address @"__iree_flow___sm_node557__m.layer-62.gamma" : !iree.ptr<tensor<128xf32>> - %111 = flow.variable.address @"__iree_flow___sm_node558__m.layer-62.beta" : !iree.ptr<tensor<128xf32>> - %112 = flow.variable.address @"__iree_flow___sm_node559__m.layer-62.moving_mean" : !iree.ptr<tensor<128xf32>> - %113 = flow.variable.address @"__iree_flow___sm_node560__m.layer-62.moving_variance" : !iree.ptr<tensor<128xf32>> - %114 = flow.variable.address @"__iree_flow___sm_node569__m.layer-64.kernel" : !iree.ptr<tensor<3x3x128x128xf32>> - %115 = flow.variable.address @"__iree_flow___sm_node570__m.layer-64.bias" : !iree.ptr<tensor<128xf32>> - %116 = flow.variable.address @"__iree_flow___sm_node576__m.layer-65.gamma" : !iree.ptr<tensor<128xf32>> - %117 = flow.variable.address @"__iree_flow___sm_node577__m.layer-65.beta" : !iree.ptr<tensor<128xf32>> - %118 = flow.variable.address @"__iree_flow___sm_node578__m.layer-65.moving_mean" : !iree.ptr<tensor<128xf32>> - %119 = flow.variable.address @"__iree_flow___sm_node579__m.layer-65.moving_variance" : !iree.ptr<tensor<128xf32>> - %120 = flow.variable.address @"__iree_flow___sm_node588__m.layer-67.kernel" : !iree.ptr<tensor<1x1x128x512xf32>> - %121 = flow.variable.address @"__iree_flow___sm_node589__m.layer-67.bias" : !iree.ptr<tensor<512xf32>> - %122 = flow.variable.address @"__iree_flow___sm_node595__m.layer-68.gamma" : !iree.ptr<tensor<512xf32>> - %123 = flow.variable.address @"__iree_flow___sm_node596__m.layer-68.beta" : !iree.ptr<tensor<512xf32>> - %124 = flow.variable.address 
@"__iree_flow___sm_node597__m.layer-68.moving_mean" : !iree.ptr<tensor<512xf32>> - %125 = flow.variable.address @"__iree_flow___sm_node598__m.layer-68.moving_variance" : !iree.ptr<tensor<512xf32>> - %126 = flow.variable.address @"__iree_flow___sm_node611__m.layer-71.kernel" : !iree.ptr<tensor<1x1x512x128xf32>> - %127 = flow.variable.address @"__iree_flow___sm_node612__m.layer-71.bias" : !iree.ptr<tensor<128xf32>> - %128 = flow.variable.address @"__iree_flow___sm_node618__m.layer-72.gamma" : !iree.ptr<tensor<128xf32>> - %129 = flow.variable.address @"__iree_flow___sm_node619__m.layer-72.beta" : !iree.ptr<tensor<128xf32>> - %130 = flow.variable.address @"__iree_flow___sm_node620__m.layer-72.moving_mean" : !iree.ptr<tensor<128xf32>> - %131 = flow.variable.address @"__iree_flow___sm_node621__m.layer-72.moving_variance" : !iree.ptr<tensor<128xf32>> - %132 = flow.variable.address @"__iree_flow___sm_node630__m.layer-74.kernel" : !iree.ptr<tensor<3x3x128x128xf32>> - %133 = flow.variable.address @"__iree_flow___sm_node631__m.layer-74.bias" : !iree.ptr<tensor<128xf32>> - %134 = flow.variable.address @"__iree_flow___sm_node637__m.layer-75.gamma" : !iree.ptr<tensor<128xf32>> - %135 = flow.variable.address @"__iree_flow___sm_node638__m.layer-75.beta" : !iree.ptr<tensor<128xf32>> - %136 = flow.variable.address @"__iree_flow___sm_node639__m.layer-75.moving_mean" : !iree.ptr<tensor<128xf32>> - %137 = flow.variable.address @"__iree_flow___sm_node640__m.layer-75.moving_variance" : !iree.ptr<tensor<128xf32>> - %138 = flow.variable.address @"__iree_flow___sm_node649__m.layer-77.kernel" : !iree.ptr<tensor<1x1x128x512xf32>> - %139 = flow.variable.address @"__iree_flow___sm_node650__m.layer-77.bias" : !iree.ptr<tensor<512xf32>> - %140 = flow.variable.address @"__iree_flow___sm_node656__m.layer-78.gamma" : !iree.ptr<tensor<512xf32>> - %141 = flow.variable.address @"__iree_flow___sm_node657__m.layer-78.beta" : !iree.ptr<tensor<512xf32>> - %142 = flow.variable.address 
@"__iree_flow___sm_node658__m.layer-78.moving_mean" : !iree.ptr<tensor<512xf32>> - %143 = flow.variable.address @"__iree_flow___sm_node659__m.layer-78.moving_variance" : !iree.ptr<tensor<512xf32>> - %144 = flow.variable.address @"__iree_flow___sm_node672__m.layer-81.kernel" : !iree.ptr<tensor<1x1x512x256xf32>> - %145 = flow.variable.address @"__iree_flow___sm_node673__m.layer-81.bias" : !iree.ptr<tensor<256xf32>> - %146 = flow.variable.address @"__iree_flow___sm_node679__m.layer-82.gamma" : !iree.ptr<tensor<256xf32>> - %147 = flow.variable.address @"__iree_flow___sm_node680__m.layer-82.beta" : !iree.ptr<tensor<256xf32>> - %148 = flow.variable.address @"__iree_flow___sm_node681__m.layer-82.moving_mean" : !iree.ptr<tensor<256xf32>> - %149 = flow.variable.address @"__iree_flow___sm_node682__m.layer-82.moving_variance" : !iree.ptr<tensor<256xf32>> - %150 = flow.variable.address @"__iree_flow___sm_node691__m.layer-84.kernel" : !iree.ptr<tensor<3x3x256x256xf32>> - %151 = flow.variable.address @"__iree_flow___sm_node692__m.layer-84.bias" : !iree.ptr<tensor<256xf32>> - %152 = flow.variable.address @"__iree_flow___sm_node698__m.layer-85.gamma" : !iree.ptr<tensor<256xf32>> - %153 = flow.variable.address @"__iree_flow___sm_node699__m.layer-85.beta" : !iree.ptr<tensor<256xf32>> - %154 = flow.variable.address @"__iree_flow___sm_node700__m.layer-85.moving_mean" : !iree.ptr<tensor<256xf32>> - %155 = flow.variable.address @"__iree_flow___sm_node701__m.layer-85.moving_variance" : !iree.ptr<tensor<256xf32>> - %156 = flow.variable.address @"__iree_flow___sm_node716__m.layer-88.kernel" : !iree.ptr<tensor<1x1x256x1024xf32>> - %157 = flow.variable.address @"__iree_flow___sm_node717__m.layer-88.bias" : !iree.ptr<tensor<1024xf32>> - %158 = flow.variable.address @"__iree_flow___sm_node710__m.layer-87.kernel" : !iree.ptr<tensor<1x1x512x1024xf32>> - %159 = flow.variable.address @"__iree_flow___sm_node711__m.layer-87.bias" : !iree.ptr<tensor<1024xf32>> - %160 = flow.variable.address 
@"__iree_flow___sm_node723__m.layer-89.gamma" : !iree.ptr<tensor<1024xf32>> - %161 = flow.variable.address @"__iree_flow___sm_node724__m.layer-89.beta" : !iree.ptr<tensor<1024xf32>> - %162 = flow.variable.address @"__iree_flow___sm_node725__m.layer-89.moving_mean" : !iree.ptr<tensor<1024xf32>> - %163 = flow.variable.address @"__iree_flow___sm_node726__m.layer-89.moving_variance" : !iree.ptr<tensor<1024xf32>> - %164 = flow.variable.address @"__iree_flow___sm_node732__m.layer-90.gamma" : !iree.ptr<tensor<1024xf32>> - %165 = flow.variable.address @"__iree_flow___sm_node733__m.layer-90.beta" : !iree.ptr<tensor<1024xf32>> - %166 = flow.variable.address @"__iree_flow___sm_node734__m.layer-90.moving_mean" : !iree.ptr<tensor<1024xf32>> - %167 = flow.variable.address @"__iree_flow___sm_node735__m.layer-90.moving_variance" : !iree.ptr<tensor<1024xf32>> - %168 = flow.variable.address @"__iree_flow___sm_node748__m.layer-93.kernel" : !iree.ptr<tensor<1x1x1024x256xf32>> - %169 = flow.variable.address @"__iree_flow___sm_node749__m.layer-93.bias" : !iree.ptr<tensor<256xf32>> - %170 = flow.variable.address @"__iree_flow___sm_node755__m.layer-94.gamma" : !iree.ptr<tensor<256xf32>> - %171 = flow.variable.address @"__iree_flow___sm_node756__m.layer-94.beta" : !iree.ptr<tensor<256xf32>> - %172 = flow.variable.address @"__iree_flow___sm_node757__m.layer-94.moving_mean" : !iree.ptr<tensor<256xf32>> - %173 = flow.variable.address @"__iree_flow___sm_node758__m.layer-94.moving_variance" : !iree.ptr<tensor<256xf32>> - %174 = flow.variable.address @"__iree_flow___sm_node767__m.layer-96.kernel" : !iree.ptr<tensor<3x3x256x256xf32>> - %175 = flow.variable.address @"__iree_flow___sm_node768__m.layer-96.bias" : !iree.ptr<tensor<256xf32>> - %176 = flow.variable.address @"__iree_flow___sm_node774__m.layer-97.gamma" : !iree.ptr<tensor<256xf32>> - %177 = flow.variable.address @"__iree_flow___sm_node775__m.layer-97.beta" : !iree.ptr<tensor<256xf32>> - %178 = flow.variable.address 
@"__iree_flow___sm_node776__m.layer-97.moving_mean" : !iree.ptr<tensor<256xf32>> - %179 = flow.variable.address @"__iree_flow___sm_node777__m.layer-97.moving_variance" : !iree.ptr<tensor<256xf32>> - %180 = flow.variable.address @"__iree_flow___sm_node786__m.layer-99.kernel" : !iree.ptr<tensor<1x1x256x1024xf32>> - %181 = flow.variable.address @"__iree_flow___sm_node787__m.layer-99.bias" : !iree.ptr<tensor<1024xf32>> - %182 = flow.variable.address @"__iree_flow___sm_node793__m.layer-100.gamma" : !iree.ptr<tensor<1024xf32>> - %183 = flow.variable.address @"__iree_flow___sm_node794__m.layer-100.beta" : !iree.ptr<tensor<1024xf32>> - %184 = flow.variable.address @"__iree_flow___sm_node795__m.layer-100.moving_mean" : !iree.ptr<tensor<1024xf32>> - %185 = flow.variable.address @"__iree_flow___sm_node796__m.layer-100.moving_variance" : !iree.ptr<tensor<1024xf32>> - %186 = flow.variable.address @"__iree_flow___sm_node809__m.layer-103.kernel" : !iree.ptr<tensor<1x1x1024x256xf32>> - %187 = flow.variable.address @"__iree_flow___sm_node810__m.layer-103.bias" : !iree.ptr<tensor<256xf32>> - %188 = flow.variable.address @"__iree_flow___sm_node816__m.layer-104.gamma" : !iree.ptr<tensor<256xf32>> - %189 = flow.variable.address @"__iree_flow___sm_node817__m.layer-104.beta" : !iree.ptr<tensor<256xf32>> - %190 = flow.variable.address @"__iree_flow___sm_node818__m.layer-104.moving_mean" : !iree.ptr<tensor<256xf32>> - %191 = flow.variable.address @"__iree_flow___sm_node819__m.layer-104.moving_variance" : !iree.ptr<tensor<256xf32>> - %192 = flow.variable.address @"__iree_flow___sm_node828__m.layer-106.kernel" : !iree.ptr<tensor<3x3x256x256xf32>> - %193 = flow.variable.address @"__iree_flow___sm_node829__m.layer-106.bias" : !iree.ptr<tensor<256xf32>> - %194 = flow.variable.address @"__iree_flow___sm_node835__m.layer-107.gamma" : !iree.ptr<tensor<256xf32>> - %195 = flow.variable.address @"__iree_flow___sm_node836__m.layer-107.beta" : !iree.ptr<tensor<256xf32>> - %196 = flow.variable.address 
@"__iree_flow___sm_node837__m.layer-107.moving_mean" : !iree.ptr<tensor<256xf32>> - %197 = flow.variable.address @"__iree_flow___sm_node838__m.layer-107.moving_variance" : !iree.ptr<tensor<256xf32>> - %198 = flow.variable.address @"__iree_flow___sm_node847__m.layer-109.kernel" : !iree.ptr<tensor<1x1x256x1024xf32>> - %199 = flow.variable.address @"__iree_flow___sm_node848__m.layer-109.bias" : !iree.ptr<tensor<1024xf32>> - %200 = flow.variable.address @"__iree_flow___sm_node854__m.layer-110.gamma" : !iree.ptr<tensor<1024xf32>> - %201 = flow.variable.address @"__iree_flow___sm_node855__m.layer-110.beta" : !iree.ptr<tensor<1024xf32>> - %202 = flow.variable.address @"__iree_flow___sm_node856__m.layer-110.moving_mean" : !iree.ptr<tensor<1024xf32>> - %203 = flow.variable.address @"__iree_flow___sm_node857__m.layer-110.moving_variance" : !iree.ptr<tensor<1024xf32>> - %204 = flow.variable.address @"__iree_flow___sm_node870__m.layer-113.kernel" : !iree.ptr<tensor<1x1x1024x256xf32>> - %205 = flow.variable.address @"__iree_flow___sm_node871__m.layer-113.bias" : !iree.ptr<tensor<256xf32>> - %206 = flow.variable.address @"__iree_flow___sm_node877__m.layer-114.gamma" : !iree.ptr<tensor<256xf32>> - %207 = flow.variable.address @"__iree_flow___sm_node878__m.layer-114.beta" : !iree.ptr<tensor<256xf32>> - %208 = flow.variable.address @"__iree_flow___sm_node879__m.layer-114.moving_mean" : !iree.ptr<tensor<256xf32>> - %209 = flow.variable.address @"__iree_flow___sm_node880__m.layer-114.moving_variance" : !iree.ptr<tensor<256xf32>> - %210 = flow.variable.address @"__iree_flow___sm_node889__m.layer-116.kernel" : !iree.ptr<tensor<3x3x256x256xf32>> - %211 = flow.variable.address @"__iree_flow___sm_node890__m.layer-116.bias" : !iree.ptr<tensor<256xf32>> - %212 = flow.variable.address @"__iree_flow___sm_node896__m.layer-117.gamma" : !iree.ptr<tensor<256xf32>> - %213 = flow.variable.address @"__iree_flow___sm_node897__m.layer-117.beta" : !iree.ptr<tensor<256xf32>> - %214 = 
flow.variable.address @"__iree_flow___sm_node898__m.layer-117.moving_mean" : !iree.ptr<tensor<256xf32>> - %215 = flow.variable.address @"__iree_flow___sm_node899__m.layer-117.moving_variance" : !iree.ptr<tensor<256xf32>> - %216 = flow.variable.address @"__iree_flow___sm_node908__m.layer-119.kernel" : !iree.ptr<tensor<1x1x256x1024xf32>> - %217 = flow.variable.address @"__iree_flow___sm_node909__m.layer-119.bias" : !iree.ptr<tensor<1024xf32>> - %218 = flow.variable.address @"__iree_flow___sm_node915__m.layer-120.gamma" : !iree.ptr<tensor<1024xf32>> - %219 = flow.variable.address @"__iree_flow___sm_node916__m.layer-120.beta" : !iree.ptr<tensor<1024xf32>> - %220 = flow.variable.address @"__iree_flow___sm_node917__m.layer-120.moving_mean" : !iree.ptr<tensor<1024xf32>> - %221 = flow.variable.address @"__iree_flow___sm_node918__m.layer-120.moving_variance" : !iree.ptr<tensor<1024xf32>> - %222 = flow.variable.address @"__iree_flow___sm_node931__m.layer-123.kernel" : !iree.ptr<tensor<1x1x1024x256xf32>> - %223 = flow.variable.address @"__iree_flow___sm_node932__m.layer-123.bias" : !iree.ptr<tensor<256xf32>> - %224 = flow.variable.address @"__iree_flow___sm_node938__m.layer-124.gamma" : !iree.ptr<tensor<256xf32>> - %225 = flow.variable.address @"__iree_flow___sm_node939__m.layer-124.beta" : !iree.ptr<tensor<256xf32>> - %226 = flow.variable.address @"__iree_flow___sm_node940__m.layer-124.moving_mean" : !iree.ptr<tensor<256xf32>> - %227 = flow.variable.address @"__iree_flow___sm_node941__m.layer-124.moving_variance" : !iree.ptr<tensor<256xf32>> - %228 = flow.variable.address @"__iree_flow___sm_node950__m.layer-126.kernel" : !iree.ptr<tensor<3x3x256x256xf32>> - %229 = flow.variable.address @"__iree_flow___sm_node951__m.layer-126.bias" : !iree.ptr<tensor<256xf32>> - %230 = flow.variable.address @"__iree_flow___sm_node957__m.layer-127.gamma" : !iree.ptr<tensor<256xf32>> - %231 = flow.variable.address @"__iree_flow___sm_node958__m.layer-127.beta" : !iree.ptr<tensor<256xf32>> - %232 
= flow.variable.address @"__iree_flow___sm_node959__m.layer-127.moving_mean" : !iree.ptr<tensor<256xf32>> - %233 = flow.variable.address @"__iree_flow___sm_node960__m.layer-127.moving_variance" : !iree.ptr<tensor<256xf32>> - %234 = flow.variable.address @"__iree_flow___sm_node969__m.layer-129.kernel" : !iree.ptr<tensor<1x1x256x1024xf32>> - %235 = flow.variable.address @"__iree_flow___sm_node970__m.layer-129.bias" : !iree.ptr<tensor<1024xf32>> - %236 = flow.variable.address @"__iree_flow___sm_node976__m.layer-130.gamma" : !iree.ptr<tensor<1024xf32>> - %237 = flow.variable.address @"__iree_flow___sm_node977__m.layer-130.beta" : !iree.ptr<tensor<1024xf32>> - %238 = flow.variable.address @"__iree_flow___sm_node978__m.layer-130.moving_mean" : !iree.ptr<tensor<1024xf32>> - %239 = flow.variable.address @"__iree_flow___sm_node979__m.layer-130.moving_variance" : !iree.ptr<tensor<1024xf32>> - %240 = flow.variable.address @"__iree_flow___sm_node992__m.layer-133.kernel" : !iree.ptr<tensor<1x1x1024x256xf32>> - %241 = flow.variable.address @"__iree_flow___sm_node993__m.layer-133.bias" : !iree.ptr<tensor<256xf32>> - %242 = flow.variable.address @"__iree_flow___sm_node999__m.layer-134.gamma" : !iree.ptr<tensor<256xf32>> - %243 = flow.variable.address @"__iree_flow___sm_node1000__m.layer-134.beta" : !iree.ptr<tensor<256xf32>> - %244 = flow.variable.address @"__iree_flow___sm_node1001__m.layer-134.moving_mean" : !iree.ptr<tensor<256xf32>> - %245 = flow.variable.address @"__iree_flow___sm_node1002__m.layer-134.moving_variance" : !iree.ptr<tensor<256xf32>> - %246 = flow.variable.address @"__iree_flow___sm_node1011__m.layer-136.kernel" : !iree.ptr<tensor<3x3x256x256xf32>> - %247 = flow.variable.address @"__iree_flow___sm_node1012__m.layer-136.bias" : !iree.ptr<tensor<256xf32>> - %248 = flow.variable.address @"__iree_flow___sm_node1018__m.layer-137.gamma" : !iree.ptr<tensor<256xf32>> - %249 = flow.variable.address @"__iree_flow___sm_node1019__m.layer-137.beta" : 
!iree.ptr<tensor<256xf32>> - %250 = flow.variable.address @"__iree_flow___sm_node1020__m.layer-137.moving_mean" : !iree.ptr<tensor<256xf32>> - %251 = flow.variable.address @"__iree_flow___sm_node1021__m.layer-137.moving_variance" : !iree.ptr<tensor<256xf32>> - %252 = flow.variable.address @"__iree_flow___sm_node1030__m.layer-139.kernel" : !iree.ptr<tensor<1x1x256x1024xf32>> - %253 = flow.variable.address @"__iree_flow___sm_node1031__m.layer-139.bias" : !iree.ptr<tensor<1024xf32>> - %254 = flow.variable.address @"__iree_flow___sm_node1037__m.layer-140.gamma" : !iree.ptr<tensor<1024xf32>> - %255 = flow.variable.address @"__iree_flow___sm_node1038__m.layer-140.beta" : !iree.ptr<tensor<1024xf32>> - %256 = flow.variable.address @"__iree_flow___sm_node1039__m.layer-140.moving_mean" : !iree.ptr<tensor<1024xf32>> - %257 = flow.variable.address @"__iree_flow___sm_node1040__m.layer-140.moving_variance" : !iree.ptr<tensor<1024xf32>> - %258 = flow.variable.address @"__iree_flow___sm_node1053__m.layer-143.kernel" : !iree.ptr<tensor<1x1x1024x512xf32>> - %259 = flow.variable.address @"__iree_flow___sm_node1054__m.layer-143.bias" : !iree.ptr<tensor<512xf32>> - %260 = flow.variable.address @"__iree_flow___sm_node1060__m.layer-144.gamma" : !iree.ptr<tensor<512xf32>> - %261 = flow.variable.address @"__iree_flow___sm_node1061__m.layer-144.beta" : !iree.ptr<tensor<512xf32>> - %262 = flow.variable.address @"__iree_flow___sm_node1062__m.layer-144.moving_mean" : !iree.ptr<tensor<512xf32>> - %263 = flow.variable.address @"__iree_flow___sm_node1063__m.layer-144.moving_variance" : !iree.ptr<tensor<512xf32>> - %264 = flow.variable.address @"__iree_flow___sm_node1072__m.layer-146.kernel" : !iree.ptr<tensor<3x3x512x512xf32>> - %265 = flow.variable.address @"__iree_flow___sm_node1073__m.layer-146.bias" : !iree.ptr<tensor<512xf32>> - %266 = flow.variable.address @"__iree_flow___sm_node1079__m.layer-147.gamma" : !iree.ptr<tensor<512xf32>> - %267 = flow.variable.address 
@"__iree_flow___sm_node1080__m.layer-147.beta" : !iree.ptr<tensor<512xf32>> - %268 = flow.variable.address @"__iree_flow___sm_node1081__m.layer-147.moving_mean" : !iree.ptr<tensor<512xf32>> - %269 = flow.variable.address @"__iree_flow___sm_node1082__m.layer-147.moving_variance" : !iree.ptr<tensor<512xf32>> - %270 = flow.variable.address @"__iree_flow___sm_node1097__m.layer-150.kernel" : !iree.ptr<tensor<1x1x512x2048xf32>> - %271 = flow.variable.address @"__iree_flow___sm_node1098__m.layer-150.bias" : !iree.ptr<tensor<2048xf32>> - %272 = flow.variable.address @"__iree_flow___sm_node1091__m.layer-149.kernel" : !iree.ptr<tensor<1x1x1024x2048xf32>> - %273 = flow.variable.address @"__iree_flow___sm_node1092__m.layer-149.bias" : !iree.ptr<tensor<2048xf32>> - %274 = flow.variable.address @"__iree_flow___sm_node1104__m.layer-151.gamma" : !iree.ptr<tensor<2048xf32>> - %275 = flow.variable.address @"__iree_flow___sm_node1105__m.layer-151.beta" : !iree.ptr<tensor<2048xf32>> - %276 = flow.variable.address @"__iree_flow___sm_node1106__m.layer-151.moving_mean" : !iree.ptr<tensor<2048xf32>> - %277 = flow.variable.address @"__iree_flow___sm_node1107__m.layer-151.moving_variance" : !iree.ptr<tensor<2048xf32>> - %278 = flow.variable.address @"__iree_flow___sm_node1113__m.layer-152.gamma" : !iree.ptr<tensor<2048xf32>> - %279 = flow.variable.address @"__iree_flow___sm_node1114__m.layer-152.beta" : !iree.ptr<tensor<2048xf32>> - %280 = flow.variable.address @"__iree_flow___sm_node1115__m.layer-152.moving_mean" : !iree.ptr<tensor<2048xf32>> - %281 = flow.variable.address @"__iree_flow___sm_node1116__m.layer-152.moving_variance" : !iree.ptr<tensor<2048xf32>> - %282 = flow.variable.address @"__iree_flow___sm_node1129__m.layer-155.kernel" : !iree.ptr<tensor<1x1x2048x512xf32>> - %283 = flow.variable.address @"__iree_flow___sm_node1130__m.layer-155.bias" : !iree.ptr<tensor<512xf32>> - %284 = flow.variable.address @"__iree_flow___sm_node1136__m.layer-156.gamma" : !iree.ptr<tensor<512xf32>> - 
%285 = flow.variable.address @"__iree_flow___sm_node1137__m.layer-156.beta" : !iree.ptr<tensor<512xf32>> - %286 = flow.variable.address @"__iree_flow___sm_node1138__m.layer-156.moving_mean" : !iree.ptr<tensor<512xf32>> - %287 = flow.variable.address @"__iree_flow___sm_node1139__m.layer-156.moving_variance" : !iree.ptr<tensor<512xf32>> - %288 = flow.variable.address @"__iree_flow___sm_node1148__m.layer-158.kernel" : !iree.ptr<tensor<3x3x512x512xf32>> - %289 = flow.variable.address @"__iree_flow___sm_node1149__m.layer-158.bias" : !iree.ptr<tensor<512xf32>> - %290 = flow.variable.address @"__iree_flow___sm_node1155__m.layer-159.gamma" : !iree.ptr<tensor<512xf32>> - %291 = flow.variable.address @"__iree_flow___sm_node1156__m.layer-159.beta" : !iree.ptr<tensor<512xf32>> - %292 = flow.variable.address @"__iree_flow___sm_node1157__m.layer-159.moving_mean" : !iree.ptr<tensor<512xf32>> - %293 = flow.variable.address @"__iree_flow___sm_node1158__m.layer-159.moving_variance" : !iree.ptr<tensor<512xf32>> - %294 = flow.variable.address @"__iree_flow___sm_node1167__m.layer-161.kernel" : !iree.ptr<tensor<1x1x512x2048xf32>> - %295 = flow.variable.address @"__iree_flow___sm_node1168__m.layer-161.bias" : !iree.ptr<tensor<2048xf32>> - %296 = flow.variable.address @"__iree_flow___sm_node1174__m.layer-162.gamma" : !iree.ptr<tensor<2048xf32>> - %297 = flow.variable.address @"__iree_flow___sm_node1175__m.layer-162.beta" : !iree.ptr<tensor<2048xf32>> - %298 = flow.variable.address @"__iree_flow___sm_node1176__m.layer-162.moving_mean" : !iree.ptr<tensor<2048xf32>> - %299 = flow.variable.address @"__iree_flow___sm_node1177__m.layer-162.moving_variance" : !iree.ptr<tensor<2048xf32>> - %300 = flow.variable.address @"__iree_flow___sm_node1190__m.layer-165.kernel" : !iree.ptr<tensor<1x1x2048x512xf32>> - %301 = flow.variable.address @"__iree_flow___sm_node1191__m.layer-165.bias" : !iree.ptr<tensor<512xf32>> - %302 = flow.variable.address @"__iree_flow___sm_node1197__m.layer-166.gamma" : 
!iree.ptr<tensor<512xf32>> - %303 = flow.variable.address @"__iree_flow___sm_node1198__m.layer-166.beta" : !iree.ptr<tensor<512xf32>> - %304 = flow.variable.address @"__iree_flow___sm_node1199__m.layer-166.moving_mean" : !iree.ptr<tensor<512xf32>> - %305 = flow.variable.address @"__iree_flow___sm_node1200__m.layer-166.moving_variance" : !iree.ptr<tensor<512xf32>> - %306 = flow.variable.address @"__iree_flow___sm_node1209__m.layer-168.kernel" : !iree.ptr<tensor<3x3x512x512xf32>> - %307 = flow.variable.address @"__iree_flow___sm_node1210__m.layer-168.bias" : !iree.ptr<tensor<512xf32>> - %308 = flow.variable.address @"__iree_flow___sm_node1216__m.layer-169.gamma" : !iree.ptr<tensor<512xf32>> - %309 = flow.variable.address @"__iree_flow___sm_node1217__m.layer-169.beta" : !iree.ptr<tensor<512xf32>> - %310 = flow.variable.address @"__iree_flow___sm_node1218__m.layer-169.moving_mean" : !iree.ptr<tensor<512xf32>> - %311 = flow.variable.address @"__iree_flow___sm_node1219__m.layer-169.moving_variance" : !iree.ptr<tensor<512xf32>> - %312 = flow.variable.address @"__iree_flow___sm_node1228__m.layer-171.kernel" : !iree.ptr<tensor<1x1x512x2048xf32>> - %313 = flow.variable.address @"__iree_flow___sm_node1229__m.layer-171.bias" : !iree.ptr<tensor<2048xf32>> - %314 = flow.variable.address @"__iree_flow___sm_node1235__m.layer-172.gamma" : !iree.ptr<tensor<2048xf32>> - %315 = flow.variable.address @"__iree_flow___sm_node1236__m.layer-172.beta" : !iree.ptr<tensor<2048xf32>> - %316 = flow.variable.address @"__iree_flow___sm_node1237__m.layer-172.moving_mean" : !iree.ptr<tensor<2048xf32>> - %317 = flow.variable.address @"__iree_flow___sm_node1238__m.layer-172.moving_variance" : !iree.ptr<tensor<2048xf32>> - %318 = flow.variable.address @"__iree_flow___sm_node1255__m.layer-176.kernel" : !iree.ptr<tensor<2048x1000xf32>> - %319 = flow.variable.address @"__iree_flow___sm_node1256__m.layer-176.bias" : !iree.ptr<tensor<1000xf32>> + %0 = flow.variable.address 
@"__iree_flow___sm_node188__m.layer-2.kernel" : !util.ptr<tensor<7x7x3x64xf32>> + %1 = flow.variable.address @"__iree_flow___sm_node189__m.layer-2.bias" : !util.ptr<tensor<64xf32>> + %2 = flow.variable.address @"__iree_flow___sm_node195__m.layer-3.gamma" : !util.ptr<tensor<64xf32>> + %3 = flow.variable.address @"__iree_flow___sm_node196__m.layer-3.beta" : !util.ptr<tensor<64xf32>> + %4 = flow.variable.address @"__iree_flow___sm_node197__m.layer-3.moving_mean" : !util.ptr<tensor<64xf32>> + %5 = flow.variable.address @"__iree_flow___sm_node198__m.layer-3.moving_variance" : !util.ptr<tensor<64xf32>> + %6 = flow.variable.address @"__iree_flow___sm_node215__m.layer-7.kernel" : !util.ptr<tensor<1x1x64x64xf32>> + %7 = flow.variable.address @"__iree_flow___sm_node216__m.layer-7.bias" : !util.ptr<tensor<64xf32>> + %8 = flow.variable.address @"__iree_flow___sm_node222__m.layer-8.gamma" : !util.ptr<tensor<64xf32>> + %9 = flow.variable.address @"__iree_flow___sm_node223__m.layer-8.beta" : !util.ptr<tensor<64xf32>> + %10 = flow.variable.address @"__iree_flow___sm_node224__m.layer-8.moving_mean" : !util.ptr<tensor<64xf32>> + %11 = flow.variable.address @"__iree_flow___sm_node225__m.layer-8.moving_variance" : !util.ptr<tensor<64xf32>> + %12 = flow.variable.address @"__iree_flow___sm_node234__m.layer-10.kernel" : !util.ptr<tensor<3x3x64x64xf32>> + %13 = flow.variable.address @"__iree_flow___sm_node235__m.layer-10.bias" : !util.ptr<tensor<64xf32>> + %14 = flow.variable.address @"__iree_flow___sm_node241__m.layer-11.gamma" : !util.ptr<tensor<64xf32>> + %15 = flow.variable.address @"__iree_flow___sm_node242__m.layer-11.beta" : !util.ptr<tensor<64xf32>> + %16 = flow.variable.address @"__iree_flow___sm_node243__m.layer-11.moving_mean" : !util.ptr<tensor<64xf32>> + %17 = flow.variable.address @"__iree_flow___sm_node244__m.layer-11.moving_variance" : !util.ptr<tensor<64xf32>> + %18 = flow.variable.address @"__iree_flow___sm_node259__m.layer-14.kernel" : !util.ptr<tensor<1x1x64x256xf32>> 
+ %19 = flow.variable.address @"__iree_flow___sm_node260__m.layer-14.bias" : !util.ptr<tensor<256xf32>> + %20 = flow.variable.address @"__iree_flow___sm_node253__m.layer-13.kernel" : !util.ptr<tensor<1x1x64x256xf32>> + %21 = flow.variable.address @"__iree_flow___sm_node254__m.layer-13.bias" : !util.ptr<tensor<256xf32>> + %22 = flow.variable.address @"__iree_flow___sm_node266__m.layer-15.gamma" : !util.ptr<tensor<256xf32>> + %23 = flow.variable.address @"__iree_flow___sm_node267__m.layer-15.beta" : !util.ptr<tensor<256xf32>> + %24 = flow.variable.address @"__iree_flow___sm_node268__m.layer-15.moving_mean" : !util.ptr<tensor<256xf32>> + %25 = flow.variable.address @"__iree_flow___sm_node269__m.layer-15.moving_variance" : !util.ptr<tensor<256xf32>> + %26 = flow.variable.address @"__iree_flow___sm_node275__m.layer-16.gamma" : !util.ptr<tensor<256xf32>> + %27 = flow.variable.address @"__iree_flow___sm_node276__m.layer-16.beta" : !util.ptr<tensor<256xf32>> + %28 = flow.variable.address @"__iree_flow___sm_node277__m.layer-16.moving_mean" : !util.ptr<tensor<256xf32>> + %29 = flow.variable.address @"__iree_flow___sm_node278__m.layer-16.moving_variance" : !util.ptr<tensor<256xf32>> + %30 = flow.variable.address @"__iree_flow___sm_node291__m.layer-19.kernel" : !util.ptr<tensor<1x1x256x64xf32>> + %31 = flow.variable.address @"__iree_flow___sm_node292__m.layer-19.bias" : !util.ptr<tensor<64xf32>> + %32 = flow.variable.address @"__iree_flow___sm_node298__m.layer-20.gamma" : !util.ptr<tensor<64xf32>> + %33 = flow.variable.address @"__iree_flow___sm_node299__m.layer-20.beta" : !util.ptr<tensor<64xf32>> + %34 = flow.variable.address @"__iree_flow___sm_node300__m.layer-20.moving_mean" : !util.ptr<tensor<64xf32>> + %35 = flow.variable.address @"__iree_flow___sm_node301__m.layer-20.moving_variance" : !util.ptr<tensor<64xf32>> + %36 = flow.variable.address @"__iree_flow___sm_node310__m.layer-22.kernel" : !util.ptr<tensor<3x3x64x64xf32>> + %37 = flow.variable.address 
@"__iree_flow___sm_node311__m.layer-22.bias" : !util.ptr<tensor<64xf32>> + %38 = flow.variable.address @"__iree_flow___sm_node317__m.layer-23.gamma" : !util.ptr<tensor<64xf32>> + %39 = flow.variable.address @"__iree_flow___sm_node318__m.layer-23.beta" : !util.ptr<tensor<64xf32>> + %40 = flow.variable.address @"__iree_flow___sm_node319__m.layer-23.moving_mean" : !util.ptr<tensor<64xf32>> + %41 = flow.variable.address @"__iree_flow___sm_node320__m.layer-23.moving_variance" : !util.ptr<tensor<64xf32>> + %42 = flow.variable.address @"__iree_flow___sm_node329__m.layer-25.kernel" : !util.ptr<tensor<1x1x64x256xf32>> + %43 = flow.variable.address @"__iree_flow___sm_node330__m.layer-25.bias" : !util.ptr<tensor<256xf32>> + %44 = flow.variable.address @"__iree_flow___sm_node336__m.layer-26.gamma" : !util.ptr<tensor<256xf32>> + %45 = flow.variable.address @"__iree_flow___sm_node337__m.layer-26.beta" : !util.ptr<tensor<256xf32>> + %46 = flow.variable.address @"__iree_flow___sm_node338__m.layer-26.moving_mean" : !util.ptr<tensor<256xf32>> + %47 = flow.variable.address @"__iree_flow___sm_node339__m.layer-26.moving_variance" : !util.ptr<tensor<256xf32>> + %48 = flow.variable.address @"__iree_flow___sm_node352__m.layer-29.kernel" : !util.ptr<tensor<1x1x256x64xf32>> + %49 = flow.variable.address @"__iree_flow___sm_node353__m.layer-29.bias" : !util.ptr<tensor<64xf32>> + %50 = flow.variable.address @"__iree_flow___sm_node359__m.layer-30.gamma" : !util.ptr<tensor<64xf32>> + %51 = flow.variable.address @"__iree_flow___sm_node360__m.layer-30.beta" : !util.ptr<tensor<64xf32>> + %52 = flow.variable.address @"__iree_flow___sm_node361__m.layer-30.moving_mean" : !util.ptr<tensor<64xf32>> + %53 = flow.variable.address @"__iree_flow___sm_node362__m.layer-30.moving_variance" : !util.ptr<tensor<64xf32>> + %54 = flow.variable.address @"__iree_flow___sm_node371__m.layer-32.kernel" : !util.ptr<tensor<3x3x64x64xf32>> + %55 = flow.variable.address @"__iree_flow___sm_node372__m.layer-32.bias" : 
!util.ptr<tensor<64xf32>> + %56 = flow.variable.address @"__iree_flow___sm_node378__m.layer-33.gamma" : !util.ptr<tensor<64xf32>> + %57 = flow.variable.address @"__iree_flow___sm_node379__m.layer-33.beta" : !util.ptr<tensor<64xf32>> + %58 = flow.variable.address @"__iree_flow___sm_node380__m.layer-33.moving_mean" : !util.ptr<tensor<64xf32>> + %59 = flow.variable.address @"__iree_flow___sm_node381__m.layer-33.moving_variance" : !util.ptr<tensor<64xf32>> + %60 = flow.variable.address @"__iree_flow___sm_node390__m.layer-35.kernel" : !util.ptr<tensor<1x1x64x256xf32>> + %61 = flow.variable.address @"__iree_flow___sm_node391__m.layer-35.bias" : !util.ptr<tensor<256xf32>> + %62 = flow.variable.address @"__iree_flow___sm_node397__m.layer-36.gamma" : !util.ptr<tensor<256xf32>> + %63 = flow.variable.address @"__iree_flow___sm_node398__m.layer-36.beta" : !util.ptr<tensor<256xf32>> + %64 = flow.variable.address @"__iree_flow___sm_node399__m.layer-36.moving_mean" : !util.ptr<tensor<256xf32>> + %65 = flow.variable.address @"__iree_flow___sm_node400__m.layer-36.moving_variance" : !util.ptr<tensor<256xf32>> + %66 = flow.variable.address @"__iree_flow___sm_node413__m.layer-39.kernel" : !util.ptr<tensor<1x1x256x128xf32>> + %67 = flow.variable.address @"__iree_flow___sm_node414__m.layer-39.bias" : !util.ptr<tensor<128xf32>> + %68 = flow.variable.address @"__iree_flow___sm_node420__m.layer-40.gamma" : !util.ptr<tensor<128xf32>> + %69 = flow.variable.address @"__iree_flow___sm_node421__m.layer-40.beta" : !util.ptr<tensor<128xf32>> + %70 = flow.variable.address @"__iree_flow___sm_node422__m.layer-40.moving_mean" : !util.ptr<tensor<128xf32>> + %71 = flow.variable.address @"__iree_flow___sm_node423__m.layer-40.moving_variance" : !util.ptr<tensor<128xf32>> + %72 = flow.variable.address @"__iree_flow___sm_node432__m.layer-42.kernel" : !util.ptr<tensor<3x3x128x128xf32>> + %73 = flow.variable.address @"__iree_flow___sm_node433__m.layer-42.bias" : !util.ptr<tensor<128xf32>> + %74 = 
flow.variable.address @"__iree_flow___sm_node439__m.layer-43.gamma" : !util.ptr<tensor<128xf32>> + %75 = flow.variable.address @"__iree_flow___sm_node440__m.layer-43.beta" : !util.ptr<tensor<128xf32>> + %76 = flow.variable.address @"__iree_flow___sm_node441__m.layer-43.moving_mean" : !util.ptr<tensor<128xf32>> + %77 = flow.variable.address @"__iree_flow___sm_node442__m.layer-43.moving_variance" : !util.ptr<tensor<128xf32>> + %78 = flow.variable.address @"__iree_flow___sm_node457__m.layer-46.kernel" : !util.ptr<tensor<1x1x128x512xf32>> + %79 = flow.variable.address @"__iree_flow___sm_node458__m.layer-46.bias" : !util.ptr<tensor<512xf32>> + %80 = flow.variable.address @"__iree_flow___sm_node451__m.layer-45.kernel" : !util.ptr<tensor<1x1x256x512xf32>> + %81 = flow.variable.address @"__iree_flow___sm_node452__m.layer-45.bias" : !util.ptr<tensor<512xf32>> + %82 = flow.variable.address @"__iree_flow___sm_node464__m.layer-47.gamma" : !util.ptr<tensor<512xf32>> + %83 = flow.variable.address @"__iree_flow___sm_node465__m.layer-47.beta" : !util.ptr<tensor<512xf32>> + %84 = flow.variable.address @"__iree_flow___sm_node466__m.layer-47.moving_mean" : !util.ptr<tensor<512xf32>> + %85 = flow.variable.address @"__iree_flow___sm_node467__m.layer-47.moving_variance" : !util.ptr<tensor<512xf32>> + %86 = flow.variable.address @"__iree_flow___sm_node473__m.layer-48.gamma" : !util.ptr<tensor<512xf32>> + %87 = flow.variable.address @"__iree_flow___sm_node474__m.layer-48.beta" : !util.ptr<tensor<512xf32>> + %88 = flow.variable.address @"__iree_flow___sm_node475__m.layer-48.moving_mean" : !util.ptr<tensor<512xf32>> + %89 = flow.variable.address @"__iree_flow___sm_node476__m.layer-48.moving_variance" : !util.ptr<tensor<512xf32>> + %90 = flow.variable.address @"__iree_flow___sm_node489__m.layer-51.kernel" : !util.ptr<tensor<1x1x512x128xf32>> + %91 = flow.variable.address @"__iree_flow___sm_node490__m.layer-51.bias" : !util.ptr<tensor<128xf32>> + %92 = flow.variable.address 
@"__iree_flow___sm_node496__m.layer-52.gamma" : !util.ptr<tensor<128xf32>> + %93 = flow.variable.address @"__iree_flow___sm_node497__m.layer-52.beta" : !util.ptr<tensor<128xf32>> + %94 = flow.variable.address @"__iree_flow___sm_node498__m.layer-52.moving_mean" : !util.ptr<tensor<128xf32>> + %95 = flow.variable.address @"__iree_flow___sm_node499__m.layer-52.moving_variance" : !util.ptr<tensor<128xf32>> + %96 = flow.variable.address @"__iree_flow___sm_node508__m.layer-54.kernel" : !util.ptr<tensor<3x3x128x128xf32>> + %97 = flow.variable.address @"__iree_flow___sm_node509__m.layer-54.bias" : !util.ptr<tensor<128xf32>> + %98 = flow.variable.address @"__iree_flow___sm_node515__m.layer-55.gamma" : !util.ptr<tensor<128xf32>> + %99 = flow.variable.address @"__iree_flow___sm_node516__m.layer-55.beta" : !util.ptr<tensor<128xf32>> + %100 = flow.variable.address @"__iree_flow___sm_node517__m.layer-55.moving_mean" : !util.ptr<tensor<128xf32>> + %101 = flow.variable.address @"__iree_flow___sm_node518__m.layer-55.moving_variance" : !util.ptr<tensor<128xf32>> + %102 = flow.variable.address @"__iree_flow___sm_node527__m.layer-57.kernel" : !util.ptr<tensor<1x1x128x512xf32>> + %103 = flow.variable.address @"__iree_flow___sm_node528__m.layer-57.bias" : !util.ptr<tensor<512xf32>> + %104 = flow.variable.address @"__iree_flow___sm_node534__m.layer-58.gamma" : !util.ptr<tensor<512xf32>> + %105 = flow.variable.address @"__iree_flow___sm_node535__m.layer-58.beta" : !util.ptr<tensor<512xf32>> + %106 = flow.variable.address @"__iree_flow___sm_node536__m.layer-58.moving_mean" : !util.ptr<tensor<512xf32>> + %107 = flow.variable.address @"__iree_flow___sm_node537__m.layer-58.moving_variance" : !util.ptr<tensor<512xf32>> + %108 = flow.variable.address @"__iree_flow___sm_node550__m.layer-61.kernel" : !util.ptr<tensor<1x1x512x128xf32>> + %109 = flow.variable.address @"__iree_flow___sm_node551__m.layer-61.bias" : !util.ptr<tensor<128xf32>> + %110 = flow.variable.address 
@"__iree_flow___sm_node557__m.layer-62.gamma" : !util.ptr<tensor<128xf32>> + %111 = flow.variable.address @"__iree_flow___sm_node558__m.layer-62.beta" : !util.ptr<tensor<128xf32>> + %112 = flow.variable.address @"__iree_flow___sm_node559__m.layer-62.moving_mean" : !util.ptr<tensor<128xf32>> + %113 = flow.variable.address @"__iree_flow___sm_node560__m.layer-62.moving_variance" : !util.ptr<tensor<128xf32>> + %114 = flow.variable.address @"__iree_flow___sm_node569__m.layer-64.kernel" : !util.ptr<tensor<3x3x128x128xf32>> + %115 = flow.variable.address @"__iree_flow___sm_node570__m.layer-64.bias" : !util.ptr<tensor<128xf32>> + %116 = flow.variable.address @"__iree_flow___sm_node576__m.layer-65.gamma" : !util.ptr<tensor<128xf32>> + %117 = flow.variable.address @"__iree_flow___sm_node577__m.layer-65.beta" : !util.ptr<tensor<128xf32>> + %118 = flow.variable.address @"__iree_flow___sm_node578__m.layer-65.moving_mean" : !util.ptr<tensor<128xf32>> + %119 = flow.variable.address @"__iree_flow___sm_node579__m.layer-65.moving_variance" : !util.ptr<tensor<128xf32>> + %120 = flow.variable.address @"__iree_flow___sm_node588__m.layer-67.kernel" : !util.ptr<tensor<1x1x128x512xf32>> + %121 = flow.variable.address @"__iree_flow___sm_node589__m.layer-67.bias" : !util.ptr<tensor<512xf32>> + %122 = flow.variable.address @"__iree_flow___sm_node595__m.layer-68.gamma" : !util.ptr<tensor<512xf32>> + %123 = flow.variable.address @"__iree_flow___sm_node596__m.layer-68.beta" : !util.ptr<tensor<512xf32>> + %124 = flow.variable.address @"__iree_flow___sm_node597__m.layer-68.moving_mean" : !util.ptr<tensor<512xf32>> + %125 = flow.variable.address @"__iree_flow___sm_node598__m.layer-68.moving_variance" : !util.ptr<tensor<512xf32>> + %126 = flow.variable.address @"__iree_flow___sm_node611__m.layer-71.kernel" : !util.ptr<tensor<1x1x512x128xf32>> + %127 = flow.variable.address @"__iree_flow___sm_node612__m.layer-71.bias" : !util.ptr<tensor<128xf32>> + %128 = flow.variable.address 
@"__iree_flow___sm_node618__m.layer-72.gamma" : !util.ptr<tensor<128xf32>> + %129 = flow.variable.address @"__iree_flow___sm_node619__m.layer-72.beta" : !util.ptr<tensor<128xf32>> + %130 = flow.variable.address @"__iree_flow___sm_node620__m.layer-72.moving_mean" : !util.ptr<tensor<128xf32>> + %131 = flow.variable.address @"__iree_flow___sm_node621__m.layer-72.moving_variance" : !util.ptr<tensor<128xf32>> + %132 = flow.variable.address @"__iree_flow___sm_node630__m.layer-74.kernel" : !util.ptr<tensor<3x3x128x128xf32>> + %133 = flow.variable.address @"__iree_flow___sm_node631__m.layer-74.bias" : !util.ptr<tensor<128xf32>> + %134 = flow.variable.address @"__iree_flow___sm_node637__m.layer-75.gamma" : !util.ptr<tensor<128xf32>> + %135 = flow.variable.address @"__iree_flow___sm_node638__m.layer-75.beta" : !util.ptr<tensor<128xf32>> + %136 = flow.variable.address @"__iree_flow___sm_node639__m.layer-75.moving_mean" : !util.ptr<tensor<128xf32>> + %137 = flow.variable.address @"__iree_flow___sm_node640__m.layer-75.moving_variance" : !util.ptr<tensor<128xf32>> + %138 = flow.variable.address @"__iree_flow___sm_node649__m.layer-77.kernel" : !util.ptr<tensor<1x1x128x512xf32>> + %139 = flow.variable.address @"__iree_flow___sm_node650__m.layer-77.bias" : !util.ptr<tensor<512xf32>> + %140 = flow.variable.address @"__iree_flow___sm_node656__m.layer-78.gamma" : !util.ptr<tensor<512xf32>> + %141 = flow.variable.address @"__iree_flow___sm_node657__m.layer-78.beta" : !util.ptr<tensor<512xf32>> + %142 = flow.variable.address @"__iree_flow___sm_node658__m.layer-78.moving_mean" : !util.ptr<tensor<512xf32>> + %143 = flow.variable.address @"__iree_flow___sm_node659__m.layer-78.moving_variance" : !util.ptr<tensor<512xf32>> + %144 = flow.variable.address @"__iree_flow___sm_node672__m.layer-81.kernel" : !util.ptr<tensor<1x1x512x256xf32>> + %145 = flow.variable.address @"__iree_flow___sm_node673__m.layer-81.bias" : !util.ptr<tensor<256xf32>> + %146 = flow.variable.address 
@"__iree_flow___sm_node679__m.layer-82.gamma" : !util.ptr<tensor<256xf32>> + %147 = flow.variable.address @"__iree_flow___sm_node680__m.layer-82.beta" : !util.ptr<tensor<256xf32>> + %148 = flow.variable.address @"__iree_flow___sm_node681__m.layer-82.moving_mean" : !util.ptr<tensor<256xf32>> + %149 = flow.variable.address @"__iree_flow___sm_node682__m.layer-82.moving_variance" : !util.ptr<tensor<256xf32>> + %150 = flow.variable.address @"__iree_flow___sm_node691__m.layer-84.kernel" : !util.ptr<tensor<3x3x256x256xf32>> + %151 = flow.variable.address @"__iree_flow___sm_node692__m.layer-84.bias" : !util.ptr<tensor<256xf32>> + %152 = flow.variable.address @"__iree_flow___sm_node698__m.layer-85.gamma" : !util.ptr<tensor<256xf32>> + %153 = flow.variable.address @"__iree_flow___sm_node699__m.layer-85.beta" : !util.ptr<tensor<256xf32>> + %154 = flow.variable.address @"__iree_flow___sm_node700__m.layer-85.moving_mean" : !util.ptr<tensor<256xf32>> + %155 = flow.variable.address @"__iree_flow___sm_node701__m.layer-85.moving_variance" : !util.ptr<tensor<256xf32>> + %156 = flow.variable.address @"__iree_flow___sm_node716__m.layer-88.kernel" : !util.ptr<tensor<1x1x256x1024xf32>> + %157 = flow.variable.address @"__iree_flow___sm_node717__m.layer-88.bias" : !util.ptr<tensor<1024xf32>> + %158 = flow.variable.address @"__iree_flow___sm_node710__m.layer-87.kernel" : !util.ptr<tensor<1x1x512x1024xf32>> + %159 = flow.variable.address @"__iree_flow___sm_node711__m.layer-87.bias" : !util.ptr<tensor<1024xf32>> + %160 = flow.variable.address @"__iree_flow___sm_node723__m.layer-89.gamma" : !util.ptr<tensor<1024xf32>> + %161 = flow.variable.address @"__iree_flow___sm_node724__m.layer-89.beta" : !util.ptr<tensor<1024xf32>> + %162 = flow.variable.address @"__iree_flow___sm_node725__m.layer-89.moving_mean" : !util.ptr<tensor<1024xf32>> + %163 = flow.variable.address @"__iree_flow___sm_node726__m.layer-89.moving_variance" : !util.ptr<tensor<1024xf32>> + %164 = flow.variable.address 
@"__iree_flow___sm_node732__m.layer-90.gamma" : !util.ptr<tensor<1024xf32>> + %165 = flow.variable.address @"__iree_flow___sm_node733__m.layer-90.beta" : !util.ptr<tensor<1024xf32>> + %166 = flow.variable.address @"__iree_flow___sm_node734__m.layer-90.moving_mean" : !util.ptr<tensor<1024xf32>> + %167 = flow.variable.address @"__iree_flow___sm_node735__m.layer-90.moving_variance" : !util.ptr<tensor<1024xf32>> + %168 = flow.variable.address @"__iree_flow___sm_node748__m.layer-93.kernel" : !util.ptr<tensor<1x1x1024x256xf32>> + %169 = flow.variable.address @"__iree_flow___sm_node749__m.layer-93.bias" : !util.ptr<tensor<256xf32>> + %170 = flow.variable.address @"__iree_flow___sm_node755__m.layer-94.gamma" : !util.ptr<tensor<256xf32>> + %171 = flow.variable.address @"__iree_flow___sm_node756__m.layer-94.beta" : !util.ptr<tensor<256xf32>> + %172 = flow.variable.address @"__iree_flow___sm_node757__m.layer-94.moving_mean" : !util.ptr<tensor<256xf32>> + %173 = flow.variable.address @"__iree_flow___sm_node758__m.layer-94.moving_variance" : !util.ptr<tensor<256xf32>> + %174 = flow.variable.address @"__iree_flow___sm_node767__m.layer-96.kernel" : !util.ptr<tensor<3x3x256x256xf32>> + %175 = flow.variable.address @"__iree_flow___sm_node768__m.layer-96.bias" : !util.ptr<tensor<256xf32>> + %176 = flow.variable.address @"__iree_flow___sm_node774__m.layer-97.gamma" : !util.ptr<tensor<256xf32>> + %177 = flow.variable.address @"__iree_flow___sm_node775__m.layer-97.beta" : !util.ptr<tensor<256xf32>> + %178 = flow.variable.address @"__iree_flow___sm_node776__m.layer-97.moving_mean" : !util.ptr<tensor<256xf32>> + %179 = flow.variable.address @"__iree_flow___sm_node777__m.layer-97.moving_variance" : !util.ptr<tensor<256xf32>> + %180 = flow.variable.address @"__iree_flow___sm_node786__m.layer-99.kernel" : !util.ptr<tensor<1x1x256x1024xf32>> + %181 = flow.variable.address @"__iree_flow___sm_node787__m.layer-99.bias" : !util.ptr<tensor<1024xf32>> + %182 = flow.variable.address 
@"__iree_flow___sm_node793__m.layer-100.gamma" : !util.ptr<tensor<1024xf32>> + %183 = flow.variable.address @"__iree_flow___sm_node794__m.layer-100.beta" : !util.ptr<tensor<1024xf32>> + %184 = flow.variable.address @"__iree_flow___sm_node795__m.layer-100.moving_mean" : !util.ptr<tensor<1024xf32>> + %185 = flow.variable.address @"__iree_flow___sm_node796__m.layer-100.moving_variance" : !util.ptr<tensor<1024xf32>> + %186 = flow.variable.address @"__iree_flow___sm_node809__m.layer-103.kernel" : !util.ptr<tensor<1x1x1024x256xf32>> + %187 = flow.variable.address @"__iree_flow___sm_node810__m.layer-103.bias" : !util.ptr<tensor<256xf32>> + %188 = flow.variable.address @"__iree_flow___sm_node816__m.layer-104.gamma" : !util.ptr<tensor<256xf32>> + %189 = flow.variable.address @"__iree_flow___sm_node817__m.layer-104.beta" : !util.ptr<tensor<256xf32>> + %190 = flow.variable.address @"__iree_flow___sm_node818__m.layer-104.moving_mean" : !util.ptr<tensor<256xf32>> + %191 = flow.variable.address @"__iree_flow___sm_node819__m.layer-104.moving_variance" : !util.ptr<tensor<256xf32>> + %192 = flow.variable.address @"__iree_flow___sm_node828__m.layer-106.kernel" : !util.ptr<tensor<3x3x256x256xf32>> + %193 = flow.variable.address @"__iree_flow___sm_node829__m.layer-106.bias" : !util.ptr<tensor<256xf32>> + %194 = flow.variable.address @"__iree_flow___sm_node835__m.layer-107.gamma" : !util.ptr<tensor<256xf32>> + %195 = flow.variable.address @"__iree_flow___sm_node836__m.layer-107.beta" : !util.ptr<tensor<256xf32>> + %196 = flow.variable.address @"__iree_flow___sm_node837__m.layer-107.moving_mean" : !util.ptr<tensor<256xf32>> + %197 = flow.variable.address @"__iree_flow___sm_node838__m.layer-107.moving_variance" : !util.ptr<tensor<256xf32>> + %198 = flow.variable.address @"__iree_flow___sm_node847__m.layer-109.kernel" : !util.ptr<tensor<1x1x256x1024xf32>> + %199 = flow.variable.address @"__iree_flow___sm_node848__m.layer-109.bias" : !util.ptr<tensor<1024xf32>> + %200 = 
flow.variable.address @"__iree_flow___sm_node854__m.layer-110.gamma" : !util.ptr<tensor<1024xf32>> + %201 = flow.variable.address @"__iree_flow___sm_node855__m.layer-110.beta" : !util.ptr<tensor<1024xf32>> + %202 = flow.variable.address @"__iree_flow___sm_node856__m.layer-110.moving_mean" : !util.ptr<tensor<1024xf32>> + %203 = flow.variable.address @"__iree_flow___sm_node857__m.layer-110.moving_variance" : !util.ptr<tensor<1024xf32>> + %204 = flow.variable.address @"__iree_flow___sm_node870__m.layer-113.kernel" : !util.ptr<tensor<1x1x1024x256xf32>> + %205 = flow.variable.address @"__iree_flow___sm_node871__m.layer-113.bias" : !util.ptr<tensor<256xf32>> + %206 = flow.variable.address @"__iree_flow___sm_node877__m.layer-114.gamma" : !util.ptr<tensor<256xf32>> + %207 = flow.variable.address @"__iree_flow___sm_node878__m.layer-114.beta" : !util.ptr<tensor<256xf32>> + %208 = flow.variable.address @"__iree_flow___sm_node879__m.layer-114.moving_mean" : !util.ptr<tensor<256xf32>> + %209 = flow.variable.address @"__iree_flow___sm_node880__m.layer-114.moving_variance" : !util.ptr<tensor<256xf32>> + %210 = flow.variable.address @"__iree_flow___sm_node889__m.layer-116.kernel" : !util.ptr<tensor<3x3x256x256xf32>> + %211 = flow.variable.address @"__iree_flow___sm_node890__m.layer-116.bias" : !util.ptr<tensor<256xf32>> + %212 = flow.variable.address @"__iree_flow___sm_node896__m.layer-117.gamma" : !util.ptr<tensor<256xf32>> + %213 = flow.variable.address @"__iree_flow___sm_node897__m.layer-117.beta" : !util.ptr<tensor<256xf32>> + %214 = flow.variable.address @"__iree_flow___sm_node898__m.layer-117.moving_mean" : !util.ptr<tensor<256xf32>> + %215 = flow.variable.address @"__iree_flow___sm_node899__m.layer-117.moving_variance" : !util.ptr<tensor<256xf32>> + %216 = flow.variable.address @"__iree_flow___sm_node908__m.layer-119.kernel" : !util.ptr<tensor<1x1x256x1024xf32>> + %217 = flow.variable.address @"__iree_flow___sm_node909__m.layer-119.bias" : !util.ptr<tensor<1024xf32>> + %218 
= flow.variable.address @"__iree_flow___sm_node915__m.layer-120.gamma" : !util.ptr<tensor<1024xf32>> + %219 = flow.variable.address @"__iree_flow___sm_node916__m.layer-120.beta" : !util.ptr<tensor<1024xf32>> + %220 = flow.variable.address @"__iree_flow___sm_node917__m.layer-120.moving_mean" : !util.ptr<tensor<1024xf32>> + %221 = flow.variable.address @"__iree_flow___sm_node918__m.layer-120.moving_variance" : !util.ptr<tensor<1024xf32>> + %222 = flow.variable.address @"__iree_flow___sm_node931__m.layer-123.kernel" : !util.ptr<tensor<1x1x1024x256xf32>> + %223 = flow.variable.address @"__iree_flow___sm_node932__m.layer-123.bias" : !util.ptr<tensor<256xf32>> + %224 = flow.variable.address @"__iree_flow___sm_node938__m.layer-124.gamma" : !util.ptr<tensor<256xf32>> + %225 = flow.variable.address @"__iree_flow___sm_node939__m.layer-124.beta" : !util.ptr<tensor<256xf32>> + %226 = flow.variable.address @"__iree_flow___sm_node940__m.layer-124.moving_mean" : !util.ptr<tensor<256xf32>> + %227 = flow.variable.address @"__iree_flow___sm_node941__m.layer-124.moving_variance" : !util.ptr<tensor<256xf32>> + %228 = flow.variable.address @"__iree_flow___sm_node950__m.layer-126.kernel" : !util.ptr<tensor<3x3x256x256xf32>> + %229 = flow.variable.address @"__iree_flow___sm_node951__m.layer-126.bias" : !util.ptr<tensor<256xf32>> + %230 = flow.variable.address @"__iree_flow___sm_node957__m.layer-127.gamma" : !util.ptr<tensor<256xf32>> + %231 = flow.variable.address @"__iree_flow___sm_node958__m.layer-127.beta" : !util.ptr<tensor<256xf32>> + %232 = flow.variable.address @"__iree_flow___sm_node959__m.layer-127.moving_mean" : !util.ptr<tensor<256xf32>> + %233 = flow.variable.address @"__iree_flow___sm_node960__m.layer-127.moving_variance" : !util.ptr<tensor<256xf32>> + %234 = flow.variable.address @"__iree_flow___sm_node969__m.layer-129.kernel" : !util.ptr<tensor<1x1x256x1024xf32>> + %235 = flow.variable.address @"__iree_flow___sm_node970__m.layer-129.bias" : !util.ptr<tensor<1024xf32>> + 
%236 = flow.variable.address @"__iree_flow___sm_node976__m.layer-130.gamma" : !util.ptr<tensor<1024xf32>> + %237 = flow.variable.address @"__iree_flow___sm_node977__m.layer-130.beta" : !util.ptr<tensor<1024xf32>> + %238 = flow.variable.address @"__iree_flow___sm_node978__m.layer-130.moving_mean" : !util.ptr<tensor<1024xf32>> + %239 = flow.variable.address @"__iree_flow___sm_node979__m.layer-130.moving_variance" : !util.ptr<tensor<1024xf32>> + %240 = flow.variable.address @"__iree_flow___sm_node992__m.layer-133.kernel" : !util.ptr<tensor<1x1x1024x256xf32>> + %241 = flow.variable.address @"__iree_flow___sm_node993__m.layer-133.bias" : !util.ptr<tensor<256xf32>> + %242 = flow.variable.address @"__iree_flow___sm_node999__m.layer-134.gamma" : !util.ptr<tensor<256xf32>> + %243 = flow.variable.address @"__iree_flow___sm_node1000__m.layer-134.beta" : !util.ptr<tensor<256xf32>> + %244 = flow.variable.address @"__iree_flow___sm_node1001__m.layer-134.moving_mean" : !util.ptr<tensor<256xf32>> + %245 = flow.variable.address @"__iree_flow___sm_node1002__m.layer-134.moving_variance" : !util.ptr<tensor<256xf32>> + %246 = flow.variable.address @"__iree_flow___sm_node1011__m.layer-136.kernel" : !util.ptr<tensor<3x3x256x256xf32>> + %247 = flow.variable.address @"__iree_flow___sm_node1012__m.layer-136.bias" : !util.ptr<tensor<256xf32>> + %248 = flow.variable.address @"__iree_flow___sm_node1018__m.layer-137.gamma" : !util.ptr<tensor<256xf32>> + %249 = flow.variable.address @"__iree_flow___sm_node1019__m.layer-137.beta" : !util.ptr<tensor<256xf32>> + %250 = flow.variable.address @"__iree_flow___sm_node1020__m.layer-137.moving_mean" : !util.ptr<tensor<256xf32>> + %251 = flow.variable.address @"__iree_flow___sm_node1021__m.layer-137.moving_variance" : !util.ptr<tensor<256xf32>> + %252 = flow.variable.address @"__iree_flow___sm_node1030__m.layer-139.kernel" : !util.ptr<tensor<1x1x256x1024xf32>> + %253 = flow.variable.address @"__iree_flow___sm_node1031__m.layer-139.bias" : 
!util.ptr<tensor<1024xf32>> + %254 = flow.variable.address @"__iree_flow___sm_node1037__m.layer-140.gamma" : !util.ptr<tensor<1024xf32>> + %255 = flow.variable.address @"__iree_flow___sm_node1038__m.layer-140.beta" : !util.ptr<tensor<1024xf32>> + %256 = flow.variable.address @"__iree_flow___sm_node1039__m.layer-140.moving_mean" : !util.ptr<tensor<1024xf32>> + %257 = flow.variable.address @"__iree_flow___sm_node1040__m.layer-140.moving_variance" : !util.ptr<tensor<1024xf32>> + %258 = flow.variable.address @"__iree_flow___sm_node1053__m.layer-143.kernel" : !util.ptr<tensor<1x1x1024x512xf32>> + %259 = flow.variable.address @"__iree_flow___sm_node1054__m.layer-143.bias" : !util.ptr<tensor<512xf32>> + %260 = flow.variable.address @"__iree_flow___sm_node1060__m.layer-144.gamma" : !util.ptr<tensor<512xf32>> + %261 = flow.variable.address @"__iree_flow___sm_node1061__m.layer-144.beta" : !util.ptr<tensor<512xf32>> + %262 = flow.variable.address @"__iree_flow___sm_node1062__m.layer-144.moving_mean" : !util.ptr<tensor<512xf32>> + %263 = flow.variable.address @"__iree_flow___sm_node1063__m.layer-144.moving_variance" : !util.ptr<tensor<512xf32>> + %264 = flow.variable.address @"__iree_flow___sm_node1072__m.layer-146.kernel" : !util.ptr<tensor<3x3x512x512xf32>> + %265 = flow.variable.address @"__iree_flow___sm_node1073__m.layer-146.bias" : !util.ptr<tensor<512xf32>> + %266 = flow.variable.address @"__iree_flow___sm_node1079__m.layer-147.gamma" : !util.ptr<tensor<512xf32>> + %267 = flow.variable.address @"__iree_flow___sm_node1080__m.layer-147.beta" : !util.ptr<tensor<512xf32>> + %268 = flow.variable.address @"__iree_flow___sm_node1081__m.layer-147.moving_mean" : !util.ptr<tensor<512xf32>> + %269 = flow.variable.address @"__iree_flow___sm_node1082__m.layer-147.moving_variance" : !util.ptr<tensor<512xf32>> + %270 = flow.variable.address @"__iree_flow___sm_node1097__m.layer-150.kernel" : !util.ptr<tensor<1x1x512x2048xf32>> + %271 = flow.variable.address 
@"__iree_flow___sm_node1098__m.layer-150.bias" : !util.ptr<tensor<2048xf32>> + %272 = flow.variable.address @"__iree_flow___sm_node1091__m.layer-149.kernel" : !util.ptr<tensor<1x1x1024x2048xf32>> + %273 = flow.variable.address @"__iree_flow___sm_node1092__m.layer-149.bias" : !util.ptr<tensor<2048xf32>> + %274 = flow.variable.address @"__iree_flow___sm_node1104__m.layer-151.gamma" : !util.ptr<tensor<2048xf32>> + %275 = flow.variable.address @"__iree_flow___sm_node1105__m.layer-151.beta" : !util.ptr<tensor<2048xf32>> + %276 = flow.variable.address @"__iree_flow___sm_node1106__m.layer-151.moving_mean" : !util.ptr<tensor<2048xf32>> + %277 = flow.variable.address @"__iree_flow___sm_node1107__m.layer-151.moving_variance" : !util.ptr<tensor<2048xf32>> + %278 = flow.variable.address @"__iree_flow___sm_node1113__m.layer-152.gamma" : !util.ptr<tensor<2048xf32>> + %279 = flow.variable.address @"__iree_flow___sm_node1114__m.layer-152.beta" : !util.ptr<tensor<2048xf32>> + %280 = flow.variable.address @"__iree_flow___sm_node1115__m.layer-152.moving_mean" : !util.ptr<tensor<2048xf32>> + %281 = flow.variable.address @"__iree_flow___sm_node1116__m.layer-152.moving_variance" : !util.ptr<tensor<2048xf32>> + %282 = flow.variable.address @"__iree_flow___sm_node1129__m.layer-155.kernel" : !util.ptr<tensor<1x1x2048x512xf32>> + %283 = flow.variable.address @"__iree_flow___sm_node1130__m.layer-155.bias" : !util.ptr<tensor<512xf32>> + %284 = flow.variable.address @"__iree_flow___sm_node1136__m.layer-156.gamma" : !util.ptr<tensor<512xf32>> + %285 = flow.variable.address @"__iree_flow___sm_node1137__m.layer-156.beta" : !util.ptr<tensor<512xf32>> + %286 = flow.variable.address @"__iree_flow___sm_node1138__m.layer-156.moving_mean" : !util.ptr<tensor<512xf32>> + %287 = flow.variable.address @"__iree_flow___sm_node1139__m.layer-156.moving_variance" : !util.ptr<tensor<512xf32>> + %288 = flow.variable.address @"__iree_flow___sm_node1148__m.layer-158.kernel" : !util.ptr<tensor<3x3x512x512xf32>> + 
%289 = flow.variable.address @"__iree_flow___sm_node1149__m.layer-158.bias" : !util.ptr<tensor<512xf32>> + %290 = flow.variable.address @"__iree_flow___sm_node1155__m.layer-159.gamma" : !util.ptr<tensor<512xf32>> + %291 = flow.variable.address @"__iree_flow___sm_node1156__m.layer-159.beta" : !util.ptr<tensor<512xf32>> + %292 = flow.variable.address @"__iree_flow___sm_node1157__m.layer-159.moving_mean" : !util.ptr<tensor<512xf32>> + %293 = flow.variable.address @"__iree_flow___sm_node1158__m.layer-159.moving_variance" : !util.ptr<tensor<512xf32>> + %294 = flow.variable.address @"__iree_flow___sm_node1167__m.layer-161.kernel" : !util.ptr<tensor<1x1x512x2048xf32>> + %295 = flow.variable.address @"__iree_flow___sm_node1168__m.layer-161.bias" : !util.ptr<tensor<2048xf32>> + %296 = flow.variable.address @"__iree_flow___sm_node1174__m.layer-162.gamma" : !util.ptr<tensor<2048xf32>> + %297 = flow.variable.address @"__iree_flow___sm_node1175__m.layer-162.beta" : !util.ptr<tensor<2048xf32>> + %298 = flow.variable.address @"__iree_flow___sm_node1176__m.layer-162.moving_mean" : !util.ptr<tensor<2048xf32>> + %299 = flow.variable.address @"__iree_flow___sm_node1177__m.layer-162.moving_variance" : !util.ptr<tensor<2048xf32>> + %300 = flow.variable.address @"__iree_flow___sm_node1190__m.layer-165.kernel" : !util.ptr<tensor<1x1x2048x512xf32>> + %301 = flow.variable.address @"__iree_flow___sm_node1191__m.layer-165.bias" : !util.ptr<tensor<512xf32>> + %302 = flow.variable.address @"__iree_flow___sm_node1197__m.layer-166.gamma" : !util.ptr<tensor<512xf32>> + %303 = flow.variable.address @"__iree_flow___sm_node1198__m.layer-166.beta" : !util.ptr<tensor<512xf32>> + %304 = flow.variable.address @"__iree_flow___sm_node1199__m.layer-166.moving_mean" : !util.ptr<tensor<512xf32>> + %305 = flow.variable.address @"__iree_flow___sm_node1200__m.layer-166.moving_variance" : !util.ptr<tensor<512xf32>> + %306 = flow.variable.address @"__iree_flow___sm_node1209__m.layer-168.kernel" : 
!util.ptr<tensor<3x3x512x512xf32>> + %307 = flow.variable.address @"__iree_flow___sm_node1210__m.layer-168.bias" : !util.ptr<tensor<512xf32>> + %308 = flow.variable.address @"__iree_flow___sm_node1216__m.layer-169.gamma" : !util.ptr<tensor<512xf32>> + %309 = flow.variable.address @"__iree_flow___sm_node1217__m.layer-169.beta" : !util.ptr<tensor<512xf32>> + %310 = flow.variable.address @"__iree_flow___sm_node1218__m.layer-169.moving_mean" : !util.ptr<tensor<512xf32>> + %311 = flow.variable.address @"__iree_flow___sm_node1219__m.layer-169.moving_variance" : !util.ptr<tensor<512xf32>> + %312 = flow.variable.address @"__iree_flow___sm_node1228__m.layer-171.kernel" : !util.ptr<tensor<1x1x512x2048xf32>> + %313 = flow.variable.address @"__iree_flow___sm_node1229__m.layer-171.bias" : !util.ptr<tensor<2048xf32>> + %314 = flow.variable.address @"__iree_flow___sm_node1235__m.layer-172.gamma" : !util.ptr<tensor<2048xf32>> + %315 = flow.variable.address @"__iree_flow___sm_node1236__m.layer-172.beta" : !util.ptr<tensor<2048xf32>> + %316 = flow.variable.address @"__iree_flow___sm_node1237__m.layer-172.moving_mean" : !util.ptr<tensor<2048xf32>> + %317 = flow.variable.address @"__iree_flow___sm_node1238__m.layer-172.moving_variance" : !util.ptr<tensor<2048xf32>> + %318 = flow.variable.address @"__iree_flow___sm_node1255__m.layer-176.kernel" : !util.ptr<tensor<2048x1000xf32>> + %319 = flow.variable.address @"__iree_flow___sm_node1256__m.layer-176.bias" : !util.ptr<tensor<1000xf32>> %320 = mhlo.constant dense<0.000000e+00> : tensor<1x112x112x64xf32> %321 = mhlo.constant dense<0.000000e+00> : tensor<1x56x56x64xf32> %322 = mhlo.constant dense<0.000000e+00> : tensor<1x56x56x256xf32> @@ -659,326 +659,326 @@ %329 = mhlo.constant dense<4.900000e+01> : tensor<1x2048xf32> %330 = mhlo.constant dense<0xFF800000> : tensor<f32> %331 = mhlo.constant dense<0.000000e+00> : tensor<f32> - %332 = flow.variable.load.indirect %5 : !iree.ptr<tensor<64xf32>> -> tensor<64xf32> - %333 = 
flow.variable.load.indirect %4 : !iree.ptr<tensor<64xf32>> -> tensor<64xf32> - %334 = flow.variable.load.indirect %3 : !iree.ptr<tensor<64xf32>> -> tensor<64xf32> - %335 = flow.variable.load.indirect %2 : !iree.ptr<tensor<64xf32>> -> tensor<64xf32> - %336 = flow.variable.load.indirect %1 : !iree.ptr<tensor<64xf32>> -> tensor<64xf32> - %337 = flow.variable.load.indirect %0 : !iree.ptr<tensor<7x7x3x64xf32>> -> tensor<7x7x3x64xf32> - %338 = flow.variable.load.indirect %25 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %339 = flow.variable.load.indirect %24 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %340 = flow.variable.load.indirect %23 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %341 = flow.variable.load.indirect %22 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %342 = flow.variable.load.indirect %21 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %343 = flow.variable.load.indirect %20 : !iree.ptr<tensor<1x1x64x256xf32>> -> tensor<1x1x64x256xf32> - %344 = flow.variable.load.indirect %11 : !iree.ptr<tensor<64xf32>> -> tensor<64xf32> - %345 = flow.variable.load.indirect %10 : !iree.ptr<tensor<64xf32>> -> tensor<64xf32> - %346 = flow.variable.load.indirect %9 : !iree.ptr<tensor<64xf32>> -> tensor<64xf32> - %347 = flow.variable.load.indirect %8 : !iree.ptr<tensor<64xf32>> -> tensor<64xf32> - %348 = flow.variable.load.indirect %7 : !iree.ptr<tensor<64xf32>> -> tensor<64xf32> - %349 = flow.variable.load.indirect %6 : !iree.ptr<tensor<1x1x64x64xf32>> -> tensor<1x1x64x64xf32> - %350 = flow.variable.load.indirect %17 : !iree.ptr<tensor<64xf32>> -> tensor<64xf32> - %351 = flow.variable.load.indirect %16 : !iree.ptr<tensor<64xf32>> -> tensor<64xf32> - %352 = flow.variable.load.indirect %15 : !iree.ptr<tensor<64xf32>> -> tensor<64xf32> - %353 = flow.variable.load.indirect %14 : !iree.ptr<tensor<64xf32>> -> tensor<64xf32> - %354 = flow.variable.load.indirect %13 : !iree.ptr<tensor<64xf32>> -> tensor<64xf32> - %355 = flow.variable.load.indirect %12 : 
!iree.ptr<tensor<3x3x64x64xf32>> -> tensor<3x3x64x64xf32> - %356 = flow.variable.load.indirect %29 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %357 = flow.variable.load.indirect %28 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %358 = flow.variable.load.indirect %27 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %359 = flow.variable.load.indirect %26 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %360 = flow.variable.load.indirect %19 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %361 = flow.variable.load.indirect %18 : !iree.ptr<tensor<1x1x64x256xf32>> -> tensor<1x1x64x256xf32> - %362 = flow.variable.load.indirect %35 : !iree.ptr<tensor<64xf32>> -> tensor<64xf32> - %363 = flow.variable.load.indirect %34 : !iree.ptr<tensor<64xf32>> -> tensor<64xf32> - %364 = flow.variable.load.indirect %33 : !iree.ptr<tensor<64xf32>> -> tensor<64xf32> - %365 = flow.variable.load.indirect %32 : !iree.ptr<tensor<64xf32>> -> tensor<64xf32> - %366 = flow.variable.load.indirect %31 : !iree.ptr<tensor<64xf32>> -> tensor<64xf32> - %367 = flow.variable.load.indirect %30 : !iree.ptr<tensor<1x1x256x64xf32>> -> tensor<1x1x256x64xf32> - %368 = flow.variable.load.indirect %41 : !iree.ptr<tensor<64xf32>> -> tensor<64xf32> - %369 = flow.variable.load.indirect %40 : !iree.ptr<tensor<64xf32>> -> tensor<64xf32> - %370 = flow.variable.load.indirect %39 : !iree.ptr<tensor<64xf32>> -> tensor<64xf32> - %371 = flow.variable.load.indirect %38 : !iree.ptr<tensor<64xf32>> -> tensor<64xf32> - %372 = flow.variable.load.indirect %37 : !iree.ptr<tensor<64xf32>> -> tensor<64xf32> - %373 = flow.variable.load.indirect %36 : !iree.ptr<tensor<3x3x64x64xf32>> -> tensor<3x3x64x64xf32> - %374 = flow.variable.load.indirect %47 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %375 = flow.variable.load.indirect %46 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %376 = flow.variable.load.indirect %45 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %377 = flow.variable.load.indirect %44 : 
!iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %378 = flow.variable.load.indirect %43 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %379 = flow.variable.load.indirect %42 : !iree.ptr<tensor<1x1x64x256xf32>> -> tensor<1x1x64x256xf32> - %380 = flow.variable.load.indirect %53 : !iree.ptr<tensor<64xf32>> -> tensor<64xf32> - %381 = flow.variable.load.indirect %52 : !iree.ptr<tensor<64xf32>> -> tensor<64xf32> - %382 = flow.variable.load.indirect %51 : !iree.ptr<tensor<64xf32>> -> tensor<64xf32> - %383 = flow.variable.load.indirect %50 : !iree.ptr<tensor<64xf32>> -> tensor<64xf32> - %384 = flow.variable.load.indirect %49 : !iree.ptr<tensor<64xf32>> -> tensor<64xf32> - %385 = flow.variable.load.indirect %48 : !iree.ptr<tensor<1x1x256x64xf32>> -> tensor<1x1x256x64xf32> - %386 = flow.variable.load.indirect %59 : !iree.ptr<tensor<64xf32>> -> tensor<64xf32> - %387 = flow.variable.load.indirect %58 : !iree.ptr<tensor<64xf32>> -> tensor<64xf32> - %388 = flow.variable.load.indirect %57 : !iree.ptr<tensor<64xf32>> -> tensor<64xf32> - %389 = flow.variable.load.indirect %56 : !iree.ptr<tensor<64xf32>> -> tensor<64xf32> - %390 = flow.variable.load.indirect %55 : !iree.ptr<tensor<64xf32>> -> tensor<64xf32> - %391 = flow.variable.load.indirect %54 : !iree.ptr<tensor<3x3x64x64xf32>> -> tensor<3x3x64x64xf32> - %392 = flow.variable.load.indirect %65 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %393 = flow.variable.load.indirect %64 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %394 = flow.variable.load.indirect %63 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %395 = flow.variable.load.indirect %62 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %396 = flow.variable.load.indirect %61 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %397 = flow.variable.load.indirect %60 : !iree.ptr<tensor<1x1x64x256xf32>> -> tensor<1x1x64x256xf32> - %398 = flow.variable.load.indirect %85 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %399 = flow.variable.load.indirect %84 : 
!iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %400 = flow.variable.load.indirect %83 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %401 = flow.variable.load.indirect %82 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %402 = flow.variable.load.indirect %81 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %403 = flow.variable.load.indirect %80 : !iree.ptr<tensor<1x1x256x512xf32>> -> tensor<1x1x256x512xf32> - %404 = flow.variable.load.indirect %71 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %405 = flow.variable.load.indirect %70 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %406 = flow.variable.load.indirect %69 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %407 = flow.variable.load.indirect %68 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %408 = flow.variable.load.indirect %67 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %409 = flow.variable.load.indirect %66 : !iree.ptr<tensor<1x1x256x128xf32>> -> tensor<1x1x256x128xf32> - %410 = flow.variable.load.indirect %77 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %411 = flow.variable.load.indirect %76 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %412 = flow.variable.load.indirect %75 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %413 = flow.variable.load.indirect %74 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %414 = flow.variable.load.indirect %73 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %415 = flow.variable.load.indirect %72 : !iree.ptr<tensor<3x3x128x128xf32>> -> tensor<3x3x128x128xf32> - %416 = flow.variable.load.indirect %89 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %417 = flow.variable.load.indirect %88 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %418 = flow.variable.load.indirect %87 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %419 = flow.variable.load.indirect %86 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %420 = flow.variable.load.indirect %79 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %421 = flow.variable.load.indirect 
%78 : !iree.ptr<tensor<1x1x128x512xf32>> -> tensor<1x1x128x512xf32> - %422 = flow.variable.load.indirect %95 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %423 = flow.variable.load.indirect %94 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %424 = flow.variable.load.indirect %93 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %425 = flow.variable.load.indirect %92 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %426 = flow.variable.load.indirect %91 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %427 = flow.variable.load.indirect %90 : !iree.ptr<tensor<1x1x512x128xf32>> -> tensor<1x1x512x128xf32> - %428 = flow.variable.load.indirect %101 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %429 = flow.variable.load.indirect %100 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %430 = flow.variable.load.indirect %99 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %431 = flow.variable.load.indirect %98 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %432 = flow.variable.load.indirect %97 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %433 = flow.variable.load.indirect %96 : !iree.ptr<tensor<3x3x128x128xf32>> -> tensor<3x3x128x128xf32> - %434 = flow.variable.load.indirect %107 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %435 = flow.variable.load.indirect %106 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %436 = flow.variable.load.indirect %105 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %437 = flow.variable.load.indirect %104 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %438 = flow.variable.load.indirect %103 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %439 = flow.variable.load.indirect %102 : !iree.ptr<tensor<1x1x128x512xf32>> -> tensor<1x1x128x512xf32> - %440 = flow.variable.load.indirect %113 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %441 = flow.variable.load.indirect %112 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %442 = flow.variable.load.indirect %111 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - 
%443 = flow.variable.load.indirect %110 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %444 = flow.variable.load.indirect %109 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %445 = flow.variable.load.indirect %108 : !iree.ptr<tensor<1x1x512x128xf32>> -> tensor<1x1x512x128xf32> - %446 = flow.variable.load.indirect %119 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %447 = flow.variable.load.indirect %118 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %448 = flow.variable.load.indirect %117 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %449 = flow.variable.load.indirect %116 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %450 = flow.variable.load.indirect %115 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %451 = flow.variable.load.indirect %114 : !iree.ptr<tensor<3x3x128x128xf32>> -> tensor<3x3x128x128xf32> - %452 = flow.variable.load.indirect %125 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %453 = flow.variable.load.indirect %124 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %454 = flow.variable.load.indirect %123 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %455 = flow.variable.load.indirect %122 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %456 = flow.variable.load.indirect %121 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %457 = flow.variable.load.indirect %120 : !iree.ptr<tensor<1x1x128x512xf32>> -> tensor<1x1x128x512xf32> - %458 = flow.variable.load.indirect %131 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %459 = flow.variable.load.indirect %130 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %460 = flow.variable.load.indirect %129 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %461 = flow.variable.load.indirect %128 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %462 = flow.variable.load.indirect %127 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %463 = flow.variable.load.indirect %126 : !iree.ptr<tensor<1x1x512x128xf32>> -> tensor<1x1x512x128xf32> - %464 = flow.variable.load.indirect %137 : 
!iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %465 = flow.variable.load.indirect %136 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %466 = flow.variable.load.indirect %135 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %467 = flow.variable.load.indirect %134 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %468 = flow.variable.load.indirect %133 : !iree.ptr<tensor<128xf32>> -> tensor<128xf32> - %469 = flow.variable.load.indirect %132 : !iree.ptr<tensor<3x3x128x128xf32>> -> tensor<3x3x128x128xf32> - %470 = flow.variable.load.indirect %143 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %471 = flow.variable.load.indirect %142 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %472 = flow.variable.load.indirect %141 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %473 = flow.variable.load.indirect %140 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %474 = flow.variable.load.indirect %139 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %475 = flow.variable.load.indirect %138 : !iree.ptr<tensor<1x1x128x512xf32>> -> tensor<1x1x128x512xf32> - %476 = flow.variable.load.indirect %163 : !iree.ptr<tensor<1024xf32>> -> tensor<1024xf32> - %477 = flow.variable.load.indirect %162 : !iree.ptr<tensor<1024xf32>> -> tensor<1024xf32> - %478 = flow.variable.load.indirect %161 : !iree.ptr<tensor<1024xf32>> -> tensor<1024xf32> - %479 = flow.variable.load.indirect %160 : !iree.ptr<tensor<1024xf32>> -> tensor<1024xf32> - %480 = flow.variable.load.indirect %159 : !iree.ptr<tensor<1024xf32>> -> tensor<1024xf32> - %481 = flow.variable.load.indirect %158 : !iree.ptr<tensor<1x1x512x1024xf32>> -> tensor<1x1x512x1024xf32> - %482 = flow.variable.load.indirect %149 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %483 = flow.variable.load.indirect %148 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %484 = flow.variable.load.indirect %147 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %485 = flow.variable.load.indirect %146 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - 
%486 = flow.variable.load.indirect %145 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %487 = flow.variable.load.indirect %144 : !iree.ptr<tensor<1x1x512x256xf32>> -> tensor<1x1x512x256xf32> - %488 = flow.variable.load.indirect %155 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %489 = flow.variable.load.indirect %154 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %490 = flow.variable.load.indirect %153 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %491 = flow.variable.load.indirect %152 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %492 = flow.variable.load.indirect %151 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %493 = flow.variable.load.indirect %150 : !iree.ptr<tensor<3x3x256x256xf32>> -> tensor<3x3x256x256xf32> - %494 = flow.variable.load.indirect %167 : !iree.ptr<tensor<1024xf32>> -> tensor<1024xf32> - %495 = flow.variable.load.indirect %166 : !iree.ptr<tensor<1024xf32>> -> tensor<1024xf32> - %496 = flow.variable.load.indirect %165 : !iree.ptr<tensor<1024xf32>> -> tensor<1024xf32> - %497 = flow.variable.load.indirect %164 : !iree.ptr<tensor<1024xf32>> -> tensor<1024xf32> - %498 = flow.variable.load.indirect %157 : !iree.ptr<tensor<1024xf32>> -> tensor<1024xf32> - %499 = flow.variable.load.indirect %156 : !iree.ptr<tensor<1x1x256x1024xf32>> -> tensor<1x1x256x1024xf32> - %500 = flow.variable.load.indirect %173 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %501 = flow.variable.load.indirect %172 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %502 = flow.variable.load.indirect %171 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %503 = flow.variable.load.indirect %170 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %504 = flow.variable.load.indirect %169 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %505 = flow.variable.load.indirect %168 : !iree.ptr<tensor<1x1x1024x256xf32>> -> tensor<1x1x1024x256xf32> - %506 = flow.variable.load.indirect %179 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %507 = 
flow.variable.load.indirect %178 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %508 = flow.variable.load.indirect %177 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %509 = flow.variable.load.indirect %176 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %510 = flow.variable.load.indirect %175 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %511 = flow.variable.load.indirect %174 : !iree.ptr<tensor<3x3x256x256xf32>> -> tensor<3x3x256x256xf32> - %512 = flow.variable.load.indirect %185 : !iree.ptr<tensor<1024xf32>> -> tensor<1024xf32> - %513 = flow.variable.load.indirect %184 : !iree.ptr<tensor<1024xf32>> -> tensor<1024xf32> - %514 = flow.variable.load.indirect %183 : !iree.ptr<tensor<1024xf32>> -> tensor<1024xf32> - %515 = flow.variable.load.indirect %182 : !iree.ptr<tensor<1024xf32>> -> tensor<1024xf32> - %516 = flow.variable.load.indirect %181 : !iree.ptr<tensor<1024xf32>> -> tensor<1024xf32> - %517 = flow.variable.load.indirect %180 : !iree.ptr<tensor<1x1x256x1024xf32>> -> tensor<1x1x256x1024xf32> - %518 = flow.variable.load.indirect %191 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %519 = flow.variable.load.indirect %190 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %520 = flow.variable.load.indirect %189 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %521 = flow.variable.load.indirect %188 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %522 = flow.variable.load.indirect %187 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %523 = flow.variable.load.indirect %186 : !iree.ptr<tensor<1x1x1024x256xf32>> -> tensor<1x1x1024x256xf32> - %524 = flow.variable.load.indirect %197 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %525 = flow.variable.load.indirect %196 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %526 = flow.variable.load.indirect %195 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %527 = flow.variable.load.indirect %194 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %528 = flow.variable.load.indirect %193 : 
!iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %529 = flow.variable.load.indirect %192 : !iree.ptr<tensor<3x3x256x256xf32>> -> tensor<3x3x256x256xf32> - %530 = flow.variable.load.indirect %203 : !iree.ptr<tensor<1024xf32>> -> tensor<1024xf32> - %531 = flow.variable.load.indirect %202 : !iree.ptr<tensor<1024xf32>> -> tensor<1024xf32> - %532 = flow.variable.load.indirect %201 : !iree.ptr<tensor<1024xf32>> -> tensor<1024xf32> - %533 = flow.variable.load.indirect %200 : !iree.ptr<tensor<1024xf32>> -> tensor<1024xf32> - %534 = flow.variable.load.indirect %199 : !iree.ptr<tensor<1024xf32>> -> tensor<1024xf32> - %535 = flow.variable.load.indirect %198 : !iree.ptr<tensor<1x1x256x1024xf32>> -> tensor<1x1x256x1024xf32> - %536 = flow.variable.load.indirect %209 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %537 = flow.variable.load.indirect %208 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %538 = flow.variable.load.indirect %207 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %539 = flow.variable.load.indirect %206 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %540 = flow.variable.load.indirect %205 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %541 = flow.variable.load.indirect %204 : !iree.ptr<tensor<1x1x1024x256xf32>> -> tensor<1x1x1024x256xf32> - %542 = flow.variable.load.indirect %215 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %543 = flow.variable.load.indirect %214 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %544 = flow.variable.load.indirect %213 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %545 = flow.variable.load.indirect %212 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %546 = flow.variable.load.indirect %211 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %547 = flow.variable.load.indirect %210 : !iree.ptr<tensor<3x3x256x256xf32>> -> tensor<3x3x256x256xf32> - %548 = flow.variable.load.indirect %221 : !iree.ptr<tensor<1024xf32>> -> tensor<1024xf32> - %549 = flow.variable.load.indirect %220 : !iree.ptr<tensor<1024xf32>> 
-> tensor<1024xf32> - %550 = flow.variable.load.indirect %219 : !iree.ptr<tensor<1024xf32>> -> tensor<1024xf32> - %551 = flow.variable.load.indirect %218 : !iree.ptr<tensor<1024xf32>> -> tensor<1024xf32> - %552 = flow.variable.load.indirect %217 : !iree.ptr<tensor<1024xf32>> -> tensor<1024xf32> - %553 = flow.variable.load.indirect %216 : !iree.ptr<tensor<1x1x256x1024xf32>> -> tensor<1x1x256x1024xf32> - %554 = flow.variable.load.indirect %227 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %555 = flow.variable.load.indirect %226 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %556 = flow.variable.load.indirect %225 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %557 = flow.variable.load.indirect %224 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %558 = flow.variable.load.indirect %223 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %559 = flow.variable.load.indirect %222 : !iree.ptr<tensor<1x1x1024x256xf32>> -> tensor<1x1x1024x256xf32> - %560 = flow.variable.load.indirect %233 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %561 = flow.variable.load.indirect %232 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %562 = flow.variable.load.indirect %231 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %563 = flow.variable.load.indirect %230 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %564 = flow.variable.load.indirect %229 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %565 = flow.variable.load.indirect %228 : !iree.ptr<tensor<3x3x256x256xf32>> -> tensor<3x3x256x256xf32> - %566 = flow.variable.load.indirect %239 : !iree.ptr<tensor<1024xf32>> -> tensor<1024xf32> - %567 = flow.variable.load.indirect %238 : !iree.ptr<tensor<1024xf32>> -> tensor<1024xf32> - %568 = flow.variable.load.indirect %237 : !iree.ptr<tensor<1024xf32>> -> tensor<1024xf32> - %569 = flow.variable.load.indirect %236 : !iree.ptr<tensor<1024xf32>> -> tensor<1024xf32> - %570 = flow.variable.load.indirect %235 : !iree.ptr<tensor<1024xf32>> -> tensor<1024xf32> - %571 = 
flow.variable.load.indirect %234 : !iree.ptr<tensor<1x1x256x1024xf32>> -> tensor<1x1x256x1024xf32> - %572 = flow.variable.load.indirect %245 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %573 = flow.variable.load.indirect %244 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %574 = flow.variable.load.indirect %243 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %575 = flow.variable.load.indirect %242 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %576 = flow.variable.load.indirect %241 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %577 = flow.variable.load.indirect %240 : !iree.ptr<tensor<1x1x1024x256xf32>> -> tensor<1x1x1024x256xf32> - %578 = flow.variable.load.indirect %251 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %579 = flow.variable.load.indirect %250 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %580 = flow.variable.load.indirect %249 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %581 = flow.variable.load.indirect %248 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %582 = flow.variable.load.indirect %247 : !iree.ptr<tensor<256xf32>> -> tensor<256xf32> - %583 = flow.variable.load.indirect %246 : !iree.ptr<tensor<3x3x256x256xf32>> -> tensor<3x3x256x256xf32> - %584 = flow.variable.load.indirect %257 : !iree.ptr<tensor<1024xf32>> -> tensor<1024xf32> - %585 = flow.variable.load.indirect %256 : !iree.ptr<tensor<1024xf32>> -> tensor<1024xf32> - %586 = flow.variable.load.indirect %255 : !iree.ptr<tensor<1024xf32>> -> tensor<1024xf32> - %587 = flow.variable.load.indirect %254 : !iree.ptr<tensor<1024xf32>> -> tensor<1024xf32> - %588 = flow.variable.load.indirect %253 : !iree.ptr<tensor<1024xf32>> -> tensor<1024xf32> - %589 = flow.variable.load.indirect %252 : !iree.ptr<tensor<1x1x256x1024xf32>> -> tensor<1x1x256x1024xf32> - %590 = flow.variable.load.indirect %277 : !iree.ptr<tensor<2048xf32>> -> tensor<2048xf32> - %591 = flow.variable.load.indirect %276 : !iree.ptr<tensor<2048xf32>> -> tensor<2048xf32> - %592 = 
flow.variable.load.indirect %275 : !iree.ptr<tensor<2048xf32>> -> tensor<2048xf32> - %593 = flow.variable.load.indirect %274 : !iree.ptr<tensor<2048xf32>> -> tensor<2048xf32> - %594 = flow.variable.load.indirect %273 : !iree.ptr<tensor<2048xf32>> -> tensor<2048xf32> - %595 = flow.variable.load.indirect %272 : !iree.ptr<tensor<1x1x1024x2048xf32>> -> tensor<1x1x1024x2048xf32> - %596 = flow.variable.load.indirect %263 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %597 = flow.variable.load.indirect %262 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %598 = flow.variable.load.indirect %261 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %599 = flow.variable.load.indirect %260 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %600 = flow.variable.load.indirect %259 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %601 = flow.variable.load.indirect %258 : !iree.ptr<tensor<1x1x1024x512xf32>> -> tensor<1x1x1024x512xf32> - %602 = flow.variable.load.indirect %269 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %603 = flow.variable.load.indirect %268 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %604 = flow.variable.load.indirect %267 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %605 = flow.variable.load.indirect %266 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %606 = flow.variable.load.indirect %265 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %607 = flow.variable.load.indirect %264 : !iree.ptr<tensor<3x3x512x512xf32>> -> tensor<3x3x512x512xf32> - %608 = flow.variable.load.indirect %281 : !iree.ptr<tensor<2048xf32>> -> tensor<2048xf32> - %609 = flow.variable.load.indirect %280 : !iree.ptr<tensor<2048xf32>> -> tensor<2048xf32> - %610 = flow.variable.load.indirect %279 : !iree.ptr<tensor<2048xf32>> -> tensor<2048xf32> - %611 = flow.variable.load.indirect %278 : !iree.ptr<tensor<2048xf32>> -> tensor<2048xf32> - %612 = flow.variable.load.indirect %271 : !iree.ptr<tensor<2048xf32>> -> tensor<2048xf32> - %613 = flow.variable.load.indirect %270 : 
!iree.ptr<tensor<1x1x512x2048xf32>> -> tensor<1x1x512x2048xf32> - %614 = flow.variable.load.indirect %287 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %615 = flow.variable.load.indirect %286 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %616 = flow.variable.load.indirect %285 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %617 = flow.variable.load.indirect %284 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %618 = flow.variable.load.indirect %283 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %619 = flow.variable.load.indirect %282 : !iree.ptr<tensor<1x1x2048x512xf32>> -> tensor<1x1x2048x512xf32> - %620 = flow.variable.load.indirect %293 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %621 = flow.variable.load.indirect %292 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %622 = flow.variable.load.indirect %291 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %623 = flow.variable.load.indirect %290 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %624 = flow.variable.load.indirect %289 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %625 = flow.variable.load.indirect %288 : !iree.ptr<tensor<3x3x512x512xf32>> -> tensor<3x3x512x512xf32> - %626 = flow.variable.load.indirect %299 : !iree.ptr<tensor<2048xf32>> -> tensor<2048xf32> - %627 = flow.variable.load.indirect %298 : !iree.ptr<tensor<2048xf32>> -> tensor<2048xf32> - %628 = flow.variable.load.indirect %297 : !iree.ptr<tensor<2048xf32>> -> tensor<2048xf32> - %629 = flow.variable.load.indirect %296 : !iree.ptr<tensor<2048xf32>> -> tensor<2048xf32> - %630 = flow.variable.load.indirect %295 : !iree.ptr<tensor<2048xf32>> -> tensor<2048xf32> - %631 = flow.variable.load.indirect %294 : !iree.ptr<tensor<1x1x512x2048xf32>> -> tensor<1x1x512x2048xf32> - %632 = flow.variable.load.indirect %305 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %633 = flow.variable.load.indirect %304 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %634 = flow.variable.load.indirect %303 : !iree.ptr<tensor<512xf32>> -> 
tensor<512xf32> - %635 = flow.variable.load.indirect %302 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %636 = flow.variable.load.indirect %301 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %637 = flow.variable.load.indirect %300 : !iree.ptr<tensor<1x1x2048x512xf32>> -> tensor<1x1x2048x512xf32> - %638 = flow.variable.load.indirect %311 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %639 = flow.variable.load.indirect %310 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %640 = flow.variable.load.indirect %309 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %641 = flow.variable.load.indirect %308 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %642 = flow.variable.load.indirect %307 : !iree.ptr<tensor<512xf32>> -> tensor<512xf32> - %643 = flow.variable.load.indirect %306 : !iree.ptr<tensor<3x3x512x512xf32>> -> tensor<3x3x512x512xf32> - %644 = flow.variable.load.indirect %317 : !iree.ptr<tensor<2048xf32>> -> tensor<2048xf32> - %645 = flow.variable.load.indirect %316 : !iree.ptr<tensor<2048xf32>> -> tensor<2048xf32> - %646 = flow.variable.load.indirect %315 : !iree.ptr<tensor<2048xf32>> -> tensor<2048xf32> - %647 = flow.variable.load.indirect %314 : !iree.ptr<tensor<2048xf32>> -> tensor<2048xf32> - %648 = flow.variable.load.indirect %313 : !iree.ptr<tensor<2048xf32>> -> tensor<2048xf32> - %649 = flow.variable.load.indirect %312 : !iree.ptr<tensor<1x1x512x2048xf32>> -> tensor<1x1x512x2048xf32> - %650 = flow.variable.load.indirect %319 : !iree.ptr<tensor<1000xf32>> -> tensor<1000xf32> - %651 = flow.variable.load.indirect %318 : !iree.ptr<tensor<2048x1000xf32>> -> tensor<2048x1000xf32> + %332 = flow.variable.load.indirect %5 : !util.ptr<tensor<64xf32>> -> tensor<64xf32> + %333 = flow.variable.load.indirect %4 : !util.ptr<tensor<64xf32>> -> tensor<64xf32> + %334 = flow.variable.load.indirect %3 : !util.ptr<tensor<64xf32>> -> tensor<64xf32> + %335 = flow.variable.load.indirect %2 : !util.ptr<tensor<64xf32>> -> tensor<64xf32> + %336 = 
flow.variable.load.indirect %1 : !util.ptr<tensor<64xf32>> -> tensor<64xf32> + %337 = flow.variable.load.indirect %0 : !util.ptr<tensor<7x7x3x64xf32>> -> tensor<7x7x3x64xf32> + %338 = flow.variable.load.indirect %25 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %339 = flow.variable.load.indirect %24 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %340 = flow.variable.load.indirect %23 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %341 = flow.variable.load.indirect %22 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %342 = flow.variable.load.indirect %21 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %343 = flow.variable.load.indirect %20 : !util.ptr<tensor<1x1x64x256xf32>> -> tensor<1x1x64x256xf32> + %344 = flow.variable.load.indirect %11 : !util.ptr<tensor<64xf32>> -> tensor<64xf32> + %345 = flow.variable.load.indirect %10 : !util.ptr<tensor<64xf32>> -> tensor<64xf32> + %346 = flow.variable.load.indirect %9 : !util.ptr<tensor<64xf32>> -> tensor<64xf32> + %347 = flow.variable.load.indirect %8 : !util.ptr<tensor<64xf32>> -> tensor<64xf32> + %348 = flow.variable.load.indirect %7 : !util.ptr<tensor<64xf32>> -> tensor<64xf32> + %349 = flow.variable.load.indirect %6 : !util.ptr<tensor<1x1x64x64xf32>> -> tensor<1x1x64x64xf32> + %350 = flow.variable.load.indirect %17 : !util.ptr<tensor<64xf32>> -> tensor<64xf32> + %351 = flow.variable.load.indirect %16 : !util.ptr<tensor<64xf32>> -> tensor<64xf32> + %352 = flow.variable.load.indirect %15 : !util.ptr<tensor<64xf32>> -> tensor<64xf32> + %353 = flow.variable.load.indirect %14 : !util.ptr<tensor<64xf32>> -> tensor<64xf32> + %354 = flow.variable.load.indirect %13 : !util.ptr<tensor<64xf32>> -> tensor<64xf32> + %355 = flow.variable.load.indirect %12 : !util.ptr<tensor<3x3x64x64xf32>> -> tensor<3x3x64x64xf32> + %356 = flow.variable.load.indirect %29 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %357 = flow.variable.load.indirect %28 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %358 = 
flow.variable.load.indirect %27 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %359 = flow.variable.load.indirect %26 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %360 = flow.variable.load.indirect %19 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %361 = flow.variable.load.indirect %18 : !util.ptr<tensor<1x1x64x256xf32>> -> tensor<1x1x64x256xf32> + %362 = flow.variable.load.indirect %35 : !util.ptr<tensor<64xf32>> -> tensor<64xf32> + %363 = flow.variable.load.indirect %34 : !util.ptr<tensor<64xf32>> -> tensor<64xf32> + %364 = flow.variable.load.indirect %33 : !util.ptr<tensor<64xf32>> -> tensor<64xf32> + %365 = flow.variable.load.indirect %32 : !util.ptr<tensor<64xf32>> -> tensor<64xf32> + %366 = flow.variable.load.indirect %31 : !util.ptr<tensor<64xf32>> -> tensor<64xf32> + %367 = flow.variable.load.indirect %30 : !util.ptr<tensor<1x1x256x64xf32>> -> tensor<1x1x256x64xf32> + %368 = flow.variable.load.indirect %41 : !util.ptr<tensor<64xf32>> -> tensor<64xf32> + %369 = flow.variable.load.indirect %40 : !util.ptr<tensor<64xf32>> -> tensor<64xf32> + %370 = flow.variable.load.indirect %39 : !util.ptr<tensor<64xf32>> -> tensor<64xf32> + %371 = flow.variable.load.indirect %38 : !util.ptr<tensor<64xf32>> -> tensor<64xf32> + %372 = flow.variable.load.indirect %37 : !util.ptr<tensor<64xf32>> -> tensor<64xf32> + %373 = flow.variable.load.indirect %36 : !util.ptr<tensor<3x3x64x64xf32>> -> tensor<3x3x64x64xf32> + %374 = flow.variable.load.indirect %47 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %375 = flow.variable.load.indirect %46 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %376 = flow.variable.load.indirect %45 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %377 = flow.variable.load.indirect %44 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %378 = flow.variable.load.indirect %43 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %379 = flow.variable.load.indirect %42 : !util.ptr<tensor<1x1x64x256xf32>> -> tensor<1x1x64x256xf32> + %380 = 
flow.variable.load.indirect %53 : !util.ptr<tensor<64xf32>> -> tensor<64xf32> + %381 = flow.variable.load.indirect %52 : !util.ptr<tensor<64xf32>> -> tensor<64xf32> + %382 = flow.variable.load.indirect %51 : !util.ptr<tensor<64xf32>> -> tensor<64xf32> + %383 = flow.variable.load.indirect %50 : !util.ptr<tensor<64xf32>> -> tensor<64xf32> + %384 = flow.variable.load.indirect %49 : !util.ptr<tensor<64xf32>> -> tensor<64xf32> + %385 = flow.variable.load.indirect %48 : !util.ptr<tensor<1x1x256x64xf32>> -> tensor<1x1x256x64xf32> + %386 = flow.variable.load.indirect %59 : !util.ptr<tensor<64xf32>> -> tensor<64xf32> + %387 = flow.variable.load.indirect %58 : !util.ptr<tensor<64xf32>> -> tensor<64xf32> + %388 = flow.variable.load.indirect %57 : !util.ptr<tensor<64xf32>> -> tensor<64xf32> + %389 = flow.variable.load.indirect %56 : !util.ptr<tensor<64xf32>> -> tensor<64xf32> + %390 = flow.variable.load.indirect %55 : !util.ptr<tensor<64xf32>> -> tensor<64xf32> + %391 = flow.variable.load.indirect %54 : !util.ptr<tensor<3x3x64x64xf32>> -> tensor<3x3x64x64xf32> + %392 = flow.variable.load.indirect %65 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %393 = flow.variable.load.indirect %64 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %394 = flow.variable.load.indirect %63 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %395 = flow.variable.load.indirect %62 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %396 = flow.variable.load.indirect %61 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %397 = flow.variable.load.indirect %60 : !util.ptr<tensor<1x1x64x256xf32>> -> tensor<1x1x64x256xf32> + %398 = flow.variable.load.indirect %85 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %399 = flow.variable.load.indirect %84 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %400 = flow.variable.load.indirect %83 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %401 = flow.variable.load.indirect %82 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %402 = 
flow.variable.load.indirect %81 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %403 = flow.variable.load.indirect %80 : !util.ptr<tensor<1x1x256x512xf32>> -> tensor<1x1x256x512xf32> + %404 = flow.variable.load.indirect %71 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %405 = flow.variable.load.indirect %70 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %406 = flow.variable.load.indirect %69 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %407 = flow.variable.load.indirect %68 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %408 = flow.variable.load.indirect %67 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %409 = flow.variable.load.indirect %66 : !util.ptr<tensor<1x1x256x128xf32>> -> tensor<1x1x256x128xf32> + %410 = flow.variable.load.indirect %77 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %411 = flow.variable.load.indirect %76 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %412 = flow.variable.load.indirect %75 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %413 = flow.variable.load.indirect %74 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %414 = flow.variable.load.indirect %73 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %415 = flow.variable.load.indirect %72 : !util.ptr<tensor<3x3x128x128xf32>> -> tensor<3x3x128x128xf32> + %416 = flow.variable.load.indirect %89 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %417 = flow.variable.load.indirect %88 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %418 = flow.variable.load.indirect %87 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %419 = flow.variable.load.indirect %86 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %420 = flow.variable.load.indirect %79 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %421 = flow.variable.load.indirect %78 : !util.ptr<tensor<1x1x128x512xf32>> -> tensor<1x1x128x512xf32> + %422 = flow.variable.load.indirect %95 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %423 = flow.variable.load.indirect %94 : !util.ptr<tensor<128xf32>> -> 
tensor<128xf32> + %424 = flow.variable.load.indirect %93 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %425 = flow.variable.load.indirect %92 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %426 = flow.variable.load.indirect %91 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %427 = flow.variable.load.indirect %90 : !util.ptr<tensor<1x1x512x128xf32>> -> tensor<1x1x512x128xf32> + %428 = flow.variable.load.indirect %101 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %429 = flow.variable.load.indirect %100 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %430 = flow.variable.load.indirect %99 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %431 = flow.variable.load.indirect %98 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %432 = flow.variable.load.indirect %97 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %433 = flow.variable.load.indirect %96 : !util.ptr<tensor<3x3x128x128xf32>> -> tensor<3x3x128x128xf32> + %434 = flow.variable.load.indirect %107 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %435 = flow.variable.load.indirect %106 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %436 = flow.variable.load.indirect %105 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %437 = flow.variable.load.indirect %104 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %438 = flow.variable.load.indirect %103 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %439 = flow.variable.load.indirect %102 : !util.ptr<tensor<1x1x128x512xf32>> -> tensor<1x1x128x512xf32> + %440 = flow.variable.load.indirect %113 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %441 = flow.variable.load.indirect %112 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %442 = flow.variable.load.indirect %111 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %443 = flow.variable.load.indirect %110 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %444 = flow.variable.load.indirect %109 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %445 = flow.variable.load.indirect %108 : 
!util.ptr<tensor<1x1x512x128xf32>> -> tensor<1x1x512x128xf32> + %446 = flow.variable.load.indirect %119 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %447 = flow.variable.load.indirect %118 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %448 = flow.variable.load.indirect %117 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %449 = flow.variable.load.indirect %116 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %450 = flow.variable.load.indirect %115 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %451 = flow.variable.load.indirect %114 : !util.ptr<tensor<3x3x128x128xf32>> -> tensor<3x3x128x128xf32> + %452 = flow.variable.load.indirect %125 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %453 = flow.variable.load.indirect %124 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %454 = flow.variable.load.indirect %123 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %455 = flow.variable.load.indirect %122 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %456 = flow.variable.load.indirect %121 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %457 = flow.variable.load.indirect %120 : !util.ptr<tensor<1x1x128x512xf32>> -> tensor<1x1x128x512xf32> + %458 = flow.variable.load.indirect %131 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %459 = flow.variable.load.indirect %130 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %460 = flow.variable.load.indirect %129 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %461 = flow.variable.load.indirect %128 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %462 = flow.variable.load.indirect %127 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %463 = flow.variable.load.indirect %126 : !util.ptr<tensor<1x1x512x128xf32>> -> tensor<1x1x512x128xf32> + %464 = flow.variable.load.indirect %137 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %465 = flow.variable.load.indirect %136 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %466 = flow.variable.load.indirect %135 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> 
+ %467 = flow.variable.load.indirect %134 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %468 = flow.variable.load.indirect %133 : !util.ptr<tensor<128xf32>> -> tensor<128xf32> + %469 = flow.variable.load.indirect %132 : !util.ptr<tensor<3x3x128x128xf32>> -> tensor<3x3x128x128xf32> + %470 = flow.variable.load.indirect %143 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %471 = flow.variable.load.indirect %142 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %472 = flow.variable.load.indirect %141 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %473 = flow.variable.load.indirect %140 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %474 = flow.variable.load.indirect %139 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %475 = flow.variable.load.indirect %138 : !util.ptr<tensor<1x1x128x512xf32>> -> tensor<1x1x128x512xf32> + %476 = flow.variable.load.indirect %163 : !util.ptr<tensor<1024xf32>> -> tensor<1024xf32> + %477 = flow.variable.load.indirect %162 : !util.ptr<tensor<1024xf32>> -> tensor<1024xf32> + %478 = flow.variable.load.indirect %161 : !util.ptr<tensor<1024xf32>> -> tensor<1024xf32> + %479 = flow.variable.load.indirect %160 : !util.ptr<tensor<1024xf32>> -> tensor<1024xf32> + %480 = flow.variable.load.indirect %159 : !util.ptr<tensor<1024xf32>> -> tensor<1024xf32> + %481 = flow.variable.load.indirect %158 : !util.ptr<tensor<1x1x512x1024xf32>> -> tensor<1x1x512x1024xf32> + %482 = flow.variable.load.indirect %149 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %483 = flow.variable.load.indirect %148 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %484 = flow.variable.load.indirect %147 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %485 = flow.variable.load.indirect %146 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %486 = flow.variable.load.indirect %145 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %487 = flow.variable.load.indirect %144 : !util.ptr<tensor<1x1x512x256xf32>> -> tensor<1x1x512x256xf32> + %488 = 
flow.variable.load.indirect %155 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %489 = flow.variable.load.indirect %154 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %490 = flow.variable.load.indirect %153 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %491 = flow.variable.load.indirect %152 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %492 = flow.variable.load.indirect %151 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %493 = flow.variable.load.indirect %150 : !util.ptr<tensor<3x3x256x256xf32>> -> tensor<3x3x256x256xf32> + %494 = flow.variable.load.indirect %167 : !util.ptr<tensor<1024xf32>> -> tensor<1024xf32> + %495 = flow.variable.load.indirect %166 : !util.ptr<tensor<1024xf32>> -> tensor<1024xf32> + %496 = flow.variable.load.indirect %165 : !util.ptr<tensor<1024xf32>> -> tensor<1024xf32> + %497 = flow.variable.load.indirect %164 : !util.ptr<tensor<1024xf32>> -> tensor<1024xf32> + %498 = flow.variable.load.indirect %157 : !util.ptr<tensor<1024xf32>> -> tensor<1024xf32> + %499 = flow.variable.load.indirect %156 : !util.ptr<tensor<1x1x256x1024xf32>> -> tensor<1x1x256x1024xf32> + %500 = flow.variable.load.indirect %173 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %501 = flow.variable.load.indirect %172 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %502 = flow.variable.load.indirect %171 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %503 = flow.variable.load.indirect %170 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %504 = flow.variable.load.indirect %169 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %505 = flow.variable.load.indirect %168 : !util.ptr<tensor<1x1x1024x256xf32>> -> tensor<1x1x1024x256xf32> + %506 = flow.variable.load.indirect %179 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %507 = flow.variable.load.indirect %178 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %508 = flow.variable.load.indirect %177 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %509 = flow.variable.load.indirect %176 : 
!util.ptr<tensor<256xf32>> -> tensor<256xf32> + %510 = flow.variable.load.indirect %175 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %511 = flow.variable.load.indirect %174 : !util.ptr<tensor<3x3x256x256xf32>> -> tensor<3x3x256x256xf32> + %512 = flow.variable.load.indirect %185 : !util.ptr<tensor<1024xf32>> -> tensor<1024xf32> + %513 = flow.variable.load.indirect %184 : !util.ptr<tensor<1024xf32>> -> tensor<1024xf32> + %514 = flow.variable.load.indirect %183 : !util.ptr<tensor<1024xf32>> -> tensor<1024xf32> + %515 = flow.variable.load.indirect %182 : !util.ptr<tensor<1024xf32>> -> tensor<1024xf32> + %516 = flow.variable.load.indirect %181 : !util.ptr<tensor<1024xf32>> -> tensor<1024xf32> + %517 = flow.variable.load.indirect %180 : !util.ptr<tensor<1x1x256x1024xf32>> -> tensor<1x1x256x1024xf32> + %518 = flow.variable.load.indirect %191 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %519 = flow.variable.load.indirect %190 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %520 = flow.variable.load.indirect %189 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %521 = flow.variable.load.indirect %188 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %522 = flow.variable.load.indirect %187 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %523 = flow.variable.load.indirect %186 : !util.ptr<tensor<1x1x1024x256xf32>> -> tensor<1x1x1024x256xf32> + %524 = flow.variable.load.indirect %197 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %525 = flow.variable.load.indirect %196 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %526 = flow.variable.load.indirect %195 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %527 = flow.variable.load.indirect %194 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %528 = flow.variable.load.indirect %193 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %529 = flow.variable.load.indirect %192 : !util.ptr<tensor<3x3x256x256xf32>> -> tensor<3x3x256x256xf32> + %530 = flow.variable.load.indirect %203 : !util.ptr<tensor<1024xf32>> -> 
tensor<1024xf32> + %531 = flow.variable.load.indirect %202 : !util.ptr<tensor<1024xf32>> -> tensor<1024xf32> + %532 = flow.variable.load.indirect %201 : !util.ptr<tensor<1024xf32>> -> tensor<1024xf32> + %533 = flow.variable.load.indirect %200 : !util.ptr<tensor<1024xf32>> -> tensor<1024xf32> + %534 = flow.variable.load.indirect %199 : !util.ptr<tensor<1024xf32>> -> tensor<1024xf32> + %535 = flow.variable.load.indirect %198 : !util.ptr<tensor<1x1x256x1024xf32>> -> tensor<1x1x256x1024xf32> + %536 = flow.variable.load.indirect %209 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %537 = flow.variable.load.indirect %208 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %538 = flow.variable.load.indirect %207 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %539 = flow.variable.load.indirect %206 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %540 = flow.variable.load.indirect %205 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %541 = flow.variable.load.indirect %204 : !util.ptr<tensor<1x1x1024x256xf32>> -> tensor<1x1x1024x256xf32> + %542 = flow.variable.load.indirect %215 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %543 = flow.variable.load.indirect %214 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %544 = flow.variable.load.indirect %213 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %545 = flow.variable.load.indirect %212 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %546 = flow.variable.load.indirect %211 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %547 = flow.variable.load.indirect %210 : !util.ptr<tensor<3x3x256x256xf32>> -> tensor<3x3x256x256xf32> + %548 = flow.variable.load.indirect %221 : !util.ptr<tensor<1024xf32>> -> tensor<1024xf32> + %549 = flow.variable.load.indirect %220 : !util.ptr<tensor<1024xf32>> -> tensor<1024xf32> + %550 = flow.variable.load.indirect %219 : !util.ptr<tensor<1024xf32>> -> tensor<1024xf32> + %551 = flow.variable.load.indirect %218 : !util.ptr<tensor<1024xf32>> -> tensor<1024xf32> + %552 = 
flow.variable.load.indirect %217 : !util.ptr<tensor<1024xf32>> -> tensor<1024xf32> + %553 = flow.variable.load.indirect %216 : !util.ptr<tensor<1x1x256x1024xf32>> -> tensor<1x1x256x1024xf32> + %554 = flow.variable.load.indirect %227 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %555 = flow.variable.load.indirect %226 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %556 = flow.variable.load.indirect %225 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %557 = flow.variable.load.indirect %224 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %558 = flow.variable.load.indirect %223 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %559 = flow.variable.load.indirect %222 : !util.ptr<tensor<1x1x1024x256xf32>> -> tensor<1x1x1024x256xf32> + %560 = flow.variable.load.indirect %233 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %561 = flow.variable.load.indirect %232 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %562 = flow.variable.load.indirect %231 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %563 = flow.variable.load.indirect %230 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %564 = flow.variable.load.indirect %229 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %565 = flow.variable.load.indirect %228 : !util.ptr<tensor<3x3x256x256xf32>> -> tensor<3x3x256x256xf32> + %566 = flow.variable.load.indirect %239 : !util.ptr<tensor<1024xf32>> -> tensor<1024xf32> + %567 = flow.variable.load.indirect %238 : !util.ptr<tensor<1024xf32>> -> tensor<1024xf32> + %568 = flow.variable.load.indirect %237 : !util.ptr<tensor<1024xf32>> -> tensor<1024xf32> + %569 = flow.variable.load.indirect %236 : !util.ptr<tensor<1024xf32>> -> tensor<1024xf32> + %570 = flow.variable.load.indirect %235 : !util.ptr<tensor<1024xf32>> -> tensor<1024xf32> + %571 = flow.variable.load.indirect %234 : !util.ptr<tensor<1x1x256x1024xf32>> -> tensor<1x1x256x1024xf32> + %572 = flow.variable.load.indirect %245 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %573 = flow.variable.load.indirect 
%244 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %574 = flow.variable.load.indirect %243 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %575 = flow.variable.load.indirect %242 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %576 = flow.variable.load.indirect %241 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %577 = flow.variable.load.indirect %240 : !util.ptr<tensor<1x1x1024x256xf32>> -> tensor<1x1x1024x256xf32> + %578 = flow.variable.load.indirect %251 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %579 = flow.variable.load.indirect %250 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %580 = flow.variable.load.indirect %249 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %581 = flow.variable.load.indirect %248 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %582 = flow.variable.load.indirect %247 : !util.ptr<tensor<256xf32>> -> tensor<256xf32> + %583 = flow.variable.load.indirect %246 : !util.ptr<tensor<3x3x256x256xf32>> -> tensor<3x3x256x256xf32> + %584 = flow.variable.load.indirect %257 : !util.ptr<tensor<1024xf32>> -> tensor<1024xf32> + %585 = flow.variable.load.indirect %256 : !util.ptr<tensor<1024xf32>> -> tensor<1024xf32> + %586 = flow.variable.load.indirect %255 : !util.ptr<tensor<1024xf32>> -> tensor<1024xf32> + %587 = flow.variable.load.indirect %254 : !util.ptr<tensor<1024xf32>> -> tensor<1024xf32> + %588 = flow.variable.load.indirect %253 : !util.ptr<tensor<1024xf32>> -> tensor<1024xf32> + %589 = flow.variable.load.indirect %252 : !util.ptr<tensor<1x1x256x1024xf32>> -> tensor<1x1x256x1024xf32> + %590 = flow.variable.load.indirect %277 : !util.ptr<tensor<2048xf32>> -> tensor<2048xf32> + %591 = flow.variable.load.indirect %276 : !util.ptr<tensor<2048xf32>> -> tensor<2048xf32> + %592 = flow.variable.load.indirect %275 : !util.ptr<tensor<2048xf32>> -> tensor<2048xf32> + %593 = flow.variable.load.indirect %274 : !util.ptr<tensor<2048xf32>> -> tensor<2048xf32> + %594 = flow.variable.load.indirect %273 : !util.ptr<tensor<2048xf32>> -> 
tensor<2048xf32> + %595 = flow.variable.load.indirect %272 : !util.ptr<tensor<1x1x1024x2048xf32>> -> tensor<1x1x1024x2048xf32> + %596 = flow.variable.load.indirect %263 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %597 = flow.variable.load.indirect %262 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %598 = flow.variable.load.indirect %261 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %599 = flow.variable.load.indirect %260 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %600 = flow.variable.load.indirect %259 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %601 = flow.variable.load.indirect %258 : !util.ptr<tensor<1x1x1024x512xf32>> -> tensor<1x1x1024x512xf32> + %602 = flow.variable.load.indirect %269 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %603 = flow.variable.load.indirect %268 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %604 = flow.variable.load.indirect %267 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %605 = flow.variable.load.indirect %266 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %606 = flow.variable.load.indirect %265 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %607 = flow.variable.load.indirect %264 : !util.ptr<tensor<3x3x512x512xf32>> -> tensor<3x3x512x512xf32> + %608 = flow.variable.load.indirect %281 : !util.ptr<tensor<2048xf32>> -> tensor<2048xf32> + %609 = flow.variable.load.indirect %280 : !util.ptr<tensor<2048xf32>> -> tensor<2048xf32> + %610 = flow.variable.load.indirect %279 : !util.ptr<tensor<2048xf32>> -> tensor<2048xf32> + %611 = flow.variable.load.indirect %278 : !util.ptr<tensor<2048xf32>> -> tensor<2048xf32> + %612 = flow.variable.load.indirect %271 : !util.ptr<tensor<2048xf32>> -> tensor<2048xf32> + %613 = flow.variable.load.indirect %270 : !util.ptr<tensor<1x1x512x2048xf32>> -> tensor<1x1x512x2048xf32> + %614 = flow.variable.load.indirect %287 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %615 = flow.variable.load.indirect %286 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %616 = 
flow.variable.load.indirect %285 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %617 = flow.variable.load.indirect %284 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %618 = flow.variable.load.indirect %283 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %619 = flow.variable.load.indirect %282 : !util.ptr<tensor<1x1x2048x512xf32>> -> tensor<1x1x2048x512xf32> + %620 = flow.variable.load.indirect %293 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %621 = flow.variable.load.indirect %292 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %622 = flow.variable.load.indirect %291 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %623 = flow.variable.load.indirect %290 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %624 = flow.variable.load.indirect %289 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %625 = flow.variable.load.indirect %288 : !util.ptr<tensor<3x3x512x512xf32>> -> tensor<3x3x512x512xf32> + %626 = flow.variable.load.indirect %299 : !util.ptr<tensor<2048xf32>> -> tensor<2048xf32> + %627 = flow.variable.load.indirect %298 : !util.ptr<tensor<2048xf32>> -> tensor<2048xf32> + %628 = flow.variable.load.indirect %297 : !util.ptr<tensor<2048xf32>> -> tensor<2048xf32> + %629 = flow.variable.load.indirect %296 : !util.ptr<tensor<2048xf32>> -> tensor<2048xf32> + %630 = flow.variable.load.indirect %295 : !util.ptr<tensor<2048xf32>> -> tensor<2048xf32> + %631 = flow.variable.load.indirect %294 : !util.ptr<tensor<1x1x512x2048xf32>> -> tensor<1x1x512x2048xf32> + %632 = flow.variable.load.indirect %305 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %633 = flow.variable.load.indirect %304 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %634 = flow.variable.load.indirect %303 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %635 = flow.variable.load.indirect %302 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %636 = flow.variable.load.indirect %301 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %637 = flow.variable.load.indirect %300 : 
!util.ptr<tensor<1x1x2048x512xf32>> -> tensor<1x1x2048x512xf32> + %638 = flow.variable.load.indirect %311 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %639 = flow.variable.load.indirect %310 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %640 = flow.variable.load.indirect %309 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %641 = flow.variable.load.indirect %308 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %642 = flow.variable.load.indirect %307 : !util.ptr<tensor<512xf32>> -> tensor<512xf32> + %643 = flow.variable.load.indirect %306 : !util.ptr<tensor<3x3x512x512xf32>> -> tensor<3x3x512x512xf32> + %644 = flow.variable.load.indirect %317 : !util.ptr<tensor<2048xf32>> -> tensor<2048xf32> + %645 = flow.variable.load.indirect %316 : !util.ptr<tensor<2048xf32>> -> tensor<2048xf32> + %646 = flow.variable.load.indirect %315 : !util.ptr<tensor<2048xf32>> -> tensor<2048xf32> + %647 = flow.variable.load.indirect %314 : !util.ptr<tensor<2048xf32>> -> tensor<2048xf32> + %648 = flow.variable.load.indirect %313 : !util.ptr<tensor<2048xf32>> -> tensor<2048xf32> + %649 = flow.variable.load.indirect %312 : !util.ptr<tensor<1x1x512x2048xf32>> -> tensor<1x1x512x2048xf32> + %650 = flow.variable.load.indirect %319 : !util.ptr<tensor<1000xf32>> -> tensor<1000xf32> + %651 = flow.variable.load.indirect %318 : !util.ptr<tensor<2048x1000xf32>> -> tensor<2048x1000xf32> %652 = "mhlo.pad"(%arg0, %331) {edge_padding_high = dense<[0, 3, 3, 0]> : tensor<4xi64>, edge_padding_low = dense<[0, 3, 3, 0]> : tensor<4xi64>, interior_padding = dense<0> : tensor<4xi64>} : (tensor<1x224x224x3xf32>, tensor<f32>) -> tensor<1x230x230x3xf32> %653 = "mhlo.convolution"(%652, %337) {batch_group_count = 1 : i64, dimension_numbers = {input_batch_dimension = 0 : i64, input_feature_dimension = 3 : i64, input_spatial_dimensions = dense<[1, 2]> : tensor<2xi64>, kernel_input_feature_dimension = 2 : i64, kernel_output_feature_dimension = 3 : i64, kernel_spatial_dimensions = dense<[0, 1]> : tensor<2xi64>, 
output_batch_dimension = 0 : i64, output_feature_dimension = 3 : i64, output_spatial_dimensions = dense<[1, 2]> : tensor<2xi64>}, feature_group_count = 1 : i64, padding = dense<0> : tensor<2x2xi64>, rhs_dilation = dense<1> : tensor<2xi64>, window_strides = dense<2> : tensor<2xi64>} : (tensor<1x230x230x3xf32>, tensor<7x7x3x64xf32>) -> tensor<1x112x112x64xf32> %654 = "mhlo.broadcast_in_dim"(%336) {broadcast_dimensions = dense<3> : tensor<1xi64>} : (tensor<64xf32>) -> tensor<1x112x112x64xf32>
diff --git a/iree/test/e2e/regression/BUILD b/iree/test/e2e/regression/BUILD index 2dd099d..0946c8e 100644 --- a/iree/test/e2e/regression/BUILD +++ b/iree/test/e2e/regression/BUILD
@@ -35,6 +35,7 @@ "dynamic_torch_index_select_vector.mlir", "globals.mlir", "scalar.mlir", + "tensor_cast.mlir", "trace_dispatch_tensors.mlir", "unused_args.mlir", ],
diff --git a/iree/test/e2e/regression/CMakeLists.txt b/iree/test/e2e/regression/CMakeLists.txt index b15bafa..3e6bc37 100644 --- a/iree/test/e2e/regression/CMakeLists.txt +++ b/iree/test/e2e/regression/CMakeLists.txt
@@ -23,6 +23,7 @@ "dynamic_torch_index_select_vector.mlir" "globals.mlir" "scalar.mlir" + "tensor_cast.mlir" "trace_dispatch_tensors.mlir" "unused_args.mlir" DATA
diff --git a/iree/test/e2e/regression/dynamic_abs.mlir b/iree/test/e2e/regression/dynamic_abs.mlir index 6192986..044e279 100644 --- a/iree/test/e2e/regression/dynamic_abs.mlir +++ b/iree/test/e2e/regression/dynamic_abs.mlir
@@ -2,7 +2,7 @@ // CHECK-LABEL: EXEC @dynamic_tensor func @dynamic_tensor() -> tensor<?x?xf32> { - %input = iree.dynamic_shape_constant dense<[[-1.0, 2.0, -3.0], [4.0, -5.0, 6.0]]> : tensor<2x3xf32> -> tensor<?x?xf32> + %input = util.dynamic_shape_constant dense<[[-1.0, 2.0, -3.0], [4.0, -5.0, 6.0]]> : tensor<2x3xf32> -> tensor<?x?xf32> %res = "mhlo.abs"(%input) : (tensor<?x?xf32>) -> tensor<?x?xf32> return %res : tensor<?x?xf32> }
diff --git a/iree/test/e2e/regression/dynamic_dot.mlir b/iree/test/e2e/regression/dynamic_dot.mlir index 90ff1e6..6ec4153 100644 --- a/iree/test/e2e/regression/dynamic_dot.mlir +++ b/iree/test/e2e/regression/dynamic_dot.mlir
@@ -2,13 +2,13 @@ // CHECK-LABEL: EXEC @dynamic_dot func @dynamic_dot() -> tensor<?x?xf32> { - %lhs = iree.dynamic_shape_constant dense<[ + %lhs = util.dynamic_shape_constant dense<[ [15.0, 14.0, 13.0], [12.0, 11.0, 10.0], [09.0, 08.0, 07.0], [06.0, 05.0, 04.0], [03.0, 02.0, 01.0]]> : tensor<5x3xf32> -> tensor<?x?xf32> - %rhs = iree.dynamic_shape_constant dense<[ + %rhs = util.dynamic_shape_constant dense<[ [15.0, 14.0, 13.0, 12.0, 11.0], [10.0, 09.0, 08.0, 07.0, 06.0], [05.0, 04.0, 03.0, 02.0, 01.0]]> : tensor<3x5xf32> -> tensor<?x?xf32>
diff --git a/iree/test/e2e/regression/linalg_ext_ops.mlir b/iree/test/e2e/regression/linalg_ext_ops.mlir index 1cebe9a..39fb1aa 100644 --- a/iree/test/e2e/regression/linalg_ext_ops.mlir +++ b/iree/test/e2e/regression/linalg_ext_ops.mlir
@@ -1,5 +1,5 @@ func @sort2D() { - %input = iree.unfoldable_constant dense<[[5, 6], [3, 7]]> : tensor<2x2xi32> + %input = util.unfoldable_constant dense<[[5, 6], [3, 7]]> : tensor<2x2xi32> %0 = linalg_ext.sort dimension(0) outs(%input : tensor<2x2xi32>) { ^bb0(%arg2: i32, %arg3: i32): // no predecessors %1 = cmpi slt, %arg2, %arg3 : i32
diff --git a/iree/test/e2e/regression/linalg_ops.mlir b/iree/test/e2e/regression/linalg_ops.mlir index 0c92558..4a0660a 100644 --- a/iree/test/e2e/regression/linalg_ops.mlir +++ b/iree/test/e2e/regression/linalg_ops.mlir
@@ -1,9 +1,9 @@ func @multi_result() { - %input1 = iree.unfoldable_constant dense<[ + %input1 = util.unfoldable_constant dense<[ [1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]> : tensor<3x4xi32> - %input2 = iree.unfoldable_constant dense<[ + %input2 = util.unfoldable_constant dense<[ [13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]]> : tensor<3x4xi32>
diff --git a/iree/test/e2e/regression/scalar.mlir b/iree/test/e2e/regression/scalar.mlir index 3ee5510..538ac52 100644 --- a/iree/test/e2e/regression/scalar.mlir +++ b/iree/test/e2e/regression/scalar.mlir
@@ -4,7 +4,7 @@ // CHECK-LABEL: EXEC @scalar func @scalar() -> i32 { - %result = iree.unfoldable_constant 42 : i32 + %result = util.unfoldable_constant 42 : i32 return %result : i32 } // CHECK: i32=42
diff --git a/iree/test/e2e/regression/tensor_cast.mlir b/iree/test/e2e/regression/tensor_cast.mlir new file mode 100644 index 0000000..b9d4711 --- /dev/null +++ b/iree/test/e2e/regression/tensor_cast.mlir
@@ -0,0 +1,10 @@ +// RUN: iree-run-mlir -iree-hal-target-backends=vmvx %s | IreeFileCheck %s +// RUN: [[ $IREE_LLVMAOT_DISABLE == 1 ]] || (iree-run-mlir -iree-hal-target-backends=dylib-llvm-aot %s | IreeFileCheck %s) +// RUN: [[ $IREE_VULKAN_DISABLE == 1 ]] || (iree-run-mlir -iree-hal-target-backends=vulkan-spirv %s | IreeFileCheck %s) + +func @tensor_cast() -> tensor<2x?xf32> { + %input = util.unfoldable_constant dense<[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]> : tensor<2x3xf32> + %result = tensor.cast %input : tensor<2x3xf32> to tensor<2x?xf32> + return %result : tensor<2x?xf32> +} +// CHECK: 2x3xf32=[1 2 3][4 5 6]
diff --git a/iree/test/e2e/regression/trace_dispatch_tensors.mlir b/iree/test/e2e/regression/trace_dispatch_tensors.mlir index a4db90a..024274a 100644 --- a/iree/test/e2e/regression/trace_dispatch_tensors.mlir +++ b/iree/test/e2e/regression/trace_dispatch_tensors.mlir
@@ -1,8 +1,8 @@ // RUN: iree-run-mlir --iree-input-type=mhlo -iree-hal-target-backends=vmvx -iree-flow-trace-dispatch-tensors2 %s 2>&1 | IreeFileCheck %s func @two_dispatch() -> (tensor<5x5xf32>, tensor<3x5xf32>) { - %0 = iree.unfoldable_constant dense<1.0> : tensor<5x3xf32> - %1 = iree.unfoldable_constant dense<0.4> : tensor<3x5xf32> + %0 = util.unfoldable_constant dense<1.0> : tensor<5x3xf32> + %1 = util.unfoldable_constant dense<0.4> : tensor<3x5xf32> %2 = "mhlo.dot"(%0, %1) : (tensor<5x3xf32>, tensor<3x5xf32>) -> tensor<5x5xf32> %3 = "mhlo.dot"(%1, %2) : (tensor<3x5xf32>, tensor<5x5xf32>) -> tensor<3x5xf32> return %2, %3 : tensor<5x5xf32>, tensor<3x5xf32>
diff --git a/iree/test/e2e/tosa_ops/abs.mlir b/iree/test/e2e/tosa_ops/abs.mlir index 4fcf10b..6a43214 100644 --- a/iree/test/e2e/tosa_ops/abs.mlir +++ b/iree/test/e2e/tosa_ops/abs.mlir
@@ -1,5 +1,5 @@ func @tensor_float() { - %0 = iree.unfoldable_constant dense<[-1.0, -0.5, 0.0, 1.0]> : tensor<4xf32> + %0 = util.unfoldable_constant dense<[-1.0, -0.5, 0.0, 1.0]> : tensor<4xf32> %result = "tosa.abs"(%0) : (tensor<4xf32>) -> tensor<4xf32> check.expect_almost_eq_const(%result, dense<[1.0, 0.5, 0.0, 1.0]> : tensor<4xf32>) : tensor<4xf32> return
diff --git a/iree/test/e2e/tosa_ops/add.mlir b/iree/test/e2e/tosa_ops/add.mlir index 27382ae..8ae5dcd 100644 --- a/iree/test/e2e/tosa_ops/add.mlir +++ b/iree/test/e2e/tosa_ops/add.mlir
@@ -1,14 +1,14 @@ func @tensor_float() { - %0 = iree.unfoldable_constant dense<[1.0, 2.0, 3.0, 4.0]> : tensor<4xf32> - %1 = iree.unfoldable_constant dense<[5.0, 6.0, 7.0, 8.0]> : tensor<4xf32> + %0 = util.unfoldable_constant dense<[1.0, 2.0, 3.0, 4.0]> : tensor<4xf32> + %1 = util.unfoldable_constant dense<[5.0, 6.0, 7.0, 8.0]> : tensor<4xf32> %result = "tosa.add"(%0, %1) : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32> check.expect_almost_eq_const(%result, dense<[6.0, 8.0, 10.0, 12.0]> : tensor<4xf32>) : tensor<4xf32> return } func @tensor_int() { - %0 = iree.unfoldable_constant dense<[1, 2, 3, 4]> : tensor<4xi32> - %1 = iree.unfoldable_constant dense<[5, 6, 7, 8]> : tensor<4xi32> + %0 = util.unfoldable_constant dense<[1, 2, 3, 4]> : tensor<4xi32> + %1 = util.unfoldable_constant dense<[5, 6, 7, 8]> : tensor<4xi32> %result = "tosa.add"(%0, %1) : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi32> check.expect_eq_const(%result, dense<[6, 8, 10, 12]> : tensor<4xi32>) : tensor<4xi32> return
diff --git a/iree/test/e2e/tosa_ops/arithmetic_right_shift.mlir b/iree/test/e2e/tosa_ops/arithmetic_right_shift.mlir index 09ac36e..d07c49f 100644 --- a/iree/test/e2e/tosa_ops/arithmetic_right_shift.mlir +++ b/iree/test/e2e/tosa_ops/arithmetic_right_shift.mlir
@@ -1,14 +1,14 @@ func @no_round() { - %0 = iree.unfoldable_constant dense<[5, 8, -1, 7]> : tensor<4xi32> - %1 = iree.unfoldable_constant dense<[0, 1, 3, 1]> : tensor<4xi32> + %0 = util.unfoldable_constant dense<[5, 8, -1, 7]> : tensor<4xi32> + %1 = util.unfoldable_constant dense<[0, 1, 3, 1]> : tensor<4xi32> %result = "tosa.arithmetic_right_shift"(%0, %1) {round = 0 : i1} : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi32> check.expect_eq_const(%result, dense<[5, 4, -1, 3]> : tensor<4xi32>) : tensor<4xi32> return } func @with_round() { - %0 = iree.unfoldable_constant dense<[5, 8, -1, 7]> : tensor<4xi32> - %1 = iree.unfoldable_constant dense<[0, 1, 3, 1]> : tensor<4xi32> + %0 = util.unfoldable_constant dense<[5, 8, -1, 7]> : tensor<4xi32> + %1 = util.unfoldable_constant dense<[0, 1, 3, 1]> : tensor<4xi32> %result = "tosa.arithmetic_right_shift"(%0, %1) {round = 1 : i1} : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi32> check.expect_eq_const(%result, dense<[5, 4, 0, 4]> : tensor<4xi32>) : tensor<4xi32> return
diff --git a/iree/test/e2e/tosa_ops/bitwise_and.mlir b/iree/test/e2e/tosa_ops/bitwise_and.mlir index 640217f..e53098e 100644 --- a/iree/test/e2e/tosa_ops/bitwise_and.mlir +++ b/iree/test/e2e/tosa_ops/bitwise_and.mlir
@@ -1,6 +1,6 @@ func @tensor() { - %0 = iree.unfoldable_constant dense<[0x0, 0x011, 0x101, 0x111]> : tensor<4xi32> - %1 = iree.unfoldable_constant dense<[0x0, 0x010, 0x111, 0x000]> : tensor<4xi32> + %0 = util.unfoldable_constant dense<[0x0, 0x011, 0x101, 0x111]> : tensor<4xi32> + %1 = util.unfoldable_constant dense<[0x0, 0x010, 0x111, 0x000]> : tensor<4xi32> %result = "tosa.bitwise_and"(%0, %1) : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi32> check.expect_eq_const(%result, dense<[0x0, 0x010, 0x101, 0x000]> : tensor<4xi32>) : tensor<4xi32> return
diff --git a/iree/test/e2e/tosa_ops/bitwise_or.mlir b/iree/test/e2e/tosa_ops/bitwise_or.mlir index e0b75f4..b9d74ab 100644 --- a/iree/test/e2e/tosa_ops/bitwise_or.mlir +++ b/iree/test/e2e/tosa_ops/bitwise_or.mlir
@@ -1,6 +1,6 @@ func @tensor() { - %0 = iree.unfoldable_constant dense<[0x0, 0x11, 0x1101, 0x111]> : tensor<4xi32> - %1 = iree.unfoldable_constant dense<[0x0, 0x10, 0x0111, 0x111]> : tensor<4xi32> + %0 = util.unfoldable_constant dense<[0x0, 0x11, 0x1101, 0x111]> : tensor<4xi32> + %1 = util.unfoldable_constant dense<[0x0, 0x10, 0x0111, 0x111]> : tensor<4xi32> %result = "tosa.bitwise_or"(%0, %1) : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi32> check.expect_eq_const(%result, dense<[0x0, 0x11, 0x1111, 0x111]> : tensor<4xi32>) : tensor<4xi32> return
diff --git a/iree/test/e2e/tosa_ops/bitwise_xor.mlir b/iree/test/e2e/tosa_ops/bitwise_xor.mlir index e0bdc11..830c27c 100644 --- a/iree/test/e2e/tosa_ops/bitwise_xor.mlir +++ b/iree/test/e2e/tosa_ops/bitwise_xor.mlir
@@ -1,6 +1,6 @@ func @tensor() { - %0 = iree.unfoldable_constant dense<[0x0, 0x11, 0x1101, 0x111]> : tensor<4xi32> - %1 = iree.unfoldable_constant dense<[0x0, 0x10, 0x0111, 0x000]> : tensor<4xi32> + %0 = util.unfoldable_constant dense<[0x0, 0x11, 0x1101, 0x111]> : tensor<4xi32> + %1 = util.unfoldable_constant dense<[0x0, 0x10, 0x0111, 0x000]> : tensor<4xi32> %result = "tosa.bitwise_xor"(%0, %1) : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi32> check.expect_eq_const(%result, dense<[0x0, 0x01, 0x1010, 0x111]> : tensor<4xi32>) : tensor<4xi32> return
diff --git a/iree/test/e2e/tosa_ops/ceil.mlir b/iree/test/e2e/tosa_ops/ceil.mlir index e34337e..fc8bc7b 100644 --- a/iree/test/e2e/tosa_ops/ceil.mlir +++ b/iree/test/e2e/tosa_ops/ceil.mlir
@@ -1,5 +1,5 @@ func @tensor_float() { - %0 = iree.unfoldable_constant dense<[0.0, -1.3, 1.3, -0.3]> : tensor<4xf32> + %0 = util.unfoldable_constant dense<[0.0, -1.3, 1.3, -0.3]> : tensor<4xf32> %result = "tosa.ceil"(%0) : (tensor<4xf32>) -> tensor<4xf32> check.expect_almost_eq_const(%result, dense<[0.0, -1.0, 2.0, 0.0]> : tensor<4xf32>) : tensor<4xf32> return
diff --git a/iree/test/e2e/tosa_ops/clamp.mlir b/iree/test/e2e/tosa_ops/clamp.mlir index 2928c90..86d5f06 100644 --- a/iree/test/e2e/tosa_ops/clamp.mlir +++ b/iree/test/e2e/tosa_ops/clamp.mlir
@@ -1,12 +1,12 @@ func @tensor_float() { - %0 = iree.unfoldable_constant dense<[1.0, 0.0, 4.5, 2.0]> : tensor<4xf32> + %0 = util.unfoldable_constant dense<[1.0, 0.0, 4.5, 2.0]> : tensor<4xf32> %result = "tosa.clamp"(%0) {min_int = 1 : i64, max_int = 4 : i64, min_fp = 1.0 : f32, max_fp = 4.0 : f32} : (tensor<4xf32>) -> tensor<4xf32> check.expect_almost_eq_const(%result, dense<[1.0, 1.0, 4.0, 2.0]> : tensor<4xf32>) : tensor<4xf32> return } func @tensor_int() { - %0 = iree.unfoldable_constant dense<[1, 0, 5, 2]> : tensor<4xi32> + %0 = util.unfoldable_constant dense<[1, 0, 5, 2]> : tensor<4xi32> %result = "tosa.clamp"(%0) {min_int = 1 : i64, max_int = 4 : i64, min_fp = 1.0 : f32, max_fp = 4.0 : f32} : (tensor<4xi32>) -> tensor<4xi32> check.expect_eq_const(%result, dense<[1, 1, 4, 2]> : tensor<4xi32>) : tensor<4xi32> return
diff --git a/iree/test/e2e/tosa_ops/equal.mlir b/iree/test/e2e/tosa_ops/equal.mlir index 7f50e0e..f7bf6b0 100644 --- a/iree/test/e2e/tosa_ops/equal.mlir +++ b/iree/test/e2e/tosa_ops/equal.mlir
@@ -1,14 +1,14 @@ func @tensor_float() { - %0 = iree.unfoldable_constant dense<[1.0, -1.0, 0.0, 2.5]> : tensor<4xf32> - %1 = iree.unfoldable_constant dense<[1.0, 1.0, -0.0, 2.0]> : tensor<4xf32> + %0 = util.unfoldable_constant dense<[1.0, -1.0, 0.0, 2.5]> : tensor<4xf32> + %1 = util.unfoldable_constant dense<[1.0, 1.0, -0.0, 2.0]> : tensor<4xf32> %result = "tosa.equal"(%0, %1) : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xi1> check.expect_eq_const(%result, dense<[true, false, true, false]> : tensor<4xi1>) : tensor<4xi1> return } func @tensor_int() { - %0 = iree.unfoldable_constant dense<[1, 0, 1, 3]> : tensor<4xi32> - %1 = iree.unfoldable_constant dense<[5, 0, 1, 8]> : tensor<4xi32> + %0 = util.unfoldable_constant dense<[1, 0, 1, 3]> : tensor<4xi32> + %1 = util.unfoldable_constant dense<[5, 0, 1, 8]> : tensor<4xi32> %result = "tosa.equal"(%0, %1) : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi1> check.expect_eq_const(%result, dense<[false, true, true, false]> : tensor<4xi1>) : tensor<4xi1> return
diff --git a/iree/test/e2e/tosa_ops/exp.mlir b/iree/test/e2e/tosa_ops/exp.mlir index af492a8..76d0ae3 100644 --- a/iree/test/e2e/tosa_ops/exp.mlir +++ b/iree/test/e2e/tosa_ops/exp.mlir
@@ -1,5 +1,5 @@ func @tensor_float() { - %0 = iree.unfoldable_constant dense<[0.0, 1.0, 0.5, 2.0]> : tensor<4xf32> + %0 = util.unfoldable_constant dense<[0.0, 1.0, 0.5, 2.0]> : tensor<4xf32> %result = "tosa.exp"(%0) : (tensor<4xf32>) -> tensor<4xf32> check.expect_almost_eq_const(%result, dense<[1.0, 2.71828, 1.64872, 7.38906]> : tensor<4xf32>) : tensor<4xf32> return
diff --git a/iree/test/e2e/tosa_ops/floor.mlir b/iree/test/e2e/tosa_ops/floor.mlir index f627747..ed1cf24 100644 --- a/iree/test/e2e/tosa_ops/floor.mlir +++ b/iree/test/e2e/tosa_ops/floor.mlir
@@ -1,5 +1,5 @@ func @tensor_float() { - %0 = iree.unfoldable_constant dense<[0.0, -1.3, 1.3, 0.3]> : tensor<4xf32> + %0 = util.unfoldable_constant dense<[0.0, -1.3, 1.3, 0.3]> : tensor<4xf32> %result = "tosa.floor"(%0) : (tensor<4xf32>) -> tensor<4xf32> check.expect_almost_eq_const(%result, dense<[0.0, -2.0, 1.0, 0.0]> : tensor<4xf32>) : tensor<4xf32> return
diff --git a/iree/test/e2e/tosa_ops/fully_connected.mlir b/iree/test/e2e/tosa_ops/fully_connected.mlir index e85e9fc..29bd516 100644 --- a/iree/test/e2e/tosa_ops/fully_connected.mlir +++ b/iree/test/e2e/tosa_ops/fully_connected.mlir
@@ -1,7 +1,7 @@ func @tensor_float() { - %0 = iree.unfoldable_constant dense<[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]> : tensor<2x3xf32> - %1 = iree.unfoldable_constant dense<[[7.0, 8.0, 9.0]]> : tensor<1x3xf32> - %2 = iree.unfoldable_constant dense<[1.0]> : tensor<1xf32> + %0 = util.unfoldable_constant dense<[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]> : tensor<2x3xf32> + %1 = util.unfoldable_constant dense<[[7.0, 8.0, 9.0]]> : tensor<1x3xf32> + %2 = util.unfoldable_constant dense<[1.0]> : tensor<1xf32> %result = "tosa.fully_connected"(%0, %1, %2) : (tensor<2x3xf32>, tensor<1x3xf32>, tensor<1xf32>) -> tensor<2x1xf32> check.expect_eq_const(%result, dense<[[51.0], [123.0]]> : tensor<2x1xf32>) : tensor<2x1xf32> return
diff --git a/iree/test/e2e/tosa_ops/greater.mlir b/iree/test/e2e/tosa_ops/greater.mlir index 0c21a3f..4a81fd5 100644 --- a/iree/test/e2e/tosa_ops/greater.mlir +++ b/iree/test/e2e/tosa_ops/greater.mlir
@@ -1,14 +1,14 @@ func @tensor_float() { - %0 = iree.unfoldable_constant dense<[1.0, -1.5, 7.0, -2.0]> : tensor<4xf32> - %1 = iree.unfoldable_constant dense<[5.0, 1.0, 7.0, -3.0]> : tensor<4xf32> + %0 = util.unfoldable_constant dense<[1.0, -1.5, 7.0, -2.0]> : tensor<4xf32> + %1 = util.unfoldable_constant dense<[5.0, 1.0, 7.0, -3.0]> : tensor<4xf32> %result = "tosa.greater"(%0, %1) : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xi1> check.expect_eq_const(%result, dense<[false, false, false, true]> : tensor<4xi1>) : tensor<4xi1> return } func @tensor_int() { - %0 = iree.unfoldable_constant dense<[1, 0, 5, 3]> : tensor<4xi32> - %1 = iree.unfoldable_constant dense<[5, 0, 1, 8]> : tensor<4xi32> + %0 = util.unfoldable_constant dense<[1, 0, 5, 3]> : tensor<4xi32> + %1 = util.unfoldable_constant dense<[5, 0, 1, 8]> : tensor<4xi32> %result = "tosa.greater"(%0, %1) : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi1> check.expect_eq_const(%result, dense<[false, false, true, false]> : tensor<4xi1>) : tensor<4xi1> return
diff --git a/iree/test/e2e/tosa_ops/greater_equal.mlir b/iree/test/e2e/tosa_ops/greater_equal.mlir index ab4d0c9..ad3c355 100644 --- a/iree/test/e2e/tosa_ops/greater_equal.mlir +++ b/iree/test/e2e/tosa_ops/greater_equal.mlir
@@ -1,14 +1,14 @@ func @tensor_float() { - %0 = iree.unfoldable_constant dense<[1.0, -1.5, 7.0, -2.0]> : tensor<4xf32> - %1 = iree.unfoldable_constant dense<[5.0, 1.0, 7.0, -3.0]> : tensor<4xf32> + %0 = util.unfoldable_constant dense<[1.0, -1.5, 7.0, -2.0]> : tensor<4xf32> + %1 = util.unfoldable_constant dense<[5.0, 1.0, 7.0, -3.0]> : tensor<4xf32> %result = "tosa.greater_equal"(%0, %1) : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xi1> check.expect_eq_const(%result, dense<[false, false, true, true]> : tensor<4xi1>) : tensor<4xi1> return } func @tensor_int() { - %0 = iree.unfoldable_constant dense<[1, 0, 5, 3]> : tensor<4xi32> - %1 = iree.unfoldable_constant dense<[5, 0, 1, 8]> : tensor<4xi32> + %0 = util.unfoldable_constant dense<[1, 0, 5, 3]> : tensor<4xi32> + %1 = util.unfoldable_constant dense<[5, 0, 1, 8]> : tensor<4xi32> %result = "tosa.greater_equal"(%0, %1) : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi1> check.expect_eq_const(%result, dense<[false, true, true, false]> : tensor<4xi1>) : tensor<4xi1> return
diff --git a/iree/test/e2e/tosa_ops/if.mlir b/iree/test/e2e/tosa_ops/if.mlir index 6267178..7fc71ba 100644 --- a/iree/test/e2e/tosa_ops/if.mlir +++ b/iree/test/e2e/tosa_ops/if.mlir
@@ -1,11 +1,11 @@ func @if_true_test() { - %0 = iree.unfoldable_constant dense<true> : tensor<i1> - %1 = iree.unfoldable_constant dense<10> : tensor<i32> - %path = iree.unfoldable_constant 1 : i32 + %0 = util.unfoldable_constant dense<true> : tensor<i1> + %1 = util.unfoldable_constant dense<10> : tensor<i32> + %path = util.unfoldable_constant 1 : i32 %2 = "tosa.cond_if"(%0, %1) ( { ^bb0(%arg0 : tensor<i32>): check.expect_true(%path) : i32 - %3 = iree.unfoldable_constant dense<10> : tensor<i32> + %3 = util.unfoldable_constant dense<10> : tensor<i32> %4 = "tosa.add"(%arg0, %3) : (tensor<i32>, tensor<i32>) -> tensor<i32> "tosa.yield"(%4) : (tensor<i32>) -> () }, { @@ -18,9 +18,9 @@ } func @if_false_test() { - %0 = iree.unfoldable_constant dense<false> : tensor<i1> - %1 = iree.unfoldable_constant dense<10> : tensor<i32> - %path = iree.unfoldable_constant 0 : i32 + %0 = util.unfoldable_constant dense<false> : tensor<i1> + %1 = util.unfoldable_constant dense<10> : tensor<i32> + %path = util.unfoldable_constant 0 : i32 %2 = "tosa.cond_if"(%0, %1) ( { ^bb0(%arg0 : tensor<i32>): check.expect_true(%path) : i32 @@ -28,7 +28,7 @@ }, { ^bb0(%arg0 : tensor<i32>): check.expect_false(%path) : i32 - %3 = iree.unfoldable_constant dense<10> : tensor<i32> + %3 = util.unfoldable_constant dense<10> : tensor<i32> %4 = "tosa.add"(%arg0, %3) : (tensor<i32>, tensor<i32>) -> tensor<i32> "tosa.yield"(%4) : (tensor<i32>) -> () }) : (tensor<i1>, tensor<i32>) -> (tensor<i32>)
diff --git a/iree/test/e2e/tosa_ops/log.mlir b/iree/test/e2e/tosa_ops/log.mlir index 435c2bc..17f1d5f 100644 --- a/iree/test/e2e/tosa_ops/log.mlir +++ b/iree/test/e2e/tosa_ops/log.mlir
@@ -1,5 +1,5 @@ func @tensor_float() { - %0 = iree.unfoldable_constant dense<[1.0, 5.0, 0.5, 2.0]> : tensor<4xf32> + %0 = util.unfoldable_constant dense<[1.0, 5.0, 0.5, 2.0]> : tensor<4xf32> %result = "tosa.log"(%0) : (tensor<4xf32>) -> tensor<4xf32> check.expect_almost_eq_const(%result, dense<[0.0, 1.60943, -0.693147, 0.693147]> : tensor<4xf32>) : tensor<4xf32> return
diff --git a/iree/test/e2e/tosa_ops/logical_left_shift.mlir b/iree/test/e2e/tosa_ops/logical_left_shift.mlir index bbc810a..ddab394 100644 --- a/iree/test/e2e/tosa_ops/logical_left_shift.mlir +++ b/iree/test/e2e/tosa_ops/logical_left_shift.mlir
@@ -1,6 +1,6 @@ func @tensor() { - %0 = iree.unfoldable_constant dense<[5, 3, 2, 7]> : tensor<4xi32> - %1 = iree.unfoldable_constant dense<[0, 1, 2, 3]> : tensor<4xi32> + %0 = util.unfoldable_constant dense<[5, 3, 2, 7]> : tensor<4xi32> + %1 = util.unfoldable_constant dense<[0, 1, 2, 3]> : tensor<4xi32> %result = "tosa.logical_left_shift"(%0, %1) : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi32> check.expect_eq_const(%result, dense<[5, 6, 8, 56]> : tensor<4xi32>) : tensor<4xi32> return
diff --git a/iree/test/e2e/tosa_ops/logical_right_shift.mlir b/iree/test/e2e/tosa_ops/logical_right_shift.mlir index 3cb8436..474b493 100644 --- a/iree/test/e2e/tosa_ops/logical_right_shift.mlir +++ b/iree/test/e2e/tosa_ops/logical_right_shift.mlir
@@ -1,6 +1,6 @@ func @tensor() { - %0 = iree.unfoldable_constant dense<[5, 8, 9, 256]> : tensor<4xi32> - %1 = iree.unfoldable_constant dense<[0, 1, 2, 8]> : tensor<4xi32> + %0 = util.unfoldable_constant dense<[5, 8, 9, 256]> : tensor<4xi32> + %1 = util.unfoldable_constant dense<[0, 1, 2, 8]> : tensor<4xi32> %result = "tosa.logical_right_shift"(%0, %1) : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi32> check.expect_eq_const(%result, dense<[5, 4, 2, 1]> : tensor<4xi32>) : tensor<4xi32> return
diff --git a/iree/test/e2e/tosa_ops/matmul.mlir b/iree/test/e2e/tosa_ops/matmul.mlir index e5a034b..40e09eb 100644 --- a/iree/test/e2e/tosa_ops/matmul.mlir +++ b/iree/test/e2e/tosa_ops/matmul.mlir
@@ -1,14 +1,14 @@ func @tensor_float() { - %0 = iree.unfoldable_constant dense<[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]> : tensor<2x3xf32> - %1 = iree.unfoldable_constant dense<[[7.0], [8.0], [9.0]]> : tensor<3x1xf32> + %0 = util.unfoldable_constant dense<[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]> : tensor<2x3xf32> + %1 = util.unfoldable_constant dense<[[7.0], [8.0], [9.0]]> : tensor<3x1xf32> %result = "tosa.matmul"(%0, %1) : (tensor<2x3xf32>, tensor<3x1xf32>) -> tensor<2x1xf32> check.expect_eq_const(%result, dense<[[50.0], [122.0]]> : tensor<2x1xf32>) : tensor<2x1xf32> return } func @tensor_int() { - %0 = iree.unfoldable_constant dense<[[1, 2, 3], [4, 5, 6]]> : tensor<2x3xi32> - %1 = iree.unfoldable_constant dense<[[7], [8], [9]]> : tensor<3x1xi32> + %0 = util.unfoldable_constant dense<[[1, 2, 3], [4, 5, 6]]> : tensor<2x3xi32> + %1 = util.unfoldable_constant dense<[[7], [8], [9]]> : tensor<3x1xi32> %result = "tosa.matmul"(%0, %1) : (tensor<2x3xi32>, tensor<3x1xi32>) -> tensor<2x1xi32> check.expect_eq_const(%result, dense<[[50], [122]]> : tensor<2x1xi32>) : tensor<2x1xi32> return
diff --git a/iree/test/e2e/tosa_ops/max_pool.mlir b/iree/test/e2e/tosa_ops/max_pool.mlir index af130e1..cfaf75c 100644 --- a/iree/test/e2e/tosa_ops/max_pool.mlir +++ b/iree/test/e2e/tosa_ops/max_pool.mlir
@@ -1,26 +1,26 @@ func @tensor_i8() { - %0 = iree.unfoldable_constant dense<[[[[1], [2], [3], [4]], [[5], [6], [7], [8]]]]> : tensor<1x2x4x1xi8> + %0 = util.unfoldable_constant dense<[[[[1], [2], [3], [4]], [[5], [6], [7], [8]]]]> : tensor<1x2x4x1xi8> %result = "tosa.max_pool2d"(%0) {kernel = [2, 2], stride = [1, 1], pad = [0, 0, 0, 0]} : (tensor<1x2x4x1xi8>) -> tensor<1x1x3x1xi8> check.expect_eq_const(%result, dense<[[[[6], [7], [8]]]]> : tensor<1x1x3x1xi8>) : tensor<1x1x3x1xi8> return } func @tensor_i16() { - %0 = iree.unfoldable_constant dense<[[[[1], [2], [3], [4]], [[5], [6], [7], [8]]]]> : tensor<1x2x4x1xi16> + %0 = util.unfoldable_constant dense<[[[[1], [2], [3], [4]], [[5], [6], [7], [8]]]]> : tensor<1x2x4x1xi16> %result = "tosa.max_pool2d"(%0) {kernel = [2, 2], stride = [1, 1], pad = [0, 0, 0, 0]} : (tensor<1x2x4x1xi16>) -> tensor<1x1x3x1xi16> check.expect_eq_const(%result, dense<[[[[6], [7], [8]]]]> : tensor<1x1x3x1xi16>) : tensor<1x1x3x1xi16> return } func @tensor_i32() { - %0 = iree.unfoldable_constant dense<[[[[1], [2], [3], [4]], [[5], [6], [7], [8]]]]> : tensor<1x2x4x1xi32> + %0 = util.unfoldable_constant dense<[[[[1], [2], [3], [4]], [[5], [6], [7], [8]]]]> : tensor<1x2x4x1xi32> %result = "tosa.max_pool2d"(%0) {kernel = [2, 2], stride = [1, 1], pad = [0, 0, 0, 0]} : (tensor<1x2x4x1xi32>) -> tensor<1x1x3x1xi32> check.expect_eq_const(%result, dense<[[[[6], [7], [8]]]]> : tensor<1x1x3x1xi32>) : tensor<1x1x3x1xi32> return } func @tensor_f32() { - %0 = iree.unfoldable_constant dense<[[[[1.], [2.], [3.], [4.]], [[5.], [6.], [7.], [8.]]]]> : tensor<1x2x4x1xf32> + %0 = util.unfoldable_constant dense<[[[[1.], [2.], [3.], [4.]], [[5.], [6.], [7.], [8.]]]]> : tensor<1x2x4x1xf32> %result = "tosa.max_pool2d"(%0) {kernel = [2, 2], stride = [1, 1], pad = [0, 0, 0, 0]} : (tensor<1x2x4x1xf32>) -> tensor<1x1x3x1xf32> check.expect_eq_const(%result, dense<[[[[6.], [7.], [8.]]]]> : tensor<1x1x3x1xf32>) : tensor<1x1x3x1xf32> return
diff --git a/iree/test/e2e/tosa_ops/maximum.mlir b/iree/test/e2e/tosa_ops/maximum.mlir index 90be28f..b3c954a 100644 --- a/iree/test/e2e/tosa_ops/maximum.mlir +++ b/iree/test/e2e/tosa_ops/maximum.mlir
@@ -1,14 +1,14 @@ func @tensor_float() { - %0 = iree.unfoldable_constant dense<[1.0, -1.5, 7.0, -2.0]> : tensor<4xf32> - %1 = iree.unfoldable_constant dense<[5.0, 1.0, 7.0, -3.0]> : tensor<4xf32> + %0 = util.unfoldable_constant dense<[1.0, -1.5, 7.0, -2.0]> : tensor<4xf32> + %1 = util.unfoldable_constant dense<[5.0, 1.0, 7.0, -3.0]> : tensor<4xf32> %result = "tosa.maximum"(%0, %1) : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32> check.expect_eq_const(%result, dense<[5.0, 1.0, 7.0, -2.0]> : tensor<4xf32>) : tensor<4xf32> return } func @tensor_int() { - %0 = iree.unfoldable_constant dense<[1, 0, 5, 3]> : tensor<4xi32> - %1 = iree.unfoldable_constant dense<[5, 0, 1, -8]> : tensor<4xi32> + %0 = util.unfoldable_constant dense<[1, 0, 5, 3]> : tensor<4xi32> + %1 = util.unfoldable_constant dense<[5, 0, 1, -8]> : tensor<4xi32> %result = "tosa.maximum"(%0, %1) : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi32> check.expect_eq_const(%result, dense<[5, 0, 5, 3]> : tensor<4xi32>) : tensor<4xi32> return
diff --git a/iree/test/e2e/tosa_ops/minimum.mlir b/iree/test/e2e/tosa_ops/minimum.mlir index b5ea995..89a8f03 100644 --- a/iree/test/e2e/tosa_ops/minimum.mlir +++ b/iree/test/e2e/tosa_ops/minimum.mlir
@@ -1,14 +1,14 @@ func @tensor_float() { - %0 = iree.unfoldable_constant dense<[1.0, -1.5, 7.0, -2.0]> : tensor<4xf32> - %1 = iree.unfoldable_constant dense<[5.0, 1.0, 7.0, -3.0]> : tensor<4xf32> + %0 = util.unfoldable_constant dense<[1.0, -1.5, 7.0, -2.0]> : tensor<4xf32> + %1 = util.unfoldable_constant dense<[5.0, 1.0, 7.0, -3.0]> : tensor<4xf32> %result = "tosa.minimum"(%0, %1) : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32> check.expect_eq_const(%result, dense<[1.0, -1.5, 7.0, -3.0]> : tensor<4xf32>) : tensor<4xf32> return } func @tensor_int() { - %0 = iree.unfoldable_constant dense<[1, 0, 5, 3]> : tensor<4xi32> - %1 = iree.unfoldable_constant dense<[5, 0, 1, -8]> : tensor<4xi32> + %0 = util.unfoldable_constant dense<[1, 0, 5, 3]> : tensor<4xi32> + %1 = util.unfoldable_constant dense<[5, 0, 1, -8]> : tensor<4xi32> %result = "tosa.minimum"(%0, %1) : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi32> check.expect_eq_const(%result, dense<[1, 0, 1, -8]> : tensor<4xi32>) : tensor<4xi32> return
diff --git a/iree/test/e2e/tosa_ops/mul.mlir b/iree/test/e2e/tosa_ops/mul.mlir index 33ccfbe..640b2da 100644 --- a/iree/test/e2e/tosa_ops/mul.mlir +++ b/iree/test/e2e/tosa_ops/mul.mlir
@@ -1,22 +1,22 @@ func @tensor_float() { - %0 = iree.unfoldable_constant dense<[1.0, 0.0, 3.0, 4.0]> : tensor<4xf32> - %1 = iree.unfoldable_constant dense<[5.0, 6.0, -3.0, 8.0]> : tensor<4xf32> + %0 = util.unfoldable_constant dense<[1.0, 0.0, 3.0, 4.0]> : tensor<4xf32> + %1 = util.unfoldable_constant dense<[5.0, 6.0, -3.0, 8.0]> : tensor<4xf32> %result = "tosa.mul"(%0, %1) {shift = 0 : i32} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32> check.expect_almost_eq_const(%result, dense<[5.0, 0.0, -9.0, 32.0]> : tensor<4xf32>) : tensor<4xf32> return } func @tensor_int() { - %0 = iree.unfoldable_constant dense<[1, 0, 3, 4]> : tensor<4xi32> - %1 = iree.unfoldable_constant dense<[5, 6, -3, 8]> : tensor<4xi32> + %0 = util.unfoldable_constant dense<[1, 0, 3, 4]> : tensor<4xi32> + %1 = util.unfoldable_constant dense<[5, 6, -3, 8]> : tensor<4xi32> %result = "tosa.mul"(%0, %1) {shift = 0 : i32} : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi32> check.expect_eq_const(%result, dense<[5, 0, -9, 32]> : tensor<4xi32>) : tensor<4xi32> return } func @tensor_int_shifted() { - %0 = iree.unfoldable_constant dense<[1, 0, 3, 4]> : tensor<4xi32> - %1 = iree.unfoldable_constant dense<[5, 6, -3, 8]> : tensor<4xi32> + %0 = util.unfoldable_constant dense<[1, 0, 3, 4]> : tensor<4xi32> + %1 = util.unfoldable_constant dense<[5, 6, -3, 8]> : tensor<4xi32> %result = "tosa.mul"(%0, %1) {shift = 1 : i32} : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi32> check.expect_eq_const(%result, dense<[3, 0, -4, 16]> : tensor<4xi32>) : tensor<4xi32> return
diff --git a/iree/test/e2e/tosa_ops/negate.mlir b/iree/test/e2e/tosa_ops/negate.mlir index 7fe018b..0b4eeba 100644 --- a/iree/test/e2e/tosa_ops/negate.mlir +++ b/iree/test/e2e/tosa_ops/negate.mlir
@@ -1,12 +1,12 @@ func @tensor_float() { - %0 = iree.unfoldable_constant dense<[-1.0, -0.5, 0.0, 1.0]> : tensor<4xf32> + %0 = util.unfoldable_constant dense<[-1.0, -0.5, 0.0, 1.0]> : tensor<4xf32> %result = "tosa.negate"(%0) : (tensor<4xf32>) -> tensor<4xf32> check.expect_almost_eq_const(%result, dense<[1.0, 0.5, 0.0, -1.0]> : tensor<4xf32>) : tensor<4xf32> return } func @tensor_int() { - %0 = iree.unfoldable_constant dense<[-1, 0, 3, 1]> : tensor<4xi32> + %0 = util.unfoldable_constant dense<[-1, 0, 3, 1]> : tensor<4xi32> %result = "tosa.negate"(%0) : (tensor<4xi32>) -> tensor<4xi32> check.expect_eq_const(%result, dense<[1, 0, -3, -1]> : tensor<4xi32>) : tensor<4xi32> return
diff --git a/iree/test/e2e/tosa_ops/pad.mlir b/iree/test/e2e/tosa_ops/pad.mlir index dc60b22..254ca38 100644 --- a/iree/test/e2e/tosa_ops/pad.mlir +++ b/iree/test/e2e/tosa_ops/pad.mlir
@@ -1,5 +1,5 @@ func @pad_1D_test() { - %0 = iree.unfoldable_constant dense<42> : tensor<2xi32> + %0 = util.unfoldable_constant dense<42> : tensor<2xi32> %1 = "tosa.const"() { value = dense<[[3, 2]]> : tensor<1x2xi32> } : () -> (tensor<1x2xi32>) %result = "tosa.pad"(%0, %1) : (tensor<2xi32>, tensor<1x2xi32>) -> (tensor<7xi32>) check.expect_eq_const(%result, dense<[0, 0, 0, 42, 42, 0, 0]> : tensor<7xi32>) : tensor<7xi32> @@ -7,7 +7,7 @@ } func @pad_2D_test() { - %0 = iree.unfoldable_constant dense<42> : tensor<2x2xi32> + %0 = util.unfoldable_constant dense<42> : tensor<2x2xi32> %1 = "tosa.const"() { value = dense<[[1, 1], [1, 1]]> : tensor<2x2xi32> } : () -> (tensor<2x2xi32>) %result = "tosa.pad"(%0, %1) : (tensor<2x2xi32>, tensor<2x2xi32>) -> (tensor<4x4xi32>) check.expect_eq_const(%result, dense<[[0, 0, 0, 0], [0, 42, 42, 0], [0, 42, 42, 0], [0, 0, 0, 0]]> : tensor<4x4xi32>) : tensor<4x4xi32> @@ -15,7 +15,7 @@ } func @pad_3D_test() { - %0 = iree.unfoldable_constant dense<42> : tensor<1x1x2xi32> + %0 = util.unfoldable_constant dense<42> : tensor<1x1x2xi32> %1 = "tosa.const"() { value = dense<[[0, 1], [1, 0], [0, 0]]> : tensor<3x2xi32> } : () -> (tensor<3x2xi32>) %result = "tosa.pad"(%0, %1) : (tensor<1x1x2xi32>, tensor<3x2xi32>) -> (tensor<2x2x2xi32>) check.expect_eq_const(%result, dense<[[[0, 0], [42, 42]], [[0, 0], [0, 0]]]> : tensor<2x2x2xi32>) : tensor<2x2x2xi32>
diff --git a/iree/test/e2e/tosa_ops/reciprocal.mlir b/iree/test/e2e/tosa_ops/reciprocal.mlir index 1c7ce5c..209dd81 100644 --- a/iree/test/e2e/tosa_ops/reciprocal.mlir +++ b/iree/test/e2e/tosa_ops/reciprocal.mlir
@@ -1,5 +1,5 @@ func @tensor_float() { - %0 = iree.unfoldable_constant dense<[-1.0, -0.5, 10.0, 2.0]> : tensor<4xf32> + %0 = util.unfoldable_constant dense<[-1.0, -0.5, 10.0, 2.0]> : tensor<4xf32> %result = "tosa.reciprocal"(%0) : (tensor<4xf32>) -> tensor<4xf32> check.expect_almost_eq_const(%result, dense<[-1.0, -2.0, 0.1, 0.5]> : tensor<4xf32>) : tensor<4xf32> return
diff --git a/iree/test/e2e/tosa_ops/reduce.mlir b/iree/test/e2e/tosa_ops/reduce.mlir index d971e32..fbabc88 100644 --- a/iree/test/e2e/tosa_ops/reduce.mlir +++ b/iree/test/e2e/tosa_ops/reduce.mlir
@@ -1,61 +1,61 @@ func @reduce_max_float() { - %0 = iree.unfoldable_constant dense<[[2.0, 2.0], [3.0, 4.0], [1.0, 0.0]]> : tensor<3x2xf32> + %0 = util.unfoldable_constant dense<[[2.0, 2.0], [3.0, 4.0], [1.0, 0.0]]> : tensor<3x2xf32> %result = "tosa.reduce_max"(%0) {axis = 0 : i64} : (tensor<3x2xf32>) -> tensor<1x2xf32> check.expect_almost_eq_const(%result, dense<[[3.0, 4.0]]> : tensor<1x2xf32>) : tensor<1x2xf32> return } func @reduce_max_int() { - %0 = iree.unfoldable_constant dense<[[2, 2], [3, 4], [1, 0]]> : tensor<3x2xi32> + %0 = util.unfoldable_constant dense<[[2, 2], [3, 4], [1, 0]]> : tensor<3x2xi32> %result = "tosa.reduce_max"(%0) {axis = 0 : i64} : (tensor<3x2xi32>) -> tensor<1x2xi32> check.expect_eq_const(%result, dense<[[3, 4]]> : tensor<1x2xi32>) : tensor<1x2xi32> return } func @reduce_min_float() { - %0 = iree.unfoldable_constant dense<[[2.0, 2.0], [3.0, 4.0], [1.0, 0.0]]> : tensor<3x2xf32> + %0 = util.unfoldable_constant dense<[[2.0, 2.0], [3.0, 4.0], [1.0, 0.0]]> : tensor<3x2xf32> %result = "tosa.reduce_min"(%0) {axis = 0 : i64} : (tensor<3x2xf32>) -> tensor<1x2xf32> check.expect_almost_eq_const(%result, dense<[[1.0, 0.0]]> : tensor<1x2xf32>) : tensor<1x2xf32> return } func @reduce_min_int() { - %0 = iree.unfoldable_constant dense<[[2, 2], [3, 4], [1, 0]]> : tensor<3x2xi32> + %0 = util.unfoldable_constant dense<[[2, 2], [3, 4], [1, 0]]> : tensor<3x2xi32> %result = "tosa.reduce_min"(%0) {axis = 0 : i64} : (tensor<3x2xi32>) -> tensor<1x2xi32> check.expect_eq_const(%result, dense<[[1, 0]]> : tensor<1x2xi32>) : tensor<1x2xi32> return } func @reduce_prod_float() { - %0 = iree.unfoldable_constant dense<[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]> : tensor<2x3xf32> + %0 = util.unfoldable_constant dense<[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]> : tensor<2x3xf32> %result = "tosa.reduce_prod"(%0) {axis = 0 : i64} : (tensor<2x3xf32>) -> tensor<1x3xf32> check.expect_almost_eq_const(%result, dense<[[4.0, 10.0, 18.0]]> : tensor<1x3xf32>) : tensor<1x3xf32> return } func 
@reduce_prod_int() { - %0 = iree.unfoldable_constant dense<[[1, 2, 3], [4, 5, 6]]> : tensor<2x3xi32> + %0 = util.unfoldable_constant dense<[[1, 2, 3], [4, 5, 6]]> : tensor<2x3xi32> %result = "tosa.reduce_prod"(%0) {axis = 0 : i64} : (tensor<2x3xi32>) -> tensor<1x3xi32> check.expect_eq_const(%result, dense<[[4, 10, 18]]> : tensor<1x3xi32>) : tensor<1x3xi32> return } func @reduce_sum_float() { - %0 = iree.unfoldable_constant dense<[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]> : tensor<2x3xf32> + %0 = util.unfoldable_constant dense<[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]> : tensor<2x3xf32> %result = "tosa.reduce_sum"(%0) {axis = 0 : i64} : (tensor<2x3xf32>) -> tensor<1x3xf32> check.expect_almost_eq_const(%result, dense<[[5.0, 7.0, 9.0]]> : tensor<1x3xf32>) : tensor<1x3xf32> return } func @reduce_sum_int() { - %0 = iree.unfoldable_constant dense<[[1, 2, 3], [4, 5, 6]]> : tensor<2x3xi32> + %0 = util.unfoldable_constant dense<[[1, 2, 3], [4, 5, 6]]> : tensor<2x3xi32> %result = "tosa.reduce_sum"(%0) {axis = 0 : i64} : (tensor<2x3xi32>) -> tensor<1x3xi32> check.expect_eq_const(%result, dense<[[5, 7, 9]]> : tensor<1x3xi32>) : tensor<1x3xi32> return } func @reduce_sum_float_axis_1() { - %0 = iree.unfoldable_constant dense<[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]> : tensor<2x3xf32> + %0 = util.unfoldable_constant dense<[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]> : tensor<2x3xf32> %result = "tosa.reduce_sum"(%0) {axis = 1 : i64} : (tensor<2x3xf32>) -> tensor<2x1xf32> check.expect_almost_eq_const(%result, dense<[[6.0], [15.0]]> : tensor<2x1xf32>) : tensor<2x1xf32> return
diff --git a/iree/test/e2e/tosa_ops/reluN.mlir b/iree/test/e2e/tosa_ops/reluN.mlir index 65e2ace..1ee3420 100644 --- a/iree/test/e2e/tosa_ops/reluN.mlir +++ b/iree/test/e2e/tosa_ops/reluN.mlir
@@ -1,12 +1,12 @@ func @tensor_float() { - %0 = iree.unfoldable_constant dense<[1.0, -1.0, 3.0, 5.0]> : tensor<4xf32> + %0 = util.unfoldable_constant dense<[1.0, -1.0, 3.0, 5.0]> : tensor<4xf32> %result = "tosa.reluN"(%0) {max_fp = 4.0 : f32, max_int = 4 : i64} : (tensor<4xf32>) -> tensor<4xf32> check.expect_almost_eq_const(%result, dense<[1.0, 0.0, 3.0, 4.0]> : tensor<4xf32>) : tensor<4xf32> return } func @tensor_int() { - %0 = iree.unfoldable_constant dense<[1, -1, 3, 5]> : tensor<4xi32> + %0 = util.unfoldable_constant dense<[1, -1, 3, 5]> : tensor<4xi32> %result = "tosa.reluN"(%0) {max_fp = 4.0 : f32, max_int = 4 : i64} : (tensor<4xi32>) -> tensor<4xi32> check.expect_eq_const(%result, dense<[1, 0, 3, 4]> : tensor<4xi32>) : tensor<4xi32> return
diff --git a/iree/test/e2e/tosa_ops/reshape.mlir b/iree/test/e2e/tosa_ops/reshape.mlir index 682f21f..73b06bd 100644 --- a/iree/test/e2e/tosa_ops/reshape.mlir +++ b/iree/test/e2e/tosa_ops/reshape.mlir
@@ -1,19 +1,19 @@ func @tensor_downrank() { - %0 = iree.unfoldable_constant dense<[[1, 2], [3, 4]]> : tensor<2x2xi32> + %0 = util.unfoldable_constant dense<[[1, 2], [3, 4]]> : tensor<2x2xi32> %result = "tosa.reshape"(%0) { new_shape = [4] } : (tensor<2x2xi32>) -> tensor<4xi32> check.expect_eq_const(%result, dense<[1, 2, 3, 4]> : tensor<4xi32>) : tensor<4xi32> return } func @tensor_uprank() { - %0 = iree.unfoldable_constant dense<[1, 2, 3, 4]> : tensor<4xi32> + %0 = util.unfoldable_constant dense<[1, 2, 3, 4]> : tensor<4xi32> %result = "tosa.reshape"(%0) { new_shape = [2, 2] } : (tensor<4xi32>) -> tensor<2x2xi32> check.expect_eq_const(%result, dense<[[1, 2], [3, 4]]> : tensor<2x2xi32>) : tensor<2x2xi32> return } func @tensor_crossrank() { - %0 = iree.unfoldable_constant dense<[[1, 2, 3], [4, 5, 6]]> : tensor<2x3xi32> + %0 = util.unfoldable_constant dense<[[1, 2, 3], [4, 5, 6]]> : tensor<2x3xi32> %result = "tosa.reshape"(%0) { new_shape = [3, 2] } : (tensor<2x3xi32>) -> tensor<3x2xi32> check.expect_eq_const(%result, dense<[[1, 2], [3, 4], [5, 6]]> : tensor<3x2xi32>) : tensor<3x2xi32> return
diff --git a/iree/test/e2e/tosa_ops/rsqrt.mlir b/iree/test/e2e/tosa_ops/rsqrt.mlir index 5a26cab..61053b3 100644 --- a/iree/test/e2e/tosa_ops/rsqrt.mlir +++ b/iree/test/e2e/tosa_ops/rsqrt.mlir
@@ -1,5 +1,5 @@ func @tensor_float() { - %0 = iree.unfoldable_constant dense<[16.0, 4.0, 9.0, 1.0]> : tensor<4xf32> + %0 = util.unfoldable_constant dense<[16.0, 4.0, 9.0, 1.0]> : tensor<4xf32> %result = "tosa.rsqrt"(%0) : (tensor<4xf32>) -> tensor<4xf32> check.expect_almost_eq_const(%result, dense<[0.25, 0.5, 0.3333, 1.0]> : tensor<4xf32>) : tensor<4xf32> return
diff --git a/iree/test/e2e/tosa_ops/select.mlir b/iree/test/e2e/tosa_ops/select.mlir index 7d52c5b..8e4e7c7 100644 --- a/iree/test/e2e/tosa_ops/select.mlir +++ b/iree/test/e2e/tosa_ops/select.mlir
@@ -1,16 +1,16 @@ func @tensor_float() { - %0 = iree.unfoldable_constant dense<[0, 0, 1, 1]> : tensor<4xi1> - %1 = iree.unfoldable_constant dense<[1.0, 5.0, 3.0, 4.0]> : tensor<4xf32> - %2 = iree.unfoldable_constant dense<[5.0, 1.0, 3.0, 1.5]> : tensor<4xf32> + %0 = util.unfoldable_constant dense<[0, 0, 1, 1]> : tensor<4xi1> + %1 = util.unfoldable_constant dense<[1.0, 5.0, 3.0, 4.0]> : tensor<4xf32> + %2 = util.unfoldable_constant dense<[5.0, 1.0, 3.0, 1.5]> : tensor<4xf32> %result = "tosa.select"(%0, %1, %2) : (tensor<4xi1>, tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32> check.expect_almost_eq_const(%result, dense<[5.0, 1.0, 3.0, 4.0]> : tensor<4xf32>) : tensor<4xf32> return } func @tensor_int() { - %0 = iree.unfoldable_constant dense<[0, 0, 1, 1]> : tensor<4xi1> - %1 = iree.unfoldable_constant dense<[1, 5, 3, 4]> : tensor<4xi32> - %2 = iree.unfoldable_constant dense<[5, 1, 3, 1]> : tensor<4xi32> + %0 = util.unfoldable_constant dense<[0, 0, 1, 1]> : tensor<4xi1> + %1 = util.unfoldable_constant dense<[1, 5, 3, 4]> : tensor<4xi32> + %2 = util.unfoldable_constant dense<[5, 1, 3, 1]> : tensor<4xi32> %result = "tosa.select"(%0, %1, %2) : (tensor<4xi1>, tensor<4xi32>, tensor<4xi32>) -> tensor<4xi32> check.expect_eq_const(%result, dense<[5, 1, 3, 4]> : tensor<4xi32>) : tensor<4xi32> return
diff --git a/iree/test/e2e/tosa_ops/sigmoid.mlir b/iree/test/e2e/tosa_ops/sigmoid.mlir index 72e64f7..d5a4081 100644 --- a/iree/test/e2e/tosa_ops/sigmoid.mlir +++ b/iree/test/e2e/tosa_ops/sigmoid.mlir
@@ -1,5 +1,5 @@ func @tensor_float() { - %0 = iree.unfoldable_constant dense<[0.0, 1.0, 50.0, 100.0]> : tensor<4xf32> + %0 = util.unfoldable_constant dense<[0.0, 1.0, 50.0, 100.0]> : tensor<4xf32> %result = "tosa.sigmoid"(%0) : (tensor<4xf32>) -> tensor<4xf32> check.expect_almost_eq_const(%result, dense<[0.5, 0.7310586, 1.0, 1.0]> : tensor<4xf32>) : tensor<4xf32> return
diff --git a/iree/test/e2e/tosa_ops/sub.mlir b/iree/test/e2e/tosa_ops/sub.mlir index 559b40c..61c28bc 100644 --- a/iree/test/e2e/tosa_ops/sub.mlir +++ b/iree/test/e2e/tosa_ops/sub.mlir
@@ -1,14 +1,14 @@ func @tensor_float() { - %0 = iree.unfoldable_constant dense<[1.0, 5.0, 3.0, 4.0]> : tensor<4xf32> - %1 = iree.unfoldable_constant dense<[5.0, 1.0, 3.0, 1.5]> : tensor<4xf32> + %0 = util.unfoldable_constant dense<[1.0, 5.0, 3.0, 4.0]> : tensor<4xf32> + %1 = util.unfoldable_constant dense<[5.0, 1.0, 3.0, 1.5]> : tensor<4xf32> %result = "tosa.sub"(%0, %1) : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32> check.expect_almost_eq_const(%result, dense<[-4.0, 4.0, 0.0, 2.5]> : tensor<4xf32>) : tensor<4xf32> return } func @tensor_int() { - %0 = iree.unfoldable_constant dense<[1, 5, 3, 4]> : tensor<4xi32> - %1 = iree.unfoldable_constant dense<[5, 1, 3, 1]> : tensor<4xi32> + %0 = util.unfoldable_constant dense<[1, 5, 3, 4]> : tensor<4xi32> + %1 = util.unfoldable_constant dense<[5, 1, 3, 1]> : tensor<4xi32> %result = "tosa.sub"(%0, %1) : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi32> check.expect_eq_const(%result, dense<[-4, 4, 0, 3]> : tensor<4xi32>) : tensor<4xi32> return
diff --git a/iree/test/e2e/tosa_ops/tanh.mlir b/iree/test/e2e/tosa_ops/tanh.mlir index fee3f73..581da93 100644 --- a/iree/test/e2e/tosa_ops/tanh.mlir +++ b/iree/test/e2e/tosa_ops/tanh.mlir
@@ -1,5 +1,5 @@ func @tensor_float() { - %0 = iree.unfoldable_constant dense<[-1.0, 0.0, 0.5, 1.0]> : tensor<4xf32> + %0 = util.unfoldable_constant dense<[-1.0, 0.0, 0.5, 1.0]> : tensor<4xf32> %result = "tosa.tanh"(%0) : (tensor<4xf32>) -> tensor<4xf32> check.expect_almost_eq_const(%result, dense<[-0.761594, 0.0, 0.462117, 0.761594]> : tensor<4xf32>) : tensor<4xf32> return
diff --git a/iree/test/e2e/tosa_ops/while.mlir b/iree/test/e2e/tosa_ops/while.mlir index 080d948..7d967d3 100644 --- a/iree/test/e2e/tosa_ops/while.mlir +++ b/iree/test/e2e/tosa_ops/while.mlir
@@ -8,7 +8,7 @@ // i = 4, n = 2 func @while_test_iter0() { - %0 = iree.unfoldable_constant dense<4> : tensor<i32> + %0 = util.unfoldable_constant dense<4> : tensor<i32> %1 = "tosa.while_loop"(%0) ( { ^bb0(%arg0: tensor<i32>): %2 = "tosa.const"() {value = dense<3> : tensor<i32>} : () -> tensor<i32> @@ -26,7 +26,7 @@ // // i = 2, n = 2 // func @while_test_iter1() { -// %0 = iree.unfoldable_constant dense<2> : tensor<i32> +// %0 = util.unfoldable_constant dense<2> : tensor<i32> // %1 = "tosa.while_loop"(%0) ( { // ^bb0(%arg0: tensor<i32>): // %2 = "tosa.const"() {value = dense<3> : tensor<i32>} : () -> tensor<i32> @@ -44,7 +44,7 @@ // // i = 0, n = 2 // func @while_test_iter2() { -// %0 = iree.unfoldable_constant dense<0> : tensor<i32> +// %0 = util.unfoldable_constant dense<0> : tensor<i32> // %1 = "tosa.while_loop"(%0) ( { // ^bb0(%arg0: tensor<i32>): // %2 = "tosa.const"() {value = dense<3> : tensor<i32>} : () -> tensor<i32> @@ -62,7 +62,7 @@ // // i = 0, n = 1 // func @while_test_iter4() { -// %0 = iree.unfoldable_constant dense<0> : tensor<i32> +// %0 = util.unfoldable_constant dense<0> : tensor<i32> // %1 = "tosa.while_loop"(%0) ( { // ^bb0(%arg0: tensor<i32>): // %2 = "tosa.const"() {value = dense<3> : tensor<i32>} : () -> tensor<i32>
diff --git a/iree/test/e2e/vulkan_specific/compare.mlir b/iree/test/e2e/vulkan_specific/compare.mlir index 282f7ee..0f171be 100644 --- a/iree/test/e2e/vulkan_specific/compare.mlir +++ b/iree/test/e2e/vulkan_specific/compare.mlir
@@ -1,163 +1,163 @@ func @compare_tensor() { - %lhs = iree.unfoldable_constant dense<[1, 2, 7, 4]> : tensor<4xi32> - %rhs = iree.unfoldable_constant dense<[5, 2, 3, 4]> : tensor<4xi32> + %lhs = util.unfoldable_constant dense<[1, 2, 7, 4]> : tensor<4xi32> + %rhs = util.unfoldable_constant dense<[5, 2, 3, 4]> : tensor<4xi32> %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = "EQ"} : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi1> - %c0 = iree.unfoldable_constant dense<0> : tensor<4xi8> - %c1 = iree.unfoldable_constant dense<1> : tensor<4xi8> + %c0 = util.unfoldable_constant dense<0> : tensor<4xi8> + %c1 = util.unfoldable_constant dense<1> : tensor<4xi8> %output = "mhlo.select"(%result, %c1, %c0) : (tensor<4xi1>, tensor<4xi8>, tensor<4xi8>) -> tensor<4xi8> check.expect_eq_const(%output, dense<[0, 1, 0, 1]> : tensor<4xi8>) : tensor<4xi8> return } func @compare_scalar() { - %lhs = iree.unfoldable_constant dense<1> : tensor<i32> - %rhs = iree.unfoldable_constant dense<5> : tensor<i32> + %lhs = util.unfoldable_constant dense<1> : tensor<i32> + %rhs = util.unfoldable_constant dense<5> : tensor<i32> %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = "EQ"} : (tensor<i32>, tensor<i32>) -> tensor<i1> - %c0 = iree.unfoldable_constant dense<0> : tensor<i8> - %c1 = iree.unfoldable_constant dense<1> : tensor<i8> + %c0 = util.unfoldable_constant dense<0> : tensor<i8> + %c1 = util.unfoldable_constant dense<1> : tensor<i8> %output = "mhlo.select"(%result, %c1, %c0) : (tensor<i1>, tensor<i8>, tensor<i8>) -> tensor<i8> check.expect_eq_const(%output, dense<0> : tensor<i8>) : tensor<i8> return } func @compare_i8() { - %lhs = iree.unfoldable_constant dense<1> : tensor<i8> - %rhs = iree.unfoldable_constant dense<5> : tensor<i8> + %lhs = util.unfoldable_constant dense<1> : tensor<i8> + %rhs = util.unfoldable_constant dense<5> : tensor<i8> %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = "EQ"} : (tensor<i8>, tensor<i8>) -> tensor<i1> - %c0 = 
iree.unfoldable_constant dense<0> : tensor<i8> - %c1 = iree.unfoldable_constant dense<1> : tensor<i8> + %c0 = util.unfoldable_constant dense<0> : tensor<i8> + %c1 = util.unfoldable_constant dense<1> : tensor<i8> %output = "mhlo.select"(%result, %c1, %c0) : (tensor<i1>, tensor<i8>, tensor<i8>) -> tensor<i8> check.expect_eq_const(%output, dense<0> : tensor<i8>) : tensor<i8> return } func @compare_i16() { - %lhs = iree.unfoldable_constant dense<1> : tensor<i16> - %rhs = iree.unfoldable_constant dense<5> : tensor<i16> + %lhs = util.unfoldable_constant dense<1> : tensor<i16> + %rhs = util.unfoldable_constant dense<5> : tensor<i16> %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = "EQ"} : (tensor<i16>, tensor<i16>) -> tensor<i1> - %c0 = iree.unfoldable_constant dense<0> : tensor<i8> - %c1 = iree.unfoldable_constant dense<1> : tensor<i8> + %c0 = util.unfoldable_constant dense<0> : tensor<i8> + %c1 = util.unfoldable_constant dense<1> : tensor<i8> %output = "mhlo.select"(%result, %c1, %c0) : (tensor<i1>, tensor<i8>, tensor<i8>) -> tensor<i8> check.expect_eq_const(%output, dense<0> : tensor<i8>) : tensor<i8> return } func @compare_i32() { - %lhs = iree.unfoldable_constant dense<1> : tensor<i32> - %rhs = iree.unfoldable_constant dense<5> : tensor<i32> + %lhs = util.unfoldable_constant dense<1> : tensor<i32> + %rhs = util.unfoldable_constant dense<5> : tensor<i32> %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = "EQ"} : (tensor<i32>, tensor<i32>) -> tensor<i1> - %c0 = iree.unfoldable_constant dense<0> : tensor<i8> - %c1 = iree.unfoldable_constant dense<1> : tensor<i8> + %c0 = util.unfoldable_constant dense<0> : tensor<i8> + %c1 = util.unfoldable_constant dense<1> : tensor<i8> %output = "mhlo.select"(%result, %c1, %c0) : (tensor<i1>, tensor<i8>, tensor<i8>) -> tensor<i8> check.expect_eq_const(%output, dense<0> : tensor<i8>) : tensor<i8> return } func @compare_i64() { - %lhs = iree.unfoldable_constant dense<1> : tensor<i64> - %rhs = 
iree.unfoldable_constant dense<5> : tensor<i64> + %lhs = util.unfoldable_constant dense<1> : tensor<i64> + %rhs = util.unfoldable_constant dense<5> : tensor<i64> %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = "EQ"} : (tensor<i64>, tensor<i64>) -> tensor<i1> - %c0 = iree.unfoldable_constant dense<0> : tensor<i8> - %c1 = iree.unfoldable_constant dense<1> : tensor<i8> + %c0 = util.unfoldable_constant dense<0> : tensor<i8> + %c1 = util.unfoldable_constant dense<1> : tensor<i8> %output = "mhlo.select"(%result, %c1, %c0) : (tensor<i1>, tensor<i8>, tensor<i8>) -> tensor<i8> check.expect_eq_const(%output, dense<0> : tensor<i8>) : tensor<i8> return } func @compare_f32() { - %lhs = iree.unfoldable_constant dense<1.0> : tensor<f32> - %rhs = iree.unfoldable_constant dense<5.0> : tensor<f32> + %lhs = util.unfoldable_constant dense<1.0> : tensor<f32> + %rhs = util.unfoldable_constant dense<5.0> : tensor<f32> %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = "EQ"} : (tensor<f32>, tensor<f32>) -> tensor<i1> - %c0 = iree.unfoldable_constant dense<0> : tensor<i8> - %c1 = iree.unfoldable_constant dense<1> : tensor<i8> + %c0 = util.unfoldable_constant dense<0> : tensor<i8> + %c1 = util.unfoldable_constant dense<1> : tensor<i8> %output = "mhlo.select"(%result, %c1, %c0) : (tensor<i1>, tensor<i8>, tensor<i8>) -> tensor<i8> check.expect_eq_const(%output, dense<0> : tensor<i8>) : tensor<i8> return } func @compare_f64() { - %lhs = iree.unfoldable_constant dense<1.0> : tensor<f64> - %rhs = iree.unfoldable_constant dense<5.0> : tensor<f64> + %lhs = util.unfoldable_constant dense<1.0> : tensor<f64> + %rhs = util.unfoldable_constant dense<5.0> : tensor<f64> %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = "EQ"} : (tensor<f64>, tensor<f64>) -> tensor<i1> - %c0 = iree.unfoldable_constant dense<0> : tensor<i8> - %c1 = iree.unfoldable_constant dense<1> : tensor<i8> + %c0 = util.unfoldable_constant dense<0> : tensor<i8> + %c1 = util.unfoldable_constant dense<1> 
: tensor<i8> %output = "mhlo.select"(%result, %c1, %c0) : (tensor<i1>, tensor<i8>, tensor<i8>) -> tensor<i8> check.expect_eq_const(%output, dense<0> : tensor<i8>) : tensor<i8> return } func @compare_tensor_odd_length() { - %lhs = iree.unfoldable_constant dense<[1, 2, 7]> : tensor<3xi32> - %rhs = iree.unfoldable_constant dense<[5, 2, 3]> : tensor<3xi32> + %lhs = util.unfoldable_constant dense<[1, 2, 7]> : tensor<3xi32> + %rhs = util.unfoldable_constant dense<[5, 2, 3]> : tensor<3xi32> %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = "EQ"} : (tensor<3xi32>, tensor<3xi32>) -> tensor<3xi1> - %c0 = iree.unfoldable_constant dense<0> : tensor<3xi8> - %c1 = iree.unfoldable_constant dense<1> : tensor<3xi8> + %c0 = util.unfoldable_constant dense<0> : tensor<3xi8> + %c1 = util.unfoldable_constant dense<1> : tensor<3xi8> %output = "mhlo.select"(%result, %c1, %c0) : (tensor<3xi1>, tensor<3xi8>, tensor<3xi8>) -> tensor<3xi8> check.expect_eq_const(%output, dense<[0, 1, 0]> : tensor<3xi8>) : tensor<3xi8> return } func @compare_eq() { - %lhs = iree.unfoldable_constant dense<[1, 2, 7, 4]> : tensor<4xi32> - %rhs = iree.unfoldable_constant dense<[5, 2, 3, 4]> : tensor<4xi32> + %lhs = util.unfoldable_constant dense<[1, 2, 7, 4]> : tensor<4xi32> + %rhs = util.unfoldable_constant dense<[5, 2, 3, 4]> : tensor<4xi32> %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = "EQ"} : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi1> - %c0 = iree.unfoldable_constant dense<0> : tensor<4xi8> - %c1 = iree.unfoldable_constant dense<1> : tensor<4xi8> + %c0 = util.unfoldable_constant dense<0> : tensor<4xi8> + %c1 = util.unfoldable_constant dense<1> : tensor<4xi8> %output = "mhlo.select"(%result, %c1, %c0) : (tensor<4xi1>, tensor<4xi8>, tensor<4xi8>) -> tensor<4xi8> check.expect_eq_const(%output, dense<[0, 1, 0, 1]> : tensor<4xi8>) : tensor<4xi8> return } func @compare_ne() { - %lhs = iree.unfoldable_constant dense<[1, 2, 7, 4]> : tensor<4xi32> - %rhs = iree.unfoldable_constant dense<[5, 
2, 3, 4]> : tensor<4xi32> + %lhs = util.unfoldable_constant dense<[1, 2, 7, 4]> : tensor<4xi32> + %rhs = util.unfoldable_constant dense<[5, 2, 3, 4]> : tensor<4xi32> %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = "NE"} : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi1> - %c0 = iree.unfoldable_constant dense<0> : tensor<4xi8> - %c1 = iree.unfoldable_constant dense<1> : tensor<4xi8> + %c0 = util.unfoldable_constant dense<0> : tensor<4xi8> + %c1 = util.unfoldable_constant dense<1> : tensor<4xi8> %output = "mhlo.select"(%result, %c1, %c0) : (tensor<4xi1>, tensor<4xi8>, tensor<4xi8>) -> tensor<4xi8> check.expect_eq_const(%output, dense<[1, 0, 1, 0]> : tensor<4xi8>) : tensor<4xi8> return } func @compare_lt() { - %lhs = iree.unfoldable_constant dense<[1, 2, 7, 4]> : tensor<4xi32> - %rhs = iree.unfoldable_constant dense<[5, 2, 3, 4]> : tensor<4xi32> + %lhs = util.unfoldable_constant dense<[1, 2, 7, 4]> : tensor<4xi32> + %rhs = util.unfoldable_constant dense<[5, 2, 3, 4]> : tensor<4xi32> %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = "LT"} : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi1> - %c0 = iree.unfoldable_constant dense<0> : tensor<4xi8> - %c1 = iree.unfoldable_constant dense<1> : tensor<4xi8> + %c0 = util.unfoldable_constant dense<0> : tensor<4xi8> + %c1 = util.unfoldable_constant dense<1> : tensor<4xi8> %output = "mhlo.select"(%result, %c1, %c0) : (tensor<4xi1>, tensor<4xi8>, tensor<4xi8>) -> tensor<4xi8> check.expect_eq_const(%output, dense<[1, 0, 0, 0]> : tensor<4xi8>) : tensor<4xi8> return } func @compare_le() { - %lhs = iree.unfoldable_constant dense<[1, 2, 7, 4]> : tensor<4xi32> - %rhs = iree.unfoldable_constant dense<[5, 2, 3, 4]> : tensor<4xi32> + %lhs = util.unfoldable_constant dense<[1, 2, 7, 4]> : tensor<4xi32> + %rhs = util.unfoldable_constant dense<[5, 2, 3, 4]> : tensor<4xi32> %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = "LE"} : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi1> - %c0 = iree.unfoldable_constant 
dense<0> : tensor<4xi8> - %c1 = iree.unfoldable_constant dense<1> : tensor<4xi8> + %c0 = util.unfoldable_constant dense<0> : tensor<4xi8> + %c1 = util.unfoldable_constant dense<1> : tensor<4xi8> %output = "mhlo.select"(%result, %c1, %c0) : (tensor<4xi1>, tensor<4xi8>, tensor<4xi8>) -> tensor<4xi8> check.expect_eq_const(%output, dense<[1, 1, 0, 1]> : tensor<4xi8>) : tensor<4xi8> return } func @compare_gt() { - %lhs = iree.unfoldable_constant dense<[1, 2, 7, 4]> : tensor<4xi32> - %rhs = iree.unfoldable_constant dense<[5, 2, 3, 4]> : tensor<4xi32> + %lhs = util.unfoldable_constant dense<[1, 2, 7, 4]> : tensor<4xi32> + %rhs = util.unfoldable_constant dense<[5, 2, 3, 4]> : tensor<4xi32> %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = "GT"} : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi1> - %c0 = iree.unfoldable_constant dense<0> : tensor<4xi8> - %c1 = iree.unfoldable_constant dense<1> : tensor<4xi8> + %c0 = util.unfoldable_constant dense<0> : tensor<4xi8> + %c1 = util.unfoldable_constant dense<1> : tensor<4xi8> %output = "mhlo.select"(%result, %c1, %c0) : (tensor<4xi1>, tensor<4xi8>, tensor<4xi8>) -> tensor<4xi8> check.expect_eq_const(%output, dense<[0, 0, 1, 0]> : tensor<4xi8>) : tensor<4xi8> return } func @compare_ge() { - %lhs = iree.unfoldable_constant dense<[1, 2, 7, 4]> : tensor<4xi32> - %rhs = iree.unfoldable_constant dense<[5, 2, 3, 4]> : tensor<4xi32> + %lhs = util.unfoldable_constant dense<[1, 2, 7, 4]> : tensor<4xi32> + %rhs = util.unfoldable_constant dense<[5, 2, 3, 4]> : tensor<4xi32> %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = "GE"} : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi1> - %c0 = iree.unfoldable_constant dense<0> : tensor<4xi8> - %c1 = iree.unfoldable_constant dense<1> : tensor<4xi8> + %c0 = util.unfoldable_constant dense<0> : tensor<4xi8> + %c1 = util.unfoldable_constant dense<1> : tensor<4xi8> %output = "mhlo.select"(%result, %c1, %c0) : (tensor<4xi1>, tensor<4xi8>, tensor<4xi8>) -> tensor<4xi8> 
check.expect_eq_const(%output, dense<[0, 1, 1, 1]> : tensor<4xi8>) : tensor<4xi8> return
diff --git a/iree/test/e2e/vulkan_specific/conv.mlir b/iree/test/e2e/vulkan_specific/conv.mlir index 21de95a..13f562f 100644 --- a/iree/test/e2e/vulkan_specific/conv.mlir +++ b/iree/test/e2e/vulkan_specific/conv.mlir
@@ -1,5 +1,5 @@ func @conv() { - %0 = iree.unfoldable_constant dense< + %0 = util.unfoldable_constant dense< [[[[0.5 , 0.5212766 ], [0.54255319, 0.56382979], [0.58510638, 0.60638298], @@ -27,7 +27,7 @@ [1.39361702, 1.41489362], [1.43617021, 1.45744681], [1.4787234 , 1.5 ]]]]> : tensor<1x4x6x2xf32> - %1 = iree.unfoldable_constant dense< + %1 = util.unfoldable_constant dense< [[[[0.5 , 0.52857143, 0.55714286], [0.58571429, 0.61428571, 0.64285714]],
diff --git a/iree/test/e2e/vulkan_specific/dot_general.mlir b/iree/test/e2e/vulkan_specific/dot_general.mlir index a8e1fd8..e8c3217 100644 --- a/iree/test/e2e/vulkan_specific/dot_general.mlir +++ b/iree/test/e2e/vulkan_specific/dot_general.mlir
@@ -1,6 +1,6 @@ func @dot_general_trivial_batching_dimension() { - %lhs = iree.unfoldable_constant dense<[[[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]]> : tensor<1x2x3xf32> - %rhs = iree.unfoldable_constant dense<[[ + %lhs = util.unfoldable_constant dense<[[[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]]> : tensor<1x2x3xf32> + %rhs = util.unfoldable_constant dense<[[ [1.0, 2.0, 3.0, 4.0], [1.0, 2.0, 3.0, 4.0], [1.0, 2.0, 3.0, 4.0]]]> : tensor<1x3x4xf32> @@ -18,10 +18,10 @@ } func @dot_general_nontrivial_batching_dimension() { - %lhs = iree.unfoldable_constant dense<[ + %lhs = util.unfoldable_constant dense<[ [[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]]> : tensor<2x2x3xf32> - %rhs = iree.unfoldable_constant dense<[[ + %rhs = util.unfoldable_constant dense<[[ [1.0, 2.0, 3.0, 4.0], [1.0, 2.0, 3.0, 4.0], [1.0, 2.0, 3.0, 4.0] @@ -50,8 +50,8 @@ // TODO(GH-6070): Re-enable this after fixing wrong answer on Pixel 4. // func @large_dot_general2() { -// %lhs = iree.unfoldable_constant dense<1.0> : tensor<4x32x1024xf32> -// %rhs = iree.unfoldable_constant dense<0.4> : tensor<4x1024x64xf32> +// %lhs = util.unfoldable_constant dense<1.0> : tensor<4x32x1024xf32> +// %rhs = util.unfoldable_constant dense<0.4> : tensor<4x1024x64xf32> // %res = "mhlo.dot_general"(%lhs, %rhs) { // dot_dimension_numbers = { // lhs_batching_dimensions = dense<0> : tensor<1xi64>,
diff --git a/iree/test/e2e/vulkan_specific/gemm.mlir b/iree/test/e2e/vulkan_specific/gemm.mlir index bb4dd76..5163cc9 100644 --- a/iree/test/e2e/vulkan_specific/gemm.mlir +++ b/iree/test/e2e/vulkan_specific/gemm.mlir
@@ -1,5 +1,5 @@ func @gemm() { - %0 = iree.unfoldable_constant dense< + %0 = util.unfoldable_constant dense< [[0. , 0.00515464, 0.01030928, 0.01546392, 0.02061856, 0.0257732 , 0.03092784, 0.03608247, 0.04123711, 0.04639175, 0.05154639, 0.05670103, 0.06185567, 0.06701031, 0.07216495], @@ -40,7 +40,7 @@ 0.95360825, 0.95876289, 0.96391753, 0.96907216, 0.9742268 , 0.97938144, 0.98453608, 0.98969072, 0.99484536, 1. ]]> : tensor<13x15xf32> - %1 = iree.unfoldable_constant dense< + %1 = util.unfoldable_constant dense< [[0. , 0.00558659, 0.01117318, 0.01675978, 0.02234637, 0.02793296, 0.03351955, 0.03910615, 0.04469274, 0.05027933, 0.05586592, 0.06145251],
diff --git a/iree/test/e2e/vulkan_specific/log_plus_one.mlir b/iree/test/e2e/vulkan_specific/log_plus_one.mlir index 4b4bfad..70e7120 100644 --- a/iree/test/e2e/vulkan_specific/log_plus_one.mlir +++ b/iree/test/e2e/vulkan_specific/log_plus_one.mlir
@@ -1,5 +1,5 @@ func @log_plus_one() { - %input = iree.unfoldable_constant dense<[0.0, 0.5, 1.0, 5.0]> : tensor<4xf32> + %input = util.unfoldable_constant dense<[0.0, 0.5, 1.0, 5.0]> : tensor<4xf32> %result = "mhlo.log_plus_one"(%input) : (tensor<4xf32>) -> tensor<4xf32> check.expect_almost_eq_const(%result, dense<[0.0, 0.4054651, 0.6931472, 1.7917595]> : tensor<4xf32>) : tensor<4xf32> return
diff --git a/iree/test/e2e/vulkan_specific/pw_add_multiwg.mlir b/iree/test/e2e/vulkan_specific/pw_add_multiwg.mlir index 723286c..fc9dec7 100644 --- a/iree/test/e2e/vulkan_specific/pw_add_multiwg.mlir +++ b/iree/test/e2e/vulkan_specific/pw_add_multiwg.mlir
@@ -1,9 +1,9 @@ func @pw_add() { - %0 = iree.unfoldable_constant dense<[[1, 2, 3, 4, 5, 6, 7, 8], + %0 = util.unfoldable_constant dense<[[1, 2, 3, 4, 5, 6, 7, 8], [9, 10, 11, 12, 13, 14, 15, 16], [17, 18, 19, 20, 21, 22, 23, 24], [25, 26, 27, 28, 29, 30, 31, 32]]> : tensor<4x8xi32> - %1 = iree.unfoldable_constant dense<[[2, 4, 6, 8, 10, 12, 14, 16], + %1 = util.unfoldable_constant dense<[[2, 4, 6, 8, 10, 12, 14, 16], [18, 20, 22, 24, 26, 28, 30, 32], [34, 36, 38, 40, 42, 44, 46, 48], [50, 52, 54, 56, 58, 60, 62, 64]]> : tensor<4x8xi32>
diff --git a/iree/test/e2e/vulkan_specific/reduce.mlir b/iree/test/e2e/vulkan_specific/reduce.mlir index 435c0f6..5b151d2 100644 --- a/iree/test/e2e/vulkan_specific/reduce.mlir +++ b/iree/test/e2e/vulkan_specific/reduce.mlir
@@ -1,6 +1,6 @@ func @reduce_dim_1() { - %0 = iree.unfoldable_constant dense<[[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]]> : tensor<2x5xi32> - %1 = iree.unfoldable_constant dense<10> : tensor<i32> + %0 = util.unfoldable_constant dense<[[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]]> : tensor<2x5xi32> + %1 = util.unfoldable_constant dense<10> : tensor<i32> %2 = "mhlo.reduce"(%0, %1) ( { ^bb0(%arg0 : tensor<i32>, %arg1 : tensor<i32>): %3 = "mhlo.add"(%arg0, %arg1) : (tensor<i32>, tensor<i32>) -> tensor<i32> @@ -13,7 +13,7 @@ // Constants get folded in which linalg.indexed_generic ops. Check to // make sure this works as expected. func @reduce_dim_1_const() { - %0 = iree.unfoldable_constant dense<[[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]]> : tensor<2x5xi32> + %0 = util.unfoldable_constant dense<[[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]]> : tensor<2x5xi32> %1 = constant dense<10> : tensor<i32> %2 = "mhlo.reduce"(%0, %1) ( { ^bb0(%arg0 : tensor<i32>, %arg1 : tensor<i32>): @@ -25,8 +25,8 @@ } func @reduce_dim_0() { - %0 = iree.unfoldable_constant dense<[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]]> : tensor<1x10xi32> - %1 = iree.unfoldable_constant dense<10> : tensor<i32> + %0 = util.unfoldable_constant dense<[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]]> : tensor<1x10xi32> + %1 = util.unfoldable_constant dense<10> : tensor<i32> %2 = "mhlo.reduce"(%0, %1) ( { ^bb0(%arg0 : tensor<i32>, %arg1 : tensor<i32>): %3 = "mhlo.add"(%arg0, %arg1) : (tensor<i32>, tensor<i32>) -> tensor<i32> @@ -37,8 +37,8 @@ } func @reduce_to_scalar() { - %0 = iree.unfoldable_constant dense<[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]> : tensor<10xi32> - %1 = iree.unfoldable_constant dense<10> : tensor<i32> + %0 = util.unfoldable_constant dense<[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]> : tensor<10xi32> + %1 = util.unfoldable_constant dense<10> : tensor<i32> %2 = "mhlo.reduce"(%0, %1) ( { ^bb0(%arg0 : tensor<i32>, %arg1 : tensor<i32>): %3 = "mhlo.add"(%arg0, %arg1) : (tensor<i32>, tensor<i32>) -> tensor<i32>
diff --git a/iree/test/e2e/vulkan_specific/vectorized_conv.mlir b/iree/test/e2e/vulkan_specific/vectorized_conv.mlir index 90a9f4d..ab6a290 100644 --- a/iree/test/e2e/vulkan_specific/vectorized_conv.mlir +++ b/iree/test/e2e/vulkan_specific/vectorized_conv.mlir
@@ -1,5 +1,5 @@ func @conv() { - %input = iree.unfoldable_constant dense< + %input = util.unfoldable_constant dense< [[[[6.0, 7.5, 0.0, 1.5], [1.5, 3.5, 4.5, 2.0], [3.0, 6.0, 0.5, 3.0]], @@ -10,7 +10,7 @@ [4.5, 0.0, 5.0, 1.5], [5.5, 1.0, 0.0, 0.0]]]]> : tensor<1x3x3x4xf32> - %filter = iree.unfoldable_constant dense< + %filter = util.unfoldable_constant dense< [[[[2.0, 2.5, 2.5, 3.0, 4.0, 2.0, 0.5, 2.0, 4.5, 5.0, 5.0, 4.0, 0.5, 0.5, 3.5, 4.5, 4.5, 1.5, 3.0, 3.5, 1.0, 0.0, 1.5, 2.5, 4.5, 5.0, 2.0, 2.0, 3.0, 2.0, 2.0, 1.5], [2.0, 2.0, 4.0, 2.0, 1.5, 5.0, 3.5, 2.5, 2.5, 0.0, 0.5, 2.5, 4.5, 1.5, 0.0, 2.5, @@ -69,13 +69,13 @@ } func @depthwise_conv() { - %input = iree.unfoldable_constant dense< + %input = util.unfoldable_constant dense< [[[[6.0, 7.5, 0.0, 1.5, 1.5, 3.5, 4.5, 2.0, 3.0, 6.0, 0.5, 3.0, 3.5, 7.0, 2.5, 6.5], [4.0, 4.5, 8.0, 2.5, 7.5, 7.5, 0.0, 1.5, 7.0, 3.5, 0.0, 0.5, 4.5, 0.0, 5.0, 1.5], [5.5, 1.0, 0.0, 0.0, 2.0, 2.5, 3.0, 4.0, 7.5, 2.0, 4.5, 5.0, 0.5, 0.5, 3.5, 4.5], [1.5, 3.0, 5.5, 7.0, 0.0, 7.0, 1.5, 6.0, 5.0, 5.5, 2.0, 3.0, 2.0, 7.5, 1.5, 6.0]]]]> : tensor<1x1x4x16xf32> - %filter = iree.unfoldable_constant dense< + %filter = util.unfoldable_constant dense< [[[[2.0, 2.0, 4.0, 2.0, 1.5, 5.0, 3.5, 2.5, 2.5, 0.0, 0.5, 2.5, 4.5, 1.5, 0.0, 2.5]]]]> : tensor<1x1x1x16xf32>
diff --git a/iree/test/e2e/xla_ops/abs.mlir b/iree/test/e2e/xla_ops/abs.mlir index 078cee8..3ef7097 100644 --- a/iree/test/e2e/xla_ops/abs.mlir +++ b/iree/test/e2e/xla_ops/abs.mlir
@@ -1,12 +1,12 @@ func @tensor() { - %input = iree.unfoldable_constant dense<[-1.0, -2.0, 3.0, 4.0]> : tensor<4xf32> + %input = util.unfoldable_constant dense<[-1.0, -2.0, 3.0, 4.0]> : tensor<4xf32> %result = "mhlo.abs"(%input) : (tensor<4xf32>) -> tensor<4xf32> check.expect_almost_eq_const(%result, dense<[1.0, 2.0, 3.0, 4.0]> : tensor<4xf32>) : tensor<4xf32> return } func @scalar() { - %input = iree.unfoldable_constant dense<-4.0> : tensor<f32> + %input = util.unfoldable_constant dense<-4.0> : tensor<f32> %result = "mhlo.abs"(%input) : (tensor<f32>) -> tensor<f32> check.expect_almost_eq_const(%result, dense<4.0> : tensor<f32>) : tensor<f32> return
diff --git a/iree/test/e2e/xla_ops/add.mlir b/iree/test/e2e/xla_ops/add.mlir index 0aa185a..f5fe42d 100644 --- a/iree/test/e2e/xla_ops/add.mlir +++ b/iree/test/e2e/xla_ops/add.mlir
@@ -1,18 +1,18 @@ func @tensor() { - %0 = iree.unfoldable_constant dense<[1.0, 2.0, 3.0, 4.0]> : tensor<4xf32> - %1 = iree.unfoldable_constant dense<[5.0, 6.0, 7.0, 8.0]> : tensor<4xf32> + %0 = util.unfoldable_constant dense<[1.0, 2.0, 3.0, 4.0]> : tensor<4xf32> + %1 = util.unfoldable_constant dense<[5.0, 6.0, 7.0, 8.0]> : tensor<4xf32> %result = "mhlo.add"(%0, %1) : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32> check.expect_almost_eq_const(%result, dense<[6.0, 8.0, 10.0, 12.0]> : tensor<4xf32>) : tensor<4xf32> return } func @tensor_4d() { - %0 = iree.unfoldable_constant dense<[[[[1.0, 2.0], [3.0, 4.0]], + %0 = util.unfoldable_constant dense<[[[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]], [[[9.0, 10.0], [11.0, 12.0]], [[13.0, 14.0], [15.0, 16.0]]]]> : tensor<2x2x2x2xf32> - %1 = iree.unfoldable_constant dense<[[[[1.0, 2.0], [3.0, 4.0]], + %1 = util.unfoldable_constant dense<[[[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]], [[[9.0, 10.0], [11.0, 12.0]], [[13.0, 14.0], [15.0, 16.0]]]]> :
diff --git a/iree/test/e2e/xla_ops/batch_norm_inference.mlir b/iree/test/e2e/xla_ops/batch_norm_inference.mlir index 41a30f2..20cdcbc 100644 --- a/iree/test/e2e/xla_ops/batch_norm_inference.mlir +++ b/iree/test/e2e/xla_ops/batch_norm_inference.mlir
@@ -1,9 +1,9 @@ func @batchnorm_inference_4x2() { - %x = iree.unfoldable_constant dense<[[1.0, 2.0, 3.0, 4.0],[5.0, 6.0, 7.0, 8.0]]> : tensor<2x4xf32> - %mean = iree.unfoldable_constant dense<[1.0, 1.0, 1.0, 1.0]> : tensor<4xf32> - %var = iree.unfoldable_constant dense<[2.0, 2.0, 2.0, 2.0]> : tensor<4xf32> - %offset = iree.unfoldable_constant dense<[1.0, 1.0, 1.0, 1.0]> : tensor<4xf32> - %scale = iree.unfoldable_constant dense<[1.0, 1.0, 1.0, 1.0]> : tensor<4xf32> + %x = util.unfoldable_constant dense<[[1.0, 2.0, 3.0, 4.0],[5.0, 6.0, 7.0, 8.0]]> : tensor<2x4xf32> + %mean = util.unfoldable_constant dense<[1.0, 1.0, 1.0, 1.0]> : tensor<4xf32> + %var = util.unfoldable_constant dense<[2.0, 2.0, 2.0, 2.0]> : tensor<4xf32> + %offset = util.unfoldable_constant dense<[1.0, 1.0, 1.0, 1.0]> : tensor<4xf32> + %scale = util.unfoldable_constant dense<[1.0, 1.0, 1.0, 1.0]> : tensor<4xf32> %result = "mhlo.batch_norm_inference"(%x, %mean, %var, %offset, %scale) {epsilon = 1.000000e-03 : f32, feature_index = 1 : i64} : (tensor<2x4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>) -> tensor<2x4xf32> // TODO(b/146898896): This should probably be a fuzzier check with round values. check.expect_almost_eq_const(%result, dense<[
diff --git a/iree/test/e2e/xla_ops/broadcast.mlir b/iree/test/e2e/xla_ops/broadcast.mlir index 62edccd..5399c40 100644 --- a/iree/test/e2e/xla_ops/broadcast.mlir +++ b/iree/test/e2e/xla_ops/broadcast.mlir
@@ -1,5 +1,5 @@ func @broadcast_2D_3D() { - %input = iree.unfoldable_constant dense<[[1, 2, 3, 4], + %input = util.unfoldable_constant dense<[[1, 2, 3, 4], [5, 6, 7, 8]]> : tensor<2x4xi32> %result = "mhlo.broadcast"(%input) {broadcast_sizes = dense<3> : tensor<1xi64>} : (tensor<2x4xi32>) -> tensor<3x2x4xi32> check.expect_eq_const(%result, dense<[ @@ -10,7 +10,7 @@ } func @broadcast_3D_scalar() { - %input = iree.unfoldable_constant dense<42> : tensor<i32> + %input = util.unfoldable_constant dense<42> : tensor<i32> %result = "mhlo.broadcast"(%input) {broadcast_sizes = dense<[3, 2, 4]> : tensor<3xi64>} : (tensor<i32>) -> tensor<3x2x4xi32> check.expect_eq_const(%result, dense<[ [[42, 42, 42, 42], [42, 42, 42, 42]],
diff --git a/iree/test/e2e/xla_ops/broadcast_add.mlir b/iree/test/e2e/xla_ops/broadcast_add.mlir index 10d770f..0fc73d2 100644 --- a/iree/test/e2e/xla_ops/broadcast_add.mlir +++ b/iree/test/e2e/xla_ops/broadcast_add.mlir
@@ -1,6 +1,6 @@ func @tensor() { - %0 = iree.unfoldable_constant dense<[1.0, 2.0, 3.0, 4.0]> : tensor<4xf32> - %1 = iree.unfoldable_constant dense<2.0> : tensor<3x4xf32> + %0 = util.unfoldable_constant dense<[1.0, 2.0, 3.0, 4.0]> : tensor<4xf32> + %1 = util.unfoldable_constant dense<2.0> : tensor<3x4xf32> %result = "chlo.broadcast_add"(%0, %1) : (tensor<4xf32>, tensor<3x4xf32>) -> tensor<3x4xf32> check.expect_almost_eq_const(%result, dense<[[3.0, 4.0, 5.0, 6.0],
diff --git a/iree/test/e2e/xla_ops/broadcast_in_dim.mlir b/iree/test/e2e/xla_ops/broadcast_in_dim.mlir index b832e97..ad0e322 100644 --- a/iree/test/e2e/xla_ops/broadcast_in_dim.mlir +++ b/iree/test/e2e/xla_ops/broadcast_in_dim.mlir
@@ -1,5 +1,5 @@ func @broadcast_in_dim_2D_3D() { - %input = iree.unfoldable_constant dense<[[1, 2, 3, 4], + %input = util.unfoldable_constant dense<[[1, 2, 3, 4], [5, 6, 7, 8]]> : tensor<2x4xi32> %res = "mhlo.broadcast_in_dim"(%input) {broadcast_dimensions = dense<[1, 2]> : tensor<2xi64>} : (tensor<2x4xi32>) -> tensor<3x2x4xi32> check.expect_eq_const(%res, dense<[ @@ -10,7 +10,7 @@ } func @broadcast_in_dim_3D_scalar() { - %input = iree.unfoldable_constant dense<42> : tensor<i32> + %input = util.unfoldable_constant dense<42> : tensor<i32> %res = "mhlo.broadcast_in_dim"(%input) {broadcast_dimensions = dense<[]> : tensor<0xi64>} : (tensor<i32>) -> tensor<3x2x4xi32> check.expect_eq_const(%res, dense<42> : tensor<3x2x4xi32>) : tensor<3x2x4xi32> return
diff --git a/iree/test/e2e/xla_ops/clamp.mlir b/iree/test/e2e/xla_ops/clamp.mlir index 0dc5a3c..95e02f2 100644 --- a/iree/test/e2e/xla_ops/clamp.mlir +++ b/iree/test/e2e/xla_ops/clamp.mlir
@@ -1,34 +1,34 @@ func @i8() { - %min = iree.unfoldable_constant dense<[0, 0, 0, 0]> : tensor<4xi8> - %val = iree.unfoldable_constant dense<[-2, 4, 8, 12]> : tensor<4xi8> - %max = iree.unfoldable_constant dense<[10, 10, 10, 10]> : tensor<4xi8> + %min = util.unfoldable_constant dense<[0, 0, 0, 0]> : tensor<4xi8> + %val = util.unfoldable_constant dense<[-2, 4, 8, 12]> : tensor<4xi8> + %max = util.unfoldable_constant dense<[10, 10, 10, 10]> : tensor<4xi8> %result = "mhlo.clamp"(%min, %val, %max) : (tensor<4xi8>, tensor<4xi8>, tensor<4xi8>) -> tensor<4xi8> check.expect_eq_const(%result, dense<[0, 4, 8, 10]> : tensor<4xi8>) : tensor<4xi8> return } func @i16() { - %min = iree.unfoldable_constant dense<[0, 0, 0, 0]> : tensor<4xi16> - %val = iree.unfoldable_constant dense<[-2, 4, 8, 12]> : tensor<4xi16> - %max = iree.unfoldable_constant dense<[10, 10, 10, 10]> : tensor<4xi16> + %min = util.unfoldable_constant dense<[0, 0, 0, 0]> : tensor<4xi16> + %val = util.unfoldable_constant dense<[-2, 4, 8, 12]> : tensor<4xi16> + %max = util.unfoldable_constant dense<[10, 10, 10, 10]> : tensor<4xi16> %result = "mhlo.clamp"(%min, %val, %max) : (tensor<4xi16>, tensor<4xi16>, tensor<4xi16>) -> tensor<4xi16> check.expect_eq_const(%result, dense<[0, 4, 8, 10]> : tensor<4xi16>) : tensor<4xi16> return } func @i32() { - %min = iree.unfoldable_constant dense<[0, 0, 0, 0]> : tensor<4xi32> - %val = iree.unfoldable_constant dense<[-2, 4, 8, 12]> : tensor<4xi32> - %max = iree.unfoldable_constant dense<[10, 10, 10, 10]> : tensor<4xi32> + %min = util.unfoldable_constant dense<[0, 0, 0, 0]> : tensor<4xi32> + %val = util.unfoldable_constant dense<[-2, 4, 8, 12]> : tensor<4xi32> + %max = util.unfoldable_constant dense<[10, 10, 10, 10]> : tensor<4xi32> %result = "mhlo.clamp"(%min, %val, %max) : (tensor<4xi32>, tensor<4xi32>, tensor<4xi32>) -> tensor<4xi32> check.expect_eq_const(%result, dense<[0, 4, 8, 10]> : tensor<4xi32>) : tensor<4xi32> return } func @f32() { - %min = iree.unfoldable_constant 
dense<[0.0, 0.0, 0.0, 0.0]> : tensor<4xf32> - %val = iree.unfoldable_constant dense<[-2.0, 4.0, 8.0, 12.0]> : tensor<4xf32> - %max = iree.unfoldable_constant dense<[10.0, 10.0, 10.0, 10.0]> : tensor<4xf32> + %min = util.unfoldable_constant dense<[0.0, 0.0, 0.0, 0.0]> : tensor<4xf32> + %val = util.unfoldable_constant dense<[-2.0, 4.0, 8.0, 12.0]> : tensor<4xf32> + %max = util.unfoldable_constant dense<[10.0, 10.0, 10.0, 10.0]> : tensor<4xf32> %result = "mhlo.clamp"(%min, %val, %max) : (tensor<4xf32>, tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32> check.expect_eq_const(%result, dense<[0.0, 4.0, 8.0, 10.0]> : tensor<4xf32>) : tensor<4xf32> return
diff --git a/iree/test/e2e/xla_ops/compare.mlir b/iree/test/e2e/xla_ops/compare.mlir index 282f7ee..0f171be 100644 --- a/iree/test/e2e/xla_ops/compare.mlir +++ b/iree/test/e2e/xla_ops/compare.mlir
@@ -1,163 +1,163 @@ func @compare_tensor() { - %lhs = iree.unfoldable_constant dense<[1, 2, 7, 4]> : tensor<4xi32> - %rhs = iree.unfoldable_constant dense<[5, 2, 3, 4]> : tensor<4xi32> + %lhs = util.unfoldable_constant dense<[1, 2, 7, 4]> : tensor<4xi32> + %rhs = util.unfoldable_constant dense<[5, 2, 3, 4]> : tensor<4xi32> %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = "EQ"} : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi1> - %c0 = iree.unfoldable_constant dense<0> : tensor<4xi8> - %c1 = iree.unfoldable_constant dense<1> : tensor<4xi8> + %c0 = util.unfoldable_constant dense<0> : tensor<4xi8> + %c1 = util.unfoldable_constant dense<1> : tensor<4xi8> %output = "mhlo.select"(%result, %c1, %c0) : (tensor<4xi1>, tensor<4xi8>, tensor<4xi8>) -> tensor<4xi8> check.expect_eq_const(%output, dense<[0, 1, 0, 1]> : tensor<4xi8>) : tensor<4xi8> return } func @compare_scalar() { - %lhs = iree.unfoldable_constant dense<1> : tensor<i32> - %rhs = iree.unfoldable_constant dense<5> : tensor<i32> + %lhs = util.unfoldable_constant dense<1> : tensor<i32> + %rhs = util.unfoldable_constant dense<5> : tensor<i32> %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = "EQ"} : (tensor<i32>, tensor<i32>) -> tensor<i1> - %c0 = iree.unfoldable_constant dense<0> : tensor<i8> - %c1 = iree.unfoldable_constant dense<1> : tensor<i8> + %c0 = util.unfoldable_constant dense<0> : tensor<i8> + %c1 = util.unfoldable_constant dense<1> : tensor<i8> %output = "mhlo.select"(%result, %c1, %c0) : (tensor<i1>, tensor<i8>, tensor<i8>) -> tensor<i8> check.expect_eq_const(%output, dense<0> : tensor<i8>) : tensor<i8> return } func @compare_i8() { - %lhs = iree.unfoldable_constant dense<1> : tensor<i8> - %rhs = iree.unfoldable_constant dense<5> : tensor<i8> + %lhs = util.unfoldable_constant dense<1> : tensor<i8> + %rhs = util.unfoldable_constant dense<5> : tensor<i8> %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = "EQ"} : (tensor<i8>, tensor<i8>) -> tensor<i1> - %c0 = 
iree.unfoldable_constant dense<0> : tensor<i8> - %c1 = iree.unfoldable_constant dense<1> : tensor<i8> + %c0 = util.unfoldable_constant dense<0> : tensor<i8> + %c1 = util.unfoldable_constant dense<1> : tensor<i8> %output = "mhlo.select"(%result, %c1, %c0) : (tensor<i1>, tensor<i8>, tensor<i8>) -> tensor<i8> check.expect_eq_const(%output, dense<0> : tensor<i8>) : tensor<i8> return } func @compare_i16() { - %lhs = iree.unfoldable_constant dense<1> : tensor<i16> - %rhs = iree.unfoldable_constant dense<5> : tensor<i16> + %lhs = util.unfoldable_constant dense<1> : tensor<i16> + %rhs = util.unfoldable_constant dense<5> : tensor<i16> %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = "EQ"} : (tensor<i16>, tensor<i16>) -> tensor<i1> - %c0 = iree.unfoldable_constant dense<0> : tensor<i8> - %c1 = iree.unfoldable_constant dense<1> : tensor<i8> + %c0 = util.unfoldable_constant dense<0> : tensor<i8> + %c1 = util.unfoldable_constant dense<1> : tensor<i8> %output = "mhlo.select"(%result, %c1, %c0) : (tensor<i1>, tensor<i8>, tensor<i8>) -> tensor<i8> check.expect_eq_const(%output, dense<0> : tensor<i8>) : tensor<i8> return } func @compare_i32() { - %lhs = iree.unfoldable_constant dense<1> : tensor<i32> - %rhs = iree.unfoldable_constant dense<5> : tensor<i32> + %lhs = util.unfoldable_constant dense<1> : tensor<i32> + %rhs = util.unfoldable_constant dense<5> : tensor<i32> %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = "EQ"} : (tensor<i32>, tensor<i32>) -> tensor<i1> - %c0 = iree.unfoldable_constant dense<0> : tensor<i8> - %c1 = iree.unfoldable_constant dense<1> : tensor<i8> + %c0 = util.unfoldable_constant dense<0> : tensor<i8> + %c1 = util.unfoldable_constant dense<1> : tensor<i8> %output = "mhlo.select"(%result, %c1, %c0) : (tensor<i1>, tensor<i8>, tensor<i8>) -> tensor<i8> check.expect_eq_const(%output, dense<0> : tensor<i8>) : tensor<i8> return } func @compare_i64() { - %lhs = iree.unfoldable_constant dense<1> : tensor<i64> - %rhs = 
iree.unfoldable_constant dense<5> : tensor<i64> + %lhs = util.unfoldable_constant dense<1> : tensor<i64> + %rhs = util.unfoldable_constant dense<5> : tensor<i64> %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = "EQ"} : (tensor<i64>, tensor<i64>) -> tensor<i1> - %c0 = iree.unfoldable_constant dense<0> : tensor<i8> - %c1 = iree.unfoldable_constant dense<1> : tensor<i8> + %c0 = util.unfoldable_constant dense<0> : tensor<i8> + %c1 = util.unfoldable_constant dense<1> : tensor<i8> %output = "mhlo.select"(%result, %c1, %c0) : (tensor<i1>, tensor<i8>, tensor<i8>) -> tensor<i8> check.expect_eq_const(%output, dense<0> : tensor<i8>) : tensor<i8> return } func @compare_f32() { - %lhs = iree.unfoldable_constant dense<1.0> : tensor<f32> - %rhs = iree.unfoldable_constant dense<5.0> : tensor<f32> + %lhs = util.unfoldable_constant dense<1.0> : tensor<f32> + %rhs = util.unfoldable_constant dense<5.0> : tensor<f32> %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = "EQ"} : (tensor<f32>, tensor<f32>) -> tensor<i1> - %c0 = iree.unfoldable_constant dense<0> : tensor<i8> - %c1 = iree.unfoldable_constant dense<1> : tensor<i8> + %c0 = util.unfoldable_constant dense<0> : tensor<i8> + %c1 = util.unfoldable_constant dense<1> : tensor<i8> %output = "mhlo.select"(%result, %c1, %c0) : (tensor<i1>, tensor<i8>, tensor<i8>) -> tensor<i8> check.expect_eq_const(%output, dense<0> : tensor<i8>) : tensor<i8> return } func @compare_f64() { - %lhs = iree.unfoldable_constant dense<1.0> : tensor<f64> - %rhs = iree.unfoldable_constant dense<5.0> : tensor<f64> + %lhs = util.unfoldable_constant dense<1.0> : tensor<f64> + %rhs = util.unfoldable_constant dense<5.0> : tensor<f64> %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = "EQ"} : (tensor<f64>, tensor<f64>) -> tensor<i1> - %c0 = iree.unfoldable_constant dense<0> : tensor<i8> - %c1 = iree.unfoldable_constant dense<1> : tensor<i8> + %c0 = util.unfoldable_constant dense<0> : tensor<i8> + %c1 = util.unfoldable_constant dense<1> 
: tensor<i8> %output = "mhlo.select"(%result, %c1, %c0) : (tensor<i1>, tensor<i8>, tensor<i8>) -> tensor<i8> check.expect_eq_const(%output, dense<0> : tensor<i8>) : tensor<i8> return } func @compare_tensor_odd_length() { - %lhs = iree.unfoldable_constant dense<[1, 2, 7]> : tensor<3xi32> - %rhs = iree.unfoldable_constant dense<[5, 2, 3]> : tensor<3xi32> + %lhs = util.unfoldable_constant dense<[1, 2, 7]> : tensor<3xi32> + %rhs = util.unfoldable_constant dense<[5, 2, 3]> : tensor<3xi32> %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = "EQ"} : (tensor<3xi32>, tensor<3xi32>) -> tensor<3xi1> - %c0 = iree.unfoldable_constant dense<0> : tensor<3xi8> - %c1 = iree.unfoldable_constant dense<1> : tensor<3xi8> + %c0 = util.unfoldable_constant dense<0> : tensor<3xi8> + %c1 = util.unfoldable_constant dense<1> : tensor<3xi8> %output = "mhlo.select"(%result, %c1, %c0) : (tensor<3xi1>, tensor<3xi8>, tensor<3xi8>) -> tensor<3xi8> check.expect_eq_const(%output, dense<[0, 1, 0]> : tensor<3xi8>) : tensor<3xi8> return } func @compare_eq() { - %lhs = iree.unfoldable_constant dense<[1, 2, 7, 4]> : tensor<4xi32> - %rhs = iree.unfoldable_constant dense<[5, 2, 3, 4]> : tensor<4xi32> + %lhs = util.unfoldable_constant dense<[1, 2, 7, 4]> : tensor<4xi32> + %rhs = util.unfoldable_constant dense<[5, 2, 3, 4]> : tensor<4xi32> %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = "EQ"} : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi1> - %c0 = iree.unfoldable_constant dense<0> : tensor<4xi8> - %c1 = iree.unfoldable_constant dense<1> : tensor<4xi8> + %c0 = util.unfoldable_constant dense<0> : tensor<4xi8> + %c1 = util.unfoldable_constant dense<1> : tensor<4xi8> %output = "mhlo.select"(%result, %c1, %c0) : (tensor<4xi1>, tensor<4xi8>, tensor<4xi8>) -> tensor<4xi8> check.expect_eq_const(%output, dense<[0, 1, 0, 1]> : tensor<4xi8>) : tensor<4xi8> return } func @compare_ne() { - %lhs = iree.unfoldable_constant dense<[1, 2, 7, 4]> : tensor<4xi32> - %rhs = iree.unfoldable_constant dense<[5, 
2, 3, 4]> : tensor<4xi32> + %lhs = util.unfoldable_constant dense<[1, 2, 7, 4]> : tensor<4xi32> + %rhs = util.unfoldable_constant dense<[5, 2, 3, 4]> : tensor<4xi32> %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = "NE"} : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi1> - %c0 = iree.unfoldable_constant dense<0> : tensor<4xi8> - %c1 = iree.unfoldable_constant dense<1> : tensor<4xi8> + %c0 = util.unfoldable_constant dense<0> : tensor<4xi8> + %c1 = util.unfoldable_constant dense<1> : tensor<4xi8> %output = "mhlo.select"(%result, %c1, %c0) : (tensor<4xi1>, tensor<4xi8>, tensor<4xi8>) -> tensor<4xi8> check.expect_eq_const(%output, dense<[1, 0, 1, 0]> : tensor<4xi8>) : tensor<4xi8> return } func @compare_lt() { - %lhs = iree.unfoldable_constant dense<[1, 2, 7, 4]> : tensor<4xi32> - %rhs = iree.unfoldable_constant dense<[5, 2, 3, 4]> : tensor<4xi32> + %lhs = util.unfoldable_constant dense<[1, 2, 7, 4]> : tensor<4xi32> + %rhs = util.unfoldable_constant dense<[5, 2, 3, 4]> : tensor<4xi32> %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = "LT"} : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi1> - %c0 = iree.unfoldable_constant dense<0> : tensor<4xi8> - %c1 = iree.unfoldable_constant dense<1> : tensor<4xi8> + %c0 = util.unfoldable_constant dense<0> : tensor<4xi8> + %c1 = util.unfoldable_constant dense<1> : tensor<4xi8> %output = "mhlo.select"(%result, %c1, %c0) : (tensor<4xi1>, tensor<4xi8>, tensor<4xi8>) -> tensor<4xi8> check.expect_eq_const(%output, dense<[1, 0, 0, 0]> : tensor<4xi8>) : tensor<4xi8> return } func @compare_le() { - %lhs = iree.unfoldable_constant dense<[1, 2, 7, 4]> : tensor<4xi32> - %rhs = iree.unfoldable_constant dense<[5, 2, 3, 4]> : tensor<4xi32> + %lhs = util.unfoldable_constant dense<[1, 2, 7, 4]> : tensor<4xi32> + %rhs = util.unfoldable_constant dense<[5, 2, 3, 4]> : tensor<4xi32> %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = "LE"} : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi1> - %c0 = iree.unfoldable_constant 
dense<0> : tensor<4xi8> - %c1 = iree.unfoldable_constant dense<1> : tensor<4xi8> + %c0 = util.unfoldable_constant dense<0> : tensor<4xi8> + %c1 = util.unfoldable_constant dense<1> : tensor<4xi8> %output = "mhlo.select"(%result, %c1, %c0) : (tensor<4xi1>, tensor<4xi8>, tensor<4xi8>) -> tensor<4xi8> check.expect_eq_const(%output, dense<[1, 1, 0, 1]> : tensor<4xi8>) : tensor<4xi8> return } func @compare_gt() { - %lhs = iree.unfoldable_constant dense<[1, 2, 7, 4]> : tensor<4xi32> - %rhs = iree.unfoldable_constant dense<[5, 2, 3, 4]> : tensor<4xi32> + %lhs = util.unfoldable_constant dense<[1, 2, 7, 4]> : tensor<4xi32> + %rhs = util.unfoldable_constant dense<[5, 2, 3, 4]> : tensor<4xi32> %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = "GT"} : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi1> - %c0 = iree.unfoldable_constant dense<0> : tensor<4xi8> - %c1 = iree.unfoldable_constant dense<1> : tensor<4xi8> + %c0 = util.unfoldable_constant dense<0> : tensor<4xi8> + %c1 = util.unfoldable_constant dense<1> : tensor<4xi8> %output = "mhlo.select"(%result, %c1, %c0) : (tensor<4xi1>, tensor<4xi8>, tensor<4xi8>) -> tensor<4xi8> check.expect_eq_const(%output, dense<[0, 0, 1, 0]> : tensor<4xi8>) : tensor<4xi8> return } func @compare_ge() { - %lhs = iree.unfoldable_constant dense<[1, 2, 7, 4]> : tensor<4xi32> - %rhs = iree.unfoldable_constant dense<[5, 2, 3, 4]> : tensor<4xi32> + %lhs = util.unfoldable_constant dense<[1, 2, 7, 4]> : tensor<4xi32> + %rhs = util.unfoldable_constant dense<[5, 2, 3, 4]> : tensor<4xi32> %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = "GE"} : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi1> - %c0 = iree.unfoldable_constant dense<0> : tensor<4xi8> - %c1 = iree.unfoldable_constant dense<1> : tensor<4xi8> + %c0 = util.unfoldable_constant dense<0> : tensor<4xi8> + %c1 = util.unfoldable_constant dense<1> : tensor<4xi8> %output = "mhlo.select"(%result, %c1, %c0) : (tensor<4xi1>, tensor<4xi8>, tensor<4xi8>) -> tensor<4xi8> 
check.expect_eq_const(%output, dense<[0, 1, 1, 1]> : tensor<4xi8>) : tensor<4xi8> return
diff --git a/iree/test/e2e/xla_ops/concatenate.mlir b/iree/test/e2e/xla_ops/concatenate.mlir index 45cffd6..77200e1 100644 --- a/iree/test/e2e/xla_ops/concatenate.mlir +++ b/iree/test/e2e/xla_ops/concatenate.mlir
@@ -1,7 +1,7 @@ func @xla_concatenate() { - %c0 = iree.unfoldable_constant dense<[[1, 2], [3, 4]]> : tensor<2x2xi32> - %c1 = iree.unfoldable_constant dense<[[5, 6, 7], [8, 9, 10]]> : tensor<2x3xi32> - %c2 = iree.unfoldable_constant dense<[[11, 12], [13, 14]]> : tensor<2x2xi32> + %c0 = util.unfoldable_constant dense<[[1, 2], [3, 4]]> : tensor<2x2xi32> + %c1 = util.unfoldable_constant dense<[[5, 6, 7], [8, 9, 10]]> : tensor<2x3xi32> + %c2 = util.unfoldable_constant dense<[[11, 12], [13, 14]]> : tensor<2x2xi32> %0 = "mhlo.concatenate"(%c0, %c1) {dimension = 1} : (tensor<2x2xi32>, tensor<2x3xi32>) -> tensor<2x5xi32> check.expect_eq_const(%0, dense<[[1, 2, 5, 6, 7], [3, 4, 8, 9, 10]]> : tensor<2x5xi32>) : tensor<2x5xi32> @@ -18,7 +18,7 @@ } func @concatenate_cst() { - %c0 = iree.unfoldable_constant dense<[[1, 2], [3, 4]]> : tensor<2x2xi32> + %c0 = util.unfoldable_constant dense<[[1, 2], [3, 4]]> : tensor<2x2xi32> %c1 = mhlo.constant dense<0> : tensor<2x3xi32> %0 = "mhlo.concatenate"(%c0, %c1) {dimension = 1} : (tensor<2x2xi32>, tensor<2x3xi32>) -> tensor<2x5xi32> check.expect_eq_const(%0, dense<[[1, 2, 0, 0, 0], [3, 4, 0, 0, 0]]> : tensor<2x5xi32>) : tensor<2x5xi32>
diff --git a/iree/test/e2e/xla_ops/convert.mlir b/iree/test/e2e/xla_ops/convert.mlir index 1f273d1..fb9a8f2 100644 --- a/iree/test/e2e/xla_ops/convert.mlir +++ b/iree/test/e2e/xla_ops/convert.mlir
@@ -1,47 +1,47 @@ func @narrow_int_i32_i8() { - %input = iree.unfoldable_constant dense<[-42, 0, 42]> : tensor<3xi32> + %input = util.unfoldable_constant dense<[-42, 0, 42]> : tensor<3xi32> %res = "mhlo.convert"(%input) : (tensor<3xi32>) -> tensor<3xi8> check.expect_eq_const(%res, dense<[-42, 0, 42]> : tensor<3xi8>) : tensor<3xi8> return } func @widen_int_i8_i32() { - %input = iree.unfoldable_constant dense<[-42, 0, 42]> : tensor<3xi8> + %input = util.unfoldable_constant dense<[-42, 0, 42]> : tensor<3xi8> %res = "mhlo.convert"(%input) : (tensor<3xi8>) -> tensor<3xi32> check.expect_eq_const(%res, dense<[-42, 0, 42]> : tensor<3xi32>) : tensor<3xi32> return } func @narrow_int_i32_i16() { - %input = iree.unfoldable_constant dense<[-42, 0, 42]> : tensor<3xi32> + %input = util.unfoldable_constant dense<[-42, 0, 42]> : tensor<3xi32> %res = "mhlo.convert"(%input) : (tensor<3xi32>) -> tensor<3xi16> check.expect_eq_const(%res, dense<[-42, 0, 42]> : tensor<3xi16>) : tensor<3xi16> return } func @widen_int_i16_i32() { - %input = iree.unfoldable_constant dense<[-42, 0, 42]> : tensor<3xi16> + %input = util.unfoldable_constant dense<[-42, 0, 42]> : tensor<3xi16> %res = "mhlo.convert"(%input) : (tensor<3xi16>) -> tensor<3xi32> check.expect_eq_const(%res, dense<[-42, 0, 42]> : tensor<3xi32>) : tensor<3xi32> return } func @narrow_int_i64_i32() { - %input = iree.unfoldable_constant dense<[-42, 0, 42]> : tensor<3xi64> + %input = util.unfoldable_constant dense<[-42, 0, 42]> : tensor<3xi64> %res = "mhlo.convert"(%input) : (tensor<3xi64>) -> tensor<3xi32> check.expect_eq_const(%res, dense<[-42, 0, 42]> : tensor<3xi32>) : tensor<3xi32> return } func @widen_int_i32_i64() { - %input = iree.unfoldable_constant dense<[-42, 0, 42]> : tensor<3xi32> + %input = util.unfoldable_constant dense<[-42, 0, 42]> : tensor<3xi32> %res = "mhlo.convert"(%input) : (tensor<3xi32>) -> tensor<3xi64> check.expect_eq_const(%res, dense<[-42, 0, 42]> : tensor<3xi64>) : tensor<3xi64> return } func @int_to_float() { - 
%input = iree.unfoldable_constant dense<[-42, 0, 42]> : tensor<3xi32> + %input = util.unfoldable_constant dense<[-42, 0, 42]> : tensor<3xi32> %res = "mhlo.convert"(%input) : (tensor<3xi32>) -> tensor<3xf32> check.expect_almost_eq_const(%res, dense<[-42.0, 0.0, 42.0]> : tensor<3xf32>) : tensor<3xf32> return @@ -54,7 +54,7 @@ // For casting rules, see // https://www.tensorflow.org/xla/operation_semantics#convertelementtype // func @float_to_int() { -// %input = iree.unfoldable_constant dense<[-10.5, -4.4, 4.4, 10.5]> : tensor<4xf32> +// %input = util.unfoldable_constant dense<[-10.5, -4.4, 4.4, 10.5]> : tensor<4xf32> // %res = "mhlo.convert"(%input) : (tensor<4xf32>) -> tensor<4xi32> // check.expect_eq_const(%res, dense<[-10, -4, 4, 10]> : tensor<4xi32>) : tensor<4xi32> // return
diff --git a/iree/test/e2e/xla_ops/convolution.mlir b/iree/test/e2e/xla_ops/convolution.mlir index 450d7c1..8472efd 100644 --- a/iree/test/e2e/xla_ops/convolution.mlir +++ b/iree/test/e2e/xla_ops/convolution.mlir
@@ -1,10 +1,10 @@ func @conv2d_nopadding() { - %inputs = iree.unfoldable_constant dense<[[ + %inputs = util.unfoldable_constant dense<[[ [[ 1.0, 2.0], [ 3.0, 4.0], [ 5.0, 6.0], [ 7.0, 8.0]], [[11.0, 12.0], [13.0, 14.0], [15.0, 16.0], [17.0, 18.0]], [[21.0, 22.0], [23.0, 24.0], [25.0, 26.0], [27.0, 28.0]], [[31.0, 32.0], [33.0, 34.0], [35.0, 36.0], [37.0, 38.0]]]]> : tensor<1x4x4x2xf32> - %weights = iree.unfoldable_constant dense<[ + %weights = util.unfoldable_constant dense<[ [[[ 1.0], [ 2.0]], [[ 3.0], [ 4.0]]], [[[ 5.0], [ 6.0]], [[ 7.0], [ 8.0]]], [[[ 9.0], [10.0]], [[11.0], [12.0]]]]> : tensor<3x2x2x1xf32> @@ -31,7 +31,7 @@ } func @conv2d_nopadding_batch_feature() { - %inputs = iree.unfoldable_constant dense<[ + %inputs = util.unfoldable_constant dense<[ [[[ 1.0], [ 3.0], [ 5.0], [ 7.0]], [[11.0], [13.0], [15.0], [17.0]], [[21.0], [23.0], [25.0], [27.0]], @@ -41,7 +41,7 @@ [[22.0], [24.0], [26.0], [28.0]], [[32.0], [34.0], [36.0], [38.0]]] ]> : tensor<2x4x4x1xf32> - %weights = iree.unfoldable_constant dense<[ + %weights = util.unfoldable_constant dense<[ [[[ 1.0], [ 2.0]], [[ 3.0], [ 4.0]]], [[[ 5.0], [ 6.0]], [[ 7.0], [ 8.0]]], [[[ 9.0], [10.0]], [[11.0], [12.0]]]]> : tensor<3x2x2x1xf32> @@ -68,12 +68,12 @@ } func @conv2d_reorder_input_spatial() { - %inputs = iree.unfoldable_constant dense<[[ + %inputs = util.unfoldable_constant dense<[[ [[ 1.0, 2.0], [11.0, 12.0], [21.0, 22.0], [31.0, 32.0]], [[ 3.0, 4.0], [13.0, 14.0], [23.0, 24.0], [33.0, 34.0]], [[ 5.0, 6.0], [15.0, 16.0], [25.0, 26.0], [35.0, 36.0]], [[ 7.0, 8.0], [17.0, 18.0], [27.0, 28.0], [37.0, 38.0]]]]> : tensor<1x4x4x2xf32> - %weights = iree.unfoldable_constant dense<[ + %weights = util.unfoldable_constant dense<[ [[[ 1.0], [ 2.0]], [[ 3.0], [ 4.0]]], [[[ 5.0], [ 6.0]], [[ 7.0], [ 8.0]]], [[[ 9.0], [10.0]], [[11.0], [12.0]]]]> : tensor<3x2x2x1xf32> @@ -100,12 +100,12 @@ } func @conv2d_reorder_kernel() { - %inputs = iree.unfoldable_constant dense<[[ + %inputs = util.unfoldable_constant dense<[[ [[ 
1.0, 2.0], [ 3.0, 4.0], [ 5.0, 6.0], [ 7.0, 8.0]], [[11.0, 12.0], [13.0, 14.0], [15.0, 16.0], [17.0, 18.0]], [[21.0, 22.0], [23.0, 24.0], [25.0, 26.0], [27.0, 28.0]], [[31.0, 32.0], [33.0, 34.0], [35.0, 36.0], [37.0, 38.0]]]]> : tensor<1x4x4x2xf32> - %weights = iree.unfoldable_constant dense< + %weights = util.unfoldable_constant dense< [[[[ 1.0, 3.0], [ 2.0, 4.0]], [[ 5.0, 7.0], [ 6.0, 8.0]], [[ 9.0, 11.0], [10.0, 12.0]]]]> : tensor<1x3x2x2xf32> @@ -132,12 +132,12 @@ } func @conv2d_reorder_output() { - %inputs = iree.unfoldable_constant dense<[[ + %inputs = util.unfoldable_constant dense<[[ [[ 1.0, 2.0], [ 3.0, 4.0], [ 5.0, 6.0], [ 7.0, 8.0]], [[11.0, 12.0], [13.0, 14.0], [15.0, 16.0], [17.0, 18.0]], [[21.0, 22.0], [23.0, 24.0], [25.0, 26.0], [27.0, 28.0]], [[31.0, 32.0], [33.0, 34.0], [35.0, 36.0], [37.0, 38.0]]]]> : tensor<1x4x4x2xf32> - %weights = iree.unfoldable_constant dense<[ + %weights = util.unfoldable_constant dense<[ [[[ 1.0], [ 2.0]], [[ 3.0], [ 4.0]]], [[[ 5.0], [ 6.0]], [[ 7.0], [ 8.0]]], [[[ 9.0], [10.0]], [[11.0], [12.0]]]]> : tensor<3x2x2x1xf32> @@ -165,12 +165,12 @@ } func @conv2d_1452x3221_same() { - %inputs = iree.unfoldable_constant dense<[[ + %inputs = util.unfoldable_constant dense<[[ [[ 1.0, 2.0], [ 3.0, 4.0], [ 5.0, 6.0], [ 7.0, 8.0], [ 9.0, 10.0]], [[11.0, 12.0], [13.0, 14.0], [15.0, 16.0], [17.0, 18.0], [19.0, 20.0]], [[21.0, 22.0], [23.0, 24.0], [25.0, 26.0], [27.0, 28.0], [29.0, 30.0]], [[31.0, 32.0], [33.0, 34.0], [35.0, 36.0], [37.0, 38.0], [39.0, 40.0]]]]> : tensor<1x4x5x2xf32> - %weights = iree.unfoldable_constant dense<[ + %weights = util.unfoldable_constant dense<[ [[[ 1.0], [ 2.0]], [[ 3.0], [ 4.0]]], [[[ 5.0], [ 6.0]], [[ 7.0], [ 8.0]]], [[[ 9.0], [10.0]], [[11.0], [12.0]]]]> : tensor<3x2x2x1xf32> @@ -200,7 +200,7 @@ } func @conv2d_2451x2311_same() { - %inputs = iree.unfoldable_constant dense<[ + %inputs = util.unfoldable_constant dense<[ [[[ 1.0], [ 2.0], [ 3.0], [ 4.0], [ 5.0]], [[ 6.0], [ 7.0], [ 8.0], [ 9.0], [10.0]], 
[[11.0], [12.0], [13.0], [14.0], [15.0]], @@ -209,7 +209,7 @@ [[26.0], [27.0], [28.0], [29.0], [30.0]], [[31.0], [32.0], [33.0], [34.0], [35.0]], [[36.0], [37.0], [38.0], [39.0], [40.0]]]]> : tensor <2x4x5x1xf32> - %weights = iree.unfoldable_constant dense<[ + %weights = util.unfoldable_constant dense<[ [[[1.0]], [[2.0]], [[3.0]]], [[[4.0]], [[5.0]], [[6.0]]]]> : tensor <2x3x1x1xf32> %res = "mhlo.convolution"(%inputs, %weights) { @@ -242,7 +242,7 @@ } func @conv2d_no_padding2() { - %inputs = iree.unfoldable_constant dense<[ + %inputs = util.unfoldable_constant dense<[ [[[ 1.0, 2.0, 3.0], [ 4.0, 5.0, 6.0], [ 7.0, 8.0, 9.0], @@ -283,7 +283,7 @@ [112.0, 113.0, 114.0], [115.0, 116.0, 117.0], [118.0, 119.0, 120.0]]]]> : tensor<2x4x5x3xf32> - %weights = iree.unfoldable_constant dense<[ + %weights = util.unfoldable_constant dense<[ [[[ 1.0, 2.0, 3.0, 4.0, 5.0, 6.0], [ 7.0, 8.0, 9.0, 10.0, 11.0, 12.0], [ 13.0, 14.0, 15.0, 16.0, 17.0, 18.0]], @@ -341,7 +341,7 @@ } func @conv2d_1452x2223_dilated_valid() { - %inputs = iree.unfoldable_constant dense< + %inputs = util.unfoldable_constant dense< [[[[0.09762701, 0.43037874], [ 0.20552675, 0.08976637], [-0.1526904, 0.29178822], @@ -362,7 +362,7 @@ [-0.9624204, 0.23527099], [ 0.22419144, 0.23386799], [ 0.8874962, 0.3636406 ]]]]> : tensor<1x4x5x2xf32> - %weights = iree.unfoldable_constant dense< + %weights = util.unfoldable_constant dense< [[[[-0.2809842, -0.12593609, 0.3952624 ], [-0.8795491, 0.33353344, 0.34127575]], [[-0.5792349, -0.7421474, -0.3691433 ], @@ -402,8 +402,8 @@ } func @depthwise_conv_non_1_channel_multiplier() { - %arg0 = iree.unfoldable_constant dense<1.0> : tensor<2x4x5x2xf32> - %arg1 = iree.unfoldable_constant dense<1.0> : tensor<2x2x2x3xf32> + %arg0 = util.unfoldable_constant dense<1.0> : tensor<2x4x5x2xf32> + %arg1 = util.unfoldable_constant dense<1.0> : tensor<2x2x2x3xf32> %res = "mhlo.convolution"(%arg0, %arg1) { batch_group_count = 1 : i64, dimension_numbers = {
diff --git a/iree/test/e2e/xla_ops/cosine.mlir b/iree/test/e2e/xla_ops/cosine.mlir index 88f290d..2a95d89 100644 --- a/iree/test/e2e/xla_ops/cosine.mlir +++ b/iree/test/e2e/xla_ops/cosine.mlir
@@ -1,12 +1,12 @@ func @tensor() { - %input = iree.unfoldable_constant dense<[0.0, 1.0, 1.5, 2.0]> : tensor<4xf32> + %input = util.unfoldable_constant dense<[0.0, 1.0, 1.5, 2.0]> : tensor<4xf32> %result = "mhlo.cosine"(%input) : (tensor<4xf32>) -> tensor<4xf32> check.expect_almost_eq_const(%result, dense<[1.0, 0.5403, 0.0707, -0.4161]> : tensor<4xf32>) : tensor<4xf32> return } func @scalar() { - %input = iree.unfoldable_constant dense<3.0> : tensor<f32> + %input = util.unfoldable_constant dense<3.0> : tensor<f32> %result = "mhlo.cosine"(%input) : (tensor<f32>) -> tensor<f32> check.expect_almost_eq_const(%result, dense<-0.99> : tensor<f32>) : tensor<f32> return
diff --git a/iree/test/e2e/xla_ops/divide.mlir b/iree/test/e2e/xla_ops/divide.mlir index 01cf528..e2a3ff0 100644 --- a/iree/test/e2e/xla_ops/divide.mlir +++ b/iree/test/e2e/xla_ops/divide.mlir
@@ -1,14 +1,14 @@ func @i32() { - %0 = iree.unfoldable_constant dense<[5, 6, 7, 8]> : tensor<4xi32> - %1 = iree.unfoldable_constant dense<[1, 2, 3, 4]> : tensor<4xi32> + %0 = util.unfoldable_constant dense<[5, 6, 7, 8]> : tensor<4xi32> + %1 = util.unfoldable_constant dense<[1, 2, 3, 4]> : tensor<4xi32> %result = "mhlo.divide"(%0, %1) : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi32> check.expect_eq_const(%result, dense<[5, 3, 2, 2]> : tensor<4xi32>) : tensor<4xi32> return } func @f32() { - %0 = iree.unfoldable_constant dense<[5.0, 6.0, 7.0, 8.0]> : tensor<4xf32> - %1 = iree.unfoldable_constant dense<[1.0, 2.0, 3.0, 4.0]> : tensor<4xf32> + %0 = util.unfoldable_constant dense<[5.0, 6.0, 7.0, 8.0]> : tensor<4xf32> + %1 = util.unfoldable_constant dense<[1.0, 2.0, 3.0, 4.0]> : tensor<4xf32> %result = "mhlo.divide"(%0, %1) : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32> check.expect_almost_eq_const(%result, dense<[5.0, 3.0, 2.333333, 2.0]> : tensor<4xf32>) : tensor<4xf32> return
diff --git a/iree/test/e2e/xla_ops/dot.mlir b/iree/test/e2e/xla_ops/dot.mlir index 06ad5a0..77b37c6 100644 --- a/iree/test/e2e/xla_ops/dot.mlir +++ b/iree/test/e2e/xla_ops/dot.mlir
@@ -1,11 +1,11 @@ func @f32() { - %lhs = iree.unfoldable_constant dense<[ + %lhs = util.unfoldable_constant dense<[ [15.0, 14.0, 13.0], [12.0, 11.0, 10.0], [09.0, 08.0, 07.0], [06.0, 05.0, 04.0], [03.0, 02.0, 01.0]]> : tensor<5x3xf32> - %rhs = iree.unfoldable_constant dense<[ + %rhs = util.unfoldable_constant dense<[ [15.0, 14.0, 13.0, 12.0, 11.0], [10.0, 09.0, 08.0, 07.0, 06.0], [05.0, 04.0, 03.0, 02.0, 01.0]]> : tensor<3x5xf32> @@ -20,32 +20,32 @@ } func @i32i32.i32() { - %lhs = iree.unfoldable_constant dense<3> : tensor<2x4xi32> - %rhs = iree.unfoldable_constant dense<2> : tensor<4x2xi32> + %lhs = util.unfoldable_constant dense<3> : tensor<2x4xi32> + %rhs = util.unfoldable_constant dense<2> : tensor<4x2xi32> %res = "mhlo.dot"(%lhs, %rhs) : (tensor<2x4xi32>, tensor<4x2xi32>) -> tensor<2x2xi32> check.expect_eq_const(%res, dense<24> : tensor<2x2xi32>) : tensor<2x2xi32> return } func @i8i8.i32() { - %lhs = iree.unfoldable_constant dense<3> : tensor<2x4xi8> - %rhs = iree.unfoldable_constant dense<2> : tensor<4x2xi8> + %lhs = util.unfoldable_constant dense<3> : tensor<2x4xi8> + %rhs = util.unfoldable_constant dense<2> : tensor<4x2xi8> %res = "mhlo.dot"(%lhs, %rhs) : (tensor<2x4xi8>, tensor<4x2xi8>) -> tensor<2x2xi32> check.expect_eq_const(%res, dense<24> : tensor<2x2xi32>) : tensor<2x2xi32> return } func @i16i16.i32() { - %lhs = iree.unfoldable_constant dense<3> : tensor<2x4xi16> - %rhs = iree.unfoldable_constant dense<2> : tensor<4x2xi16> + %lhs = util.unfoldable_constant dense<3> : tensor<2x4xi16> + %rhs = util.unfoldable_constant dense<2> : tensor<4x2xi16> %res = "mhlo.dot"(%lhs, %rhs) : (tensor<2x4xi16>, tensor<4x2xi16>) -> tensor<2x2xi32> check.expect_eq_const(%res, dense<24> : tensor<2x2xi32>) : tensor<2x2xi32> return } func @large() { - %lhs = iree.unfoldable_constant dense<1.0> : tensor<250x1024xf32> - %rhs = iree.unfoldable_constant dense<0.4> : tensor<1024x500xf32> + %lhs = util.unfoldable_constant dense<1.0> : tensor<250x1024xf32> + %rhs = 
util.unfoldable_constant dense<0.4> : tensor<1024x500xf32> %res = "mhlo.dot"(%lhs, %rhs) : (tensor<250x1024xf32>, tensor<1024x500xf32>) -> tensor<250x500xf32> check.expect_almost_eq_const(%res, dense<409.596> : tensor<250x500xf32>) : tensor<250x500xf32> return
diff --git a/iree/test/e2e/xla_ops/dot_general.mlir b/iree/test/e2e/xla_ops/dot_general.mlir index 25a7804..2bd2ba5 100644 --- a/iree/test/e2e/xla_ops/dot_general.mlir +++ b/iree/test/e2e/xla_ops/dot_general.mlir
@@ -1,6 +1,6 @@ func @dot_general_lower() { - %lhs = iree.unfoldable_constant dense<[[[0.3, 0.5]]]> : tensor<1x1x2xf32> - %rhs = iree.unfoldable_constant dense<[[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]> : tensor<2x3xf32> + %lhs = util.unfoldable_constant dense<[[[0.3, 0.5]]]> : tensor<1x1x2xf32> + %rhs = util.unfoldable_constant dense<[[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]> : tensor<2x3xf32> %res = "mhlo.dot_general"(%lhs, %rhs) { dot_dimension_numbers = { lhs_batching_dimensions = dense<[]> : tensor<0xi64>, @@ -15,8 +15,8 @@ } func @dot_general_lower_swapped() { - %lhs = iree.unfoldable_constant dense<[[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]> : tensor<2x3xf32> - %rhs = iree.unfoldable_constant dense<[[[0.3, 0.5]]]> : tensor<1x1x2xf32> + %lhs = util.unfoldable_constant dense<[[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]> : tensor<2x3xf32> + %rhs = util.unfoldable_constant dense<[[[0.3, 0.5]]]> : tensor<1x1x2xf32> %res = "mhlo.dot_general"(%lhs, %rhs) { dot_dimension_numbers = { lhs_batching_dimensions = dense<[]> : tensor<0xi64>, @@ -31,8 +31,8 @@ } func @dot_general_trivial_batching_dimension() { - %lhs = iree.unfoldable_constant dense<[[[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]]> : tensor<1x2x3xf32> - %rhs = iree.unfoldable_constant dense<[[ + %lhs = util.unfoldable_constant dense<[[[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]]> : tensor<1x2x3xf32> + %rhs = util.unfoldable_constant dense<[[ [1.0, 2.0, 3.0, 4.0], [1.0, 2.0, 3.0, 4.0], [1.0, 2.0, 3.0, 4.0]]]> : tensor<1x3x4xf32> @@ -50,8 +50,8 @@ } func @dot_general_matmul() { - %lhs = iree.unfoldable_constant dense<3.0> : tensor<2x4xf32> - %rhs = iree.unfoldable_constant dense<2.0> : tensor<4x2xf32> + %lhs = util.unfoldable_constant dense<3.0> : tensor<2x4xf32> + %rhs = util.unfoldable_constant dense<2.0> : tensor<4x2xf32> %res = "mhlo.dot_general"(%lhs, %rhs) { dot_dimension_numbers = { lhs_batching_dimensions = dense<> : tensor<0xi64>, @@ -64,8 +64,8 @@ } func @dot_general_matmul_i32.i32.i32() { - %lhs = iree.unfoldable_constant dense<3> : tensor<2x4xi32> - 
%rhs = iree.unfoldable_constant dense<2> : tensor<4x2xi32> + %lhs = util.unfoldable_constant dense<3> : tensor<2x4xi32> + %rhs = util.unfoldable_constant dense<2> : tensor<4x2xi32> %res = "mhlo.dot_general"(%lhs, %rhs) { dot_dimension_numbers = { lhs_batching_dimensions = dense<> : tensor<0xi64>, @@ -78,10 +78,10 @@ } func @dot_general_nontrivial_batching_dimension() { - %lhs = iree.unfoldable_constant dense<[ + %lhs = util.unfoldable_constant dense<[ [[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]]> : tensor<2x2x3xf32> - %rhs = iree.unfoldable_constant dense<[[ + %rhs = util.unfoldable_constant dense<[[ [1.0, 2.0, 3.0, 4.0], [1.0, 2.0, 3.0, 4.0], [1.0, 2.0, 3.0, 4.0] @@ -109,8 +109,8 @@ } func @large_dot_general() { - %lhs = iree.unfoldable_constant dense<1.0> : tensor<4x32x1024xf32> - %rhs = iree.unfoldable_constant dense<0.4> : tensor<4x1024x64xf32> + %lhs = util.unfoldable_constant dense<1.0> : tensor<4x32x1024xf32> + %rhs = util.unfoldable_constant dense<0.4> : tensor<4x1024x64xf32> %res = "mhlo.dot_general"(%lhs, %rhs) { dot_dimension_numbers = { lhs_batching_dimensions = dense<0> : tensor<1xi64>, @@ -125,8 +125,8 @@ } func @large_dot_general2() { - %lhs = iree.unfoldable_constant dense<1.0> : tensor<4x32x1024xf32> - %rhs = iree.unfoldable_constant dense<0.4> : tensor<4x1024x64xf32> + %lhs = util.unfoldable_constant dense<1.0> : tensor<4x32x1024xf32> + %rhs = util.unfoldable_constant dense<0.4> : tensor<4x1024x64xf32> %res = "mhlo.dot_general"(%lhs, %rhs) { dot_dimension_numbers = { lhs_batching_dimensions = dense<0> : tensor<1xi64>,
diff --git a/iree/test/e2e/xla_ops/dynamic_slice.mlir b/iree/test/e2e/xla_ops/dynamic_slice.mlir index 0e06549..7a2fd86 100644 --- a/iree/test/e2e/xla_ops/dynamic_slice.mlir +++ b/iree/test/e2e/xla_ops/dynamic_slice.mlir
@@ -1,10 +1,10 @@ func @dynamic_slice() { - %input = iree.unfoldable_constant dense<[ + %input = util.unfoldable_constant dense<[ [01, 02, 03, 04], [05, 06, 07, 08], [09, 10, 11, 12]]> : tensor<3x4xi32> - %start1 = iree.unfoldable_constant dense<1> : tensor<i64> - %start2 = iree.unfoldable_constant dense<2> : tensor<i64> + %start1 = util.unfoldable_constant dense<1> : tensor<i64> + %start2 = util.unfoldable_constant dense<2> : tensor<i64> %result = "mhlo.dynamic-slice"(%input, %start1, %start2) { slice_sizes = dense<[2, 2]> : tensor<2xi64> } : (tensor<3x4xi32>, tensor<i64>, tensor<i64>) -> tensor<2x2xi32> @@ -15,12 +15,12 @@ } func @dynamic_unit_slice() { - %input = iree.unfoldable_constant dense<[ + %input = util.unfoldable_constant dense<[ [01, 02, 03, 04], [05, 06, 07, 08], [09, 10, 11, 12]]> : tensor<3x4xi32> - %start1 = iree.unfoldable_constant dense<1> : tensor<i64> - %start2 = iree.unfoldable_constant dense<2> : tensor<i64> + %start1 = util.unfoldable_constant dense<1> : tensor<i64> + %start2 = util.unfoldable_constant dense<2> : tensor<i64> %result = "mhlo.dynamic-slice"(%input, %start1, %start2) { slice_sizes = dense<[1, 2]> : tensor<2xi64> } : (tensor<3x4xi32>, tensor<i64>, tensor<i64>) -> tensor<1x2xi32> @@ -30,8 +30,8 @@ } func @dynamic_1d_slice() { - %input = iree.unfoldable_constant dense<[1, 2, 3, 4]> : tensor<4xi32> - %start1 = iree.unfoldable_constant dense<1> : tensor<i64> + %input = util.unfoldable_constant dense<[1, 2, 3, 4]> : tensor<4xi32> + %start1 = util.unfoldable_constant dense<1> : tensor<i64> %result = "mhlo.dynamic-slice"(%input, %start1) { slice_sizes = dense<[2]> : tensor<1xi64> } : (tensor<4xi32>, tensor<i64>) -> tensor<2xi32>
diff --git a/iree/test/e2e/xla_ops/dynamic_update_slice.mlir b/iree/test/e2e/xla_ops/dynamic_update_slice.mlir index 0388f3a..3cf9bca 100644 --- a/iree/test/e2e/xla_ops/dynamic_update_slice.mlir +++ b/iree/test/e2e/xla_ops/dynamic_update_slice.mlir
@@ -1,7 +1,7 @@ func @dynamic_update_slice_2x2() { - %target = iree.unfoldable_constant dense<2> : tensor<3x3xi32> - %update = iree.unfoldable_constant dense<1> : tensor<2x2xi32> - %c0 = iree.unfoldable_constant dense<0> : tensor<i32> + %target = util.unfoldable_constant dense<2> : tensor<3x3xi32> + %update = util.unfoldable_constant dense<1> : tensor<2x2xi32> + %c0 = util.unfoldable_constant dense<0> : tensor<i32> %result = "mhlo.dynamic-update-slice"(%target, %update, %c0, %c0) : (tensor<3x3xi32>, tensor<2x2xi32>, tensor<i32>, tensor<i32>) -> tensor<3x3xi32> check.expect_eq_const(%result, dense<[ @@ -12,10 +12,10 @@ } func @dynamic_update_slice_1x3() { - %target = iree.unfoldable_constant dense<2> : tensor<3x3xi32> - %update = iree.unfoldable_constant dense<1> : tensor<1x3xi32> - %c0 = iree.unfoldable_constant dense<0> : tensor<i32> - %c1 = iree.unfoldable_constant dense<1> : tensor<i32> + %target = util.unfoldable_constant dense<2> : tensor<3x3xi32> + %update = util.unfoldable_constant dense<1> : tensor<1x3xi32> + %c0 = util.unfoldable_constant dense<0> : tensor<i32> + %c1 = util.unfoldable_constant dense<1> : tensor<i32> %result = "mhlo.dynamic-update-slice"(%target, %update, %c1, %c0) : (tensor<3x3xi32>, tensor<1x3xi32>, tensor<i32>, tensor<i32>) -> tensor<3x3xi32> check.expect_eq_const(%result, dense<[ @@ -26,7 +26,7 @@ } func @into_constant() { - %update = iree.unfoldable_constant dense<2> : tensor<1xi32> + %update = util.unfoldable_constant dense<2> : tensor<1xi32> %target = mhlo.constant dense<1> : tensor<4xi32> %index = mhlo.constant dense<0> : tensor<i32> %result = "mhlo.dynamic-update-slice"(%target, %update, %index) : (tensor<4xi32>, tensor<1xi32>, tensor<i32>) -> tensor<4xi32>
diff --git a/iree/test/e2e/xla_ops/exponential.mlir b/iree/test/e2e/xla_ops/exponential.mlir index 294a57c..0ef7778 100644 --- a/iree/test/e2e/xla_ops/exponential.mlir +++ b/iree/test/e2e/xla_ops/exponential.mlir
@@ -1,26 +1,26 @@ func @tensor() { - %input = iree.unfoldable_constant dense<[0.0, 1.0, 2.0, 4.0]> : tensor<4xf32> + %input = util.unfoldable_constant dense<[0.0, 1.0, 2.0, 4.0]> : tensor<4xf32> %result = "mhlo.exponential"(%input) : (tensor<4xf32>) -> tensor<4xf32> check.expect_almost_eq_const(%result, dense<[1.0, 2.7183, 7.3891, 54.5981]> : tensor<4xf32>) : tensor<4xf32> return } func @scalar() { - %input = iree.unfoldable_constant dense<1.0> : tensor<f32> + %input = util.unfoldable_constant dense<1.0> : tensor<f32> %result = "mhlo.exponential"(%input) : (tensor<f32>) -> tensor<f32> check.expect_almost_eq_const(%result, dense<2.7183> : tensor<f32>) : tensor<f32> return } func @double() { - %input = iree.unfoldable_constant dense<1.0> : tensor<f64> + %input = util.unfoldable_constant dense<1.0> : tensor<f64> %result = "mhlo.exponential"(%input) : (tensor<f64>) -> tensor<f64> check.expect_almost_eq_const(%result, dense<2.7183> : tensor<f64>) : tensor<f64> return } func @negative() { - %input = iree.unfoldable_constant dense<-1.0> : tensor<f32> + %input = util.unfoldable_constant dense<-1.0> : tensor<f32> %result = "mhlo.exponential"(%input) : (tensor<f32>) -> tensor<f32> check.expect_almost_eq_const(%result, dense<0.367879> : tensor<f32>) : tensor<f32> return
diff --git a/iree/test/e2e/xla_ops/exponential_minus_one.mlir b/iree/test/e2e/xla_ops/exponential_minus_one.mlir index c9b0b3d..8c608f5 100644 --- a/iree/test/e2e/xla_ops/exponential_minus_one.mlir +++ b/iree/test/e2e/xla_ops/exponential_minus_one.mlir
@@ -1,5 +1,5 @@ func @exponential_minus_one() { - %input = iree.unfoldable_constant dense<[0.0, 0.5, 1.0, -1.0]> : tensor<4xf32> + %input = util.unfoldable_constant dense<[0.0, 0.5, 1.0, -1.0]> : tensor<4xf32> %result = "mhlo.exponential_minus_one"(%input) : (tensor<4xf32>) -> tensor<4xf32> check.expect_almost_eq_const(%result, dense<[0.0, 0.6487213, 1.7182818, -0.6321205]> : tensor<4xf32>) : tensor<4xf32> return
diff --git a/iree/test/e2e/xla_ops/fft.mlir b/iree/test/e2e/xla_ops/fft.mlir index b6b8d65..898230d 100644 --- a/iree/test/e2e/xla_ops/fft.mlir +++ b/iree/test/e2e/xla_ops/fft.mlir
@@ -1,7 +1,7 @@ // TODO(hanchung): Add other types of fft tests, e.g. fft, ifft, irfft. func @rfft_1d() { - %input = iree.unfoldable_constant dense<[ + %input = util.unfoldable_constant dense<[ 9.0, 1.0, 4.5, -0.3, 10.0, -1.0, 5.5, 0.3, 299.0, 3.5, -0.777, 2.0, 1.7, 3.5, -4.5, 0.0, 9.0, 1.0, 4.5, -0.3, 10.0, -1.0, 5.5, 0.3, 299.0, 3.5, -0.777, 2.0, 1.7, 3.5, -4.5, 0.0]> : tensor<32xf32> @@ -16,7 +16,7 @@ } func @rfft_2d() { - %input = iree.unfoldable_constant dense<[[ + %input = util.unfoldable_constant dense<[[ 9.0, 1.0, 4.5, -0.3, 10.0, -1.0, 5.5, 0.3, 299.0, 3.5, -0.777, 2.0, 1.7, 3.5, -4.5, 0.0, 9.0, 1.0, 4.5, -0.3, 10.0, -1.0, 5.5, 0.3, 299.0, 3.5, -0.777, 2.0, 1.7, 3.5, -4.5, 0.0]]> : tensor<1x32xf32>
diff --git a/iree/test/e2e/xla_ops/finite.mlir b/iree/test/e2e/xla_ops/finite.mlir index 8615c6d..5ffa276 100644 --- a/iree/test/e2e/xla_ops/finite.mlir +++ b/iree/test/e2e/xla_ops/finite.mlir
@@ -1,10 +1,10 @@ func @f32() { - %0 = iree.unfoldable_constant dense<[1.0, 6.0, -6.0, 0.0]> : tensor<4xf32> - %1 = iree.unfoldable_constant dense<[0.0, 2.0, 3.0, 4.0]> : tensor<4xf32> + %0 = util.unfoldable_constant dense<[1.0, 6.0, -6.0, 0.0]> : tensor<4xf32> + %1 = util.unfoldable_constant dense<[0.0, 2.0, 3.0, 4.0]> : tensor<4xf32> %2 = "mhlo.divide"(%0, %1) : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32> %result = "mhlo.is_finite"(%2) : (tensor<4xf32>) -> tensor<4xi1> - %c0 = iree.unfoldable_constant dense<0> : tensor<4xi8> - %c1 = iree.unfoldable_constant dense<1> : tensor<4xi8> + %c0 = util.unfoldable_constant dense<0> : tensor<4xi8> + %c1 = util.unfoldable_constant dense<1> : tensor<4xi8> %output = "mhlo.select"(%result, %c1, %c0) : (tensor<4xi1>, tensor<4xi8>, tensor<4xi8>) -> tensor<4xi8> check.expect_eq_const(%output, dense<[0, 1, 1, 1]> : tensor<4xi8>) : tensor<4xi8> return
diff --git a/iree/test/e2e/xla_ops/floor.mlir b/iree/test/e2e/xla_ops/floor.mlir index a1e1abf..3d89b9c 100644 --- a/iree/test/e2e/xla_ops/floor.mlir +++ b/iree/test/e2e/xla_ops/floor.mlir
@@ -1,19 +1,19 @@ func @tensor() { - %input = iree.unfoldable_constant dense<[0.0, 1.1, 2.5, 4.9]> : tensor<4xf32> + %input = util.unfoldable_constant dense<[0.0, 1.1, 2.5, 4.9]> : tensor<4xf32> %result = "mhlo.floor"(%input) : (tensor<4xf32>) -> tensor<4xf32> check.expect_almost_eq_const(%result, dense<[0.0, 1.0, 2.0, 4.0]> : tensor<4xf32>): tensor<4xf32> return } func @scalar() { - %input = iree.unfoldable_constant dense<101.3> : tensor<f32> + %input = util.unfoldable_constant dense<101.3> : tensor<f32> %result = "mhlo.floor"(%input) : (tensor<f32>) -> tensor<f32> check.expect_almost_eq_const(%result, dense<101.0> : tensor<f32>): tensor<f32> return } func @negative() { - %input = iree.unfoldable_constant dense<-1.1> : tensor<f32> + %input = util.unfoldable_constant dense<-1.1> : tensor<f32> %result = "mhlo.floor"(%input) : (tensor<f32>) -> tensor<f32> check.expect_almost_eq_const(%result, dense<-2.0> : tensor<f32>): tensor<f32> return
diff --git a/iree/test/e2e/xla_ops/gather.mlir b/iree/test/e2e/xla_ops/gather.mlir index 5e6d6e9..79202d9 100644 --- a/iree/test/e2e/xla_ops/gather.mlir +++ b/iree/test/e2e/xla_ops/gather.mlir
@@ -1,11 +1,11 @@ func @foo() { - %input = iree.unfoldable_constant dense<[ + %input = util.unfoldable_constant dense<[ [[01, 02, 03, 04, 05]], [[06, 07, 08, 09, 10]], [[11, 12, 13, 14, 15]], [[16, 17, 18, 19, 20]], [[21, 22, 23, 24, 25]]]> : tensor<5x1x5xi32> - %start_indices = iree.unfoldable_constant dense<2> : tensor<i64> + %start_indices = util.unfoldable_constant dense<2> : tensor<i64> %res = "mhlo.gather"(%input, %start_indices) { dimension_numbers = { collapsed_slice_dims = dense<0> : tensor<1xi64>,
diff --git a/iree/test/e2e/xla_ops/log.mlir b/iree/test/e2e/xla_ops/log.mlir index 4ccb135..4368008 100644 --- a/iree/test/e2e/xla_ops/log.mlir +++ b/iree/test/e2e/xla_ops/log.mlir
@@ -1,19 +1,19 @@ func @tensor() { - %input = iree.unfoldable_constant dense<[1.0, 2.0, 3.0, 4.0]> : tensor<4xf32> + %input = util.unfoldable_constant dense<[1.0, 2.0, 3.0, 4.0]> : tensor<4xf32> %result = "mhlo.log"(%input) : (tensor<4xf32>) -> tensor<4xf32> check.expect_almost_eq_const(%result, dense<[0.0, 0.693147, 1.09861, 1.38629]> : tensor<4xf32>) : tensor<4xf32> return } func @scalar() { - %input = iree.unfoldable_constant dense<4.0> : tensor<f32> + %input = util.unfoldable_constant dense<4.0> : tensor<f32> %result = "mhlo.log"(%input) : (tensor<f32>) -> tensor<f32> check.expect_almost_eq_const(%result, dense<1.3863> : tensor<f32>) : tensor<f32> return } func @double() { - %input = iree.unfoldable_constant dense<4.0> : tensor<f64> + %input = util.unfoldable_constant dense<4.0> : tensor<f64> %result = "mhlo.log"(%input) : (tensor<f64>) -> tensor<f64> check.expect_almost_eq_const(%result, dense<1.3863> : tensor<f64>) : tensor<f64> return
diff --git a/iree/test/e2e/xla_ops/log_plus_one.mlir b/iree/test/e2e/xla_ops/log_plus_one.mlir index 4b4bfad..70e7120 100644 --- a/iree/test/e2e/xla_ops/log_plus_one.mlir +++ b/iree/test/e2e/xla_ops/log_plus_one.mlir
@@ -1,5 +1,5 @@ func @log_plus_one() { - %input = iree.unfoldable_constant dense<[0.0, 0.5, 1.0, 5.0]> : tensor<4xf32> + %input = util.unfoldable_constant dense<[0.0, 0.5, 1.0, 5.0]> : tensor<4xf32> %result = "mhlo.log_plus_one"(%input) : (tensor<4xf32>) -> tensor<4xf32> check.expect_almost_eq_const(%result, dense<[0.0, 0.4054651, 0.6931472, 1.7917595]> : tensor<4xf32>) : tensor<4xf32> return
diff --git a/iree/test/e2e/xla_ops/maximum.mlir b/iree/test/e2e/xla_ops/maximum.mlir index 4c0ccf2..deac9d0 100644 --- a/iree/test/e2e/xla_ops/maximum.mlir +++ b/iree/test/e2e/xla_ops/maximum.mlir
@@ -1,86 +1,86 @@ func @tensor_i32() { - %lhs = iree.unfoldable_constant dense<[1, 6, 7, 8]> : tensor<4xi32> - %rhs = iree.unfoldable_constant dense<[5, 6, 3, 8]> : tensor<4xi32> + %lhs = util.unfoldable_constant dense<[1, 6, 7, 8]> : tensor<4xi32> + %rhs = util.unfoldable_constant dense<[5, 6, 3, 8]> : tensor<4xi32> %result = "mhlo.maximum"(%lhs, %rhs) : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi32> check.expect_eq_const(%result, dense<[5, 6, 7, 8]> : tensor<4xi32>) : tensor<4xi32> return } func @tensor_odd_dim() { - %lhs = iree.unfoldable_constant dense<[1, 6, 7]> : tensor<3xi32> - %rhs = iree.unfoldable_constant dense<[5, 6, 3]> : tensor<3xi32> + %lhs = util.unfoldable_constant dense<[1, 6, 7]> : tensor<3xi32> + %rhs = util.unfoldable_constant dense<[5, 6, 3]> : tensor<3xi32> %result = "mhlo.maximum"(%lhs, %rhs) : (tensor<3xi32>, tensor<3xi32>) -> tensor<3xi32> check.expect_eq_const(%result, dense<[5, 6,7]> : tensor<3xi32>) : tensor<3xi32> return } func @scalar_i32() { - %lhs = iree.unfoldable_constant dense<1> : tensor<i32> - %rhs = iree.unfoldable_constant dense<2> : tensor<i32> + %lhs = util.unfoldable_constant dense<1> : tensor<i32> + %rhs = util.unfoldable_constant dense<2> : tensor<i32> %result = "mhlo.maximum"(%lhs, %rhs) : (tensor<i32>, tensor<i32>) -> tensor<i32> check.expect_eq_const(%result, dense<2> : tensor<i32>) : tensor<i32> return } func @negative_i32() { - %lhs = iree.unfoldable_constant dense<1> : tensor<i32> - %rhs = iree.unfoldable_constant dense<-2> : tensor<i32> + %lhs = util.unfoldable_constant dense<1> : tensor<i32> + %rhs = util.unfoldable_constant dense<-2> : tensor<i32> %result = "mhlo.maximum"(%lhs, %rhs) : (tensor<i32>, tensor<i32>) -> tensor<i32> check.expect_eq_const(%result, dense<1> : tensor<i32>) : tensor<i32> return } func @i8() { - %lhs = iree.unfoldable_constant dense<1> : tensor<i8> - %rhs = iree.unfoldable_constant dense<2> : tensor<i8> + %lhs = util.unfoldable_constant dense<1> : tensor<i8> + %rhs = 
util.unfoldable_constant dense<2> : tensor<i8> %result = "mhlo.maximum"(%lhs, %rhs) : (tensor<i8>, tensor<i8>) -> tensor<i8> check.expect_eq_const(%result, dense<2> : tensor<i8>) : tensor<i8> return } func @i16() { - %lhs = iree.unfoldable_constant dense<1> : tensor<i16> - %rhs = iree.unfoldable_constant dense<2> : tensor<i16> + %lhs = util.unfoldable_constant dense<1> : tensor<i16> + %rhs = util.unfoldable_constant dense<2> : tensor<i16> %result = "mhlo.maximum"(%lhs, %rhs) : (tensor<i16>, tensor<i16>) -> tensor<i16> check.expect_eq_const(%result, dense<2> : tensor<i16>) : tensor<i16> return } func @i64() { - %lhs = iree.unfoldable_constant dense<1> : tensor<i64> - %rhs = iree.unfoldable_constant dense<2> : tensor<i64> + %lhs = util.unfoldable_constant dense<1> : tensor<i64> + %rhs = util.unfoldable_constant dense<2> : tensor<i64> %result = "mhlo.maximum"(%lhs, %rhs) : (tensor<i64>, tensor<i64>) -> tensor<i64> check.expect_eq_const(%result, dense<2> : tensor<i64>) : tensor<i64> return } func @tensor_f32() { - %lhs = iree.unfoldable_constant dense<[1.0, 2.0, 7.0, 4.0]> : tensor<4xf32> - %rhs = iree.unfoldable_constant dense<[5.0, 2.0, 3.0, 4.0]> : tensor<4xf32> + %lhs = util.unfoldable_constant dense<[1.0, 2.0, 7.0, 4.0]> : tensor<4xf32> + %rhs = util.unfoldable_constant dense<[5.0, 2.0, 3.0, 4.0]> : tensor<4xf32> %result = "mhlo.minimum"(%lhs, %rhs) : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32> check.expect_almost_eq_const(%result, dense<[1.0, 2.0, 3.0, 4.0]> : tensor<4xf32>) : tensor<4xf32> return } func @scalar_f32() { - %lhs = iree.unfoldable_constant dense<1.0> : tensor<f32> - %rhs = iree.unfoldable_constant dense<2.0> : tensor<f32> + %lhs = util.unfoldable_constant dense<1.0> : tensor<f32> + %rhs = util.unfoldable_constant dense<2.0> : tensor<f32> %result = "mhlo.minimum"(%lhs, %rhs) : (tensor<f32>, tensor<f32>) -> tensor<f32> check.expect_almost_eq_const(%result, dense<1.0> : tensor<f32>) : tensor<f32> return } func @double() { - %lhs = 
iree.unfoldable_constant dense<1.0> : tensor<f64> - %rhs = iree.unfoldable_constant dense<2.0> : tensor<f64> + %lhs = util.unfoldable_constant dense<1.0> : tensor<f64> + %rhs = util.unfoldable_constant dense<2.0> : tensor<f64> %result = "mhlo.minimum"(%lhs, %rhs) : (tensor<f64>, tensor<f64>) -> tensor<f64> check.expect_almost_eq_const(%result, dense<1.0> : tensor<f64>) : tensor<f64> return } func @negative_f32() { - %lhs = iree.unfoldable_constant dense<1.0> : tensor<f32> - %rhs = iree.unfoldable_constant dense<-2.0> : tensor<f32> + %lhs = util.unfoldable_constant dense<1.0> : tensor<f32> + %rhs = util.unfoldable_constant dense<-2.0> : tensor<f32> %result = "mhlo.minimum"(%lhs, %rhs) : (tensor<f32>, tensor<f32>) -> tensor<f32> check.expect_almost_eq_const(%result, dense<-2.0> : tensor<f32>) : tensor<f32> return
diff --git a/iree/test/e2e/xla_ops/minimum.mlir b/iree/test/e2e/xla_ops/minimum.mlir index 2eb6a15..2eb04ca 100644 --- a/iree/test/e2e/xla_ops/minimum.mlir +++ b/iree/test/e2e/xla_ops/minimum.mlir
@@ -1,86 +1,86 @@ func @tensor_i32() { - %lhs = iree.unfoldable_constant dense<[1, 2, 7, 4]> : tensor<4xi32> - %rhs = iree.unfoldable_constant dense<[5, 2, 3, 4]> : tensor<4xi32> + %lhs = util.unfoldable_constant dense<[1, 2, 7, 4]> : tensor<4xi32> + %rhs = util.unfoldable_constant dense<[5, 2, 3, 4]> : tensor<4xi32> %result = "mhlo.minimum"(%lhs, %rhs) : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi32> check.expect_eq_const(%result, dense<[1, 2, 3, 4]> : tensor<4xi32>) : tensor<4xi32> return } func @tensor_odd_dim() { - %lhs = iree.unfoldable_constant dense<[1, 2, 7]> : tensor<3xi32> - %rhs = iree.unfoldable_constant dense<[5, 2, 3]> : tensor<3xi32> + %lhs = util.unfoldable_constant dense<[1, 2, 7]> : tensor<3xi32> + %rhs = util.unfoldable_constant dense<[5, 2, 3]> : tensor<3xi32> %result = "mhlo.minimum"(%lhs, %rhs) : (tensor<3xi32>, tensor<3xi32>) -> tensor<3xi32> check.expect_eq_const(%result, dense<[1, 2, 3]> : tensor<3xi32>) : tensor<3xi32> return } func @scalar_i32() { - %lhs = iree.unfoldable_constant dense<1> : tensor<i32> - %rhs = iree.unfoldable_constant dense<2> : tensor<i32> + %lhs = util.unfoldable_constant dense<1> : tensor<i32> + %rhs = util.unfoldable_constant dense<2> : tensor<i32> %result = "mhlo.minimum"(%lhs, %rhs) : (tensor<i32>, tensor<i32>) -> tensor<i32> check.expect_eq_const(%result, dense<1> : tensor<i32>) : tensor<i32> return } func @negative_i32() { - %lhs = iree.unfoldable_constant dense<1> : tensor<i32> - %rhs = iree.unfoldable_constant dense<-2> : tensor<i32> + %lhs = util.unfoldable_constant dense<1> : tensor<i32> + %rhs = util.unfoldable_constant dense<-2> : tensor<i32> %result = "mhlo.minimum"(%lhs, %rhs) : (tensor<i32>, tensor<i32>) -> tensor<i32> check.expect_eq_const(%result, dense<-2> : tensor<i32>) : tensor<i32> return } func @i8() { - %lhs = iree.unfoldable_constant dense<1> : tensor<i8> - %rhs = iree.unfoldable_constant dense<2> : tensor<i8> + %lhs = util.unfoldable_constant dense<1> : tensor<i8> + %rhs = 
util.unfoldable_constant dense<2> : tensor<i8> %result = "mhlo.minimum"(%lhs, %rhs) : (tensor<i8>, tensor<i8>) -> tensor<i8> check.expect_eq_const(%result, dense<1> : tensor<i8>) : tensor<i8> return } func @i16() { - %lhs = iree.unfoldable_constant dense<1> : tensor<i16> - %rhs = iree.unfoldable_constant dense<2> : tensor<i16> + %lhs = util.unfoldable_constant dense<1> : tensor<i16> + %rhs = util.unfoldable_constant dense<2> : tensor<i16> %result = "mhlo.minimum"(%lhs, %rhs) : (tensor<i16>, tensor<i16>) -> tensor<i16> check.expect_eq_const(%result, dense<1> : tensor<i16>) : tensor<i16> return } func @i64() { - %lhs = iree.unfoldable_constant dense<1> : tensor<i64> - %rhs = iree.unfoldable_constant dense<2> : tensor<i64> + %lhs = util.unfoldable_constant dense<1> : tensor<i64> + %rhs = util.unfoldable_constant dense<2> : tensor<i64> %result = "mhlo.minimum"(%lhs, %rhs) : (tensor<i64>, tensor<i64>) -> tensor<i64> check.expect_eq_const(%result, dense<1> : tensor<i64>) : tensor<i64> return } func @tensor_f32() { - %lhs = iree.unfoldable_constant dense<[1.0, 2.0, 7.0, 4.0]> : tensor<4xf32> - %rhs = iree.unfoldable_constant dense<[5.0, 2.0, 3.0, 4.0]> : tensor<4xf32> + %lhs = util.unfoldable_constant dense<[1.0, 2.0, 7.0, 4.0]> : tensor<4xf32> + %rhs = util.unfoldable_constant dense<[5.0, 2.0, 3.0, 4.0]> : tensor<4xf32> %result = "mhlo.minimum"(%lhs, %rhs) : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32> check.expect_almost_eq_const(%result, dense<[1.0, 2.0, 3.0, 4.0]> : tensor<4xf32>) : tensor<4xf32> return } func @scalar_f32() { - %lhs = iree.unfoldable_constant dense<1.0> : tensor<f32> - %rhs = iree.unfoldable_constant dense<2.0> : tensor<f32> + %lhs = util.unfoldable_constant dense<1.0> : tensor<f32> + %rhs = util.unfoldable_constant dense<2.0> : tensor<f32> %result = "mhlo.minimum"(%lhs, %rhs) : (tensor<f32>, tensor<f32>) -> tensor<f32> check.expect_almost_eq_const(%result, dense<1.0> : tensor<f32>) : tensor<f32> return } func @double() { - %lhs = 
iree.unfoldable_constant dense<1.0> : tensor<f64> - %rhs = iree.unfoldable_constant dense<2.0> : tensor<f64> + %lhs = util.unfoldable_constant dense<1.0> : tensor<f64> + %rhs = util.unfoldable_constant dense<2.0> : tensor<f64> %result = "mhlo.minimum"(%lhs, %rhs) : (tensor<f64>, tensor<f64>) -> tensor<f64> check.expect_almost_eq_const(%result, dense<1.0> : tensor<f64>) : tensor<f64> return } func @negative_f32() { - %lhs = iree.unfoldable_constant dense<1.0> : tensor<f32> - %rhs = iree.unfoldable_constant dense<-2.0> : tensor<f32> + %lhs = util.unfoldable_constant dense<1.0> : tensor<f32> + %rhs = util.unfoldable_constant dense<-2.0> : tensor<f32> %result = "mhlo.minimum"(%lhs, %rhs) : (tensor<f32>, tensor<f32>) -> tensor<f32> check.expect_almost_eq_const(%result, dense<-2.0> : tensor<f32>) : tensor<f32> return
diff --git a/iree/test/e2e/xla_ops/multiply.mlir b/iree/test/e2e/xla_ops/multiply.mlir index 4856ae2..bb3711f 100644 --- a/iree/test/e2e/xla_ops/multiply.mlir +++ b/iree/test/e2e/xla_ops/multiply.mlir
@@ -1,5 +1,5 @@ func @multiply () { - %c2 = iree.unfoldable_constant dense<2.0> : tensor<f32> + %c2 = util.unfoldable_constant dense<2.0> : tensor<f32> %res = "mhlo.multiply"(%c2, %c2) : (tensor<f32>, tensor<f32>) -> tensor<f32> check.expect_almost_eq_const(%res, dense<4.0> : tensor<f32>) : tensor<f32> return
diff --git a/iree/test/e2e/xla_ops/negate.mlir b/iree/test/e2e/xla_ops/negate.mlir index fd91947..00ba4bd 100644 --- a/iree/test/e2e/xla_ops/negate.mlir +++ b/iree/test/e2e/xla_ops/negate.mlir
@@ -1,12 +1,12 @@ func @tensor() { - %input = iree.unfoldable_constant dense<[-1.0, -2.0, 3.0, 4.0]> : tensor<4xf32> + %input = util.unfoldable_constant dense<[-1.0, -2.0, 3.0, 4.0]> : tensor<4xf32> %result = "mhlo.negate"(%input) : (tensor<4xf32>) -> tensor<4xf32> check.expect_almost_eq_const(%result, dense<[1.0, 2.0, -3.0, -4.0]> : tensor<4xf32>) : tensor<4xf32> return } func @scalar() { - %input = iree.unfoldable_constant dense<-4.0> : tensor<f32> + %input = util.unfoldable_constant dense<-4.0> : tensor<f32> %result = "mhlo.negate"(%input) : (tensor<f32>) -> tensor<f32> check.expect_almost_eq_const(%result, dense<4.0> : tensor<f32>) : tensor<f32> return
diff --git a/iree/test/e2e/xla_ops/pad.mlir b/iree/test/e2e/xla_ops/pad.mlir index f30fc85..a5210a5 100644 --- a/iree/test/e2e/xla_ops/pad.mlir +++ b/iree/test/e2e/xla_ops/pad.mlir
@@ -1,5 +1,5 @@ func @pad_test() { - %input = iree.unfoldable_constant dense<[[1, 2, 3], [4, 5, 6]]> : tensor<2x3xi32> + %input = util.unfoldable_constant dense<[[1, 2, 3], [4, 5, 6]]> : tensor<2x3xi32> %c0 = constant dense<0> : tensor<i32> %res = "mhlo.pad"(%input, %c0) { edge_padding_low = dense<[0, 1]> : tensor<2xi64>, @@ -14,7 +14,7 @@ } func @pad_no_op() { - %input = iree.unfoldable_constant dense<[[1, 2, 3], [4, 5, 6]]> : tensor<2x3xi32> + %input = util.unfoldable_constant dense<[[1, 2, 3], [4, 5, 6]]> : tensor<2x3xi32> %c0 = constant dense<0> : tensor<i32> %res = "mhlo.pad"(%input, %c0) {edge_padding_high = dense<[0, 0]> : tensor<2xi64>, edge_padding_low = dense<[0, 0]> : tensor<2xi64>, interior_padding = dense<0> : tensor<2xi64>} : (tensor<2x3xi32>, tensor<i32>) -> tensor<2x3xi32> check.expect_eq(%res, %input) : tensor<2x3xi32>
diff --git a/iree/test/e2e/xla_ops/partial/add_f16.mlir b/iree/test/e2e/xla_ops/partial/add_f16.mlir index d45d39e..8cb89b8 100644 --- a/iree/test/e2e/xla_ops/partial/add_f16.mlir +++ b/iree/test/e2e/xla_ops/partial/add_f16.mlir
@@ -1,6 +1,6 @@ func @add_f16() { - %0 = iree.unfoldable_constant dense<[1.5, 2.0, 3.0, 4.2]> : tensor<4xf16> - %1 = iree.unfoldable_constant dense<[5.0, 6.2, 7.0, 8.1]> : tensor<4xf16> + %0 = util.unfoldable_constant dense<[1.5, 2.0, 3.0, 4.2]> : tensor<4xf16> + %1 = util.unfoldable_constant dense<[5.0, 6.2, 7.0, 8.1]> : tensor<4xf16> %result = "mhlo.add"(%0, %1) : (tensor<4xf16>, tensor<4xf16>) -> tensor<4xf16> check.expect_almost_eq_const(%result, dense<[6.5, 8.2, 10.0, 12.3]> : tensor<4xf16>) : tensor<4xf16> return
diff --git a/iree/test/e2e/xla_ops/partial/dot_f16.mlir b/iree/test/e2e/xla_ops/partial/dot_f16.mlir index 78fbdea..b2676b6 100644 --- a/iree/test/e2e/xla_ops/partial/dot_f16.mlir +++ b/iree/test/e2e/xla_ops/partial/dot_f16.mlir
@@ -1,11 +1,11 @@ func @f16() { - %lhs = iree.unfoldable_constant dense<[ + %lhs = util.unfoldable_constant dense<[ [15.0, 14.0, 13.0], [12.0, 11.0, 10.0], [09.0, 08.0, 07.0], [06.0, 05.0, 04.0], [03.0, 02.0, 01.0]]> : tensor<5x3xf16> - %rhs = iree.unfoldable_constant dense<[ + %rhs = util.unfoldable_constant dense<[ [15.0, 14.0, 13.0, 12.0, 11.0], [10.0, 09.0, 08.0, 07.0, 06.0], [05.0, 04.0, 03.0, 02.0, 01.0]]> : tensor<3x5xf16>
diff --git a/iree/test/e2e/xla_ops/reduce.mlir b/iree/test/e2e/xla_ops/reduce.mlir index c7b11b5..f0b9e53 100644 --- a/iree/test/e2e/xla_ops/reduce.mlir +++ b/iree/test/e2e/xla_ops/reduce.mlir
@@ -1,7 +1,7 @@ // Int sum values from [1, 10] func @reduce_sum_1x10xi32() { - %0 = iree.unfoldable_constant dense<[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]]> : tensor<1x10xi32> - %1 = iree.unfoldable_constant dense<0> : tensor<i32> + %0 = util.unfoldable_constant dense<[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]]> : tensor<1x10xi32> + %1 = util.unfoldable_constant dense<0> : tensor<i32> %res = "mhlo.reduce"(%0, %1) ( { ^bb0(%arg0: tensor<i32>, %arg1: tensor<i32>): // no predecessors %3 = "mhlo.add"(%arg0, %arg1) : (tensor<i32>, tensor<i32>) -> tensor<i32> @@ -13,8 +13,8 @@ // Int max values from [1, 10] func @reduce_max_1x10xi32() { - %0 = iree.unfoldable_constant dense<[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]]> : tensor<1x10xi32> - %1 = iree.unfoldable_constant dense<0> : tensor<i32> + %0 = util.unfoldable_constant dense<[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]]> : tensor<1x10xi32> + %1 = util.unfoldable_constant dense<0> : tensor<i32> %res = "mhlo.reduce"(%0, %1) ( { ^bb0(%arg0: tensor<i32>, %arg1: tensor<i32>): // no predecessors %3 = "mhlo.maximum"(%arg0, %arg1) : (tensor<i32>, tensor<i32>) -> tensor<i32> @@ -26,8 +26,8 @@ // Int min values, along multiple dimensions. Expected to just be a reshape in this case. 
func @reduce_min_5x1x1xi32() { - %0 = iree.unfoldable_constant dense<[[[1]],[[2]],[[3]],[[4]],[[5]]]> : tensor<5x1x1xi32> - %1 = iree.unfoldable_constant dense<999> : tensor<i32> + %0 = util.unfoldable_constant dense<[[[1]],[[2]],[[3]],[[4]],[[5]]]> : tensor<5x1x1xi32> + %1 = util.unfoldable_constant dense<999> : tensor<i32> %res = "mhlo.reduce"(%0, %1) ( { ^bb0(%arg0: tensor<i32>, %arg1: tensor<i32>): // no predecessors %3 = "mhlo.minimum"(%arg0, %arg1) : (tensor<i32>, tensor<i32>) -> tensor<i32> @@ -42,10 +42,10 @@ // https://www.tensorflow.org/xla/operation_semantics#reduce func @reduce_sum_2x3xi32_dim0() { - %0 = iree.unfoldable_constant dense<[ + %0 = util.unfoldable_constant dense<[ [1, 2, 3], [4, 5, 6]]> : tensor<2x3xi32> - %1 = iree.unfoldable_constant dense<0> : tensor<i32> + %1 = util.unfoldable_constant dense<0> : tensor<i32> %res = "mhlo.reduce"(%0, %1) ( { ^bb0(%arg0: tensor<i32>, %arg1: tensor<i32>): // no predecessors %3 = "mhlo.add"(%arg0, %arg1) : (tensor<i32>, tensor<i32>) -> tensor<i32> @@ -56,10 +56,10 @@ } func @reduce_sum_2x3xi32_dim1() { - %0 = iree.unfoldable_constant dense<[ + %0 = util.unfoldable_constant dense<[ [1, 2, 3], [4, 5, 6]]> : tensor<2x3xi32> - %1 = iree.unfoldable_constant dense<0> : tensor<i32> + %1 = util.unfoldable_constant dense<0> : tensor<i32> %res = "mhlo.reduce"(%0, %1) ( { ^bb0(%arg0: tensor<i32>, %arg1: tensor<i32>): // no predecessors %3 = "mhlo.add"(%arg0, %arg1) : (tensor<i32>, tensor<i32>) -> tensor<i32> @@ -70,12 +70,12 @@ } func @reduce_sum_4x2x3xi32_dim0() { - %0 = iree.unfoldable_constant dense<[ + %0 = util.unfoldable_constant dense<[ [[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]]> : tensor<4x2x3xi32> - %1 = iree.unfoldable_constant dense<0> : tensor<i32> + %1 = util.unfoldable_constant dense<0> : tensor<i32> %res = "mhlo.reduce"(%0, %1) ( { ^bb0(%arg0: tensor<i32>, %arg1: tensor<i32>): // no predecessors %3 = "mhlo.add"(%arg0, %arg1) : (tensor<i32>, 
tensor<i32>) -> tensor<i32> @@ -86,12 +86,12 @@ } func @reduce_sum_4x2x3xi32_dim2() { - %0 = iree.unfoldable_constant dense<[ + %0 = util.unfoldable_constant dense<[ [[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]]> : tensor<4x2x3xi32> - %1 = iree.unfoldable_constant dense<0> : tensor<i32> + %1 = util.unfoldable_constant dense<0> : tensor<i32> %res = "mhlo.reduce"(%0, %1) ( { ^bb0(%arg0: tensor<i32>, %arg1: tensor<i32>): // no predecessors %3 = "mhlo.add"(%arg0, %arg1) : (tensor<i32>, tensor<i32>) -> tensor<i32> @@ -102,12 +102,12 @@ } func @reduce_sum_4x2x3xi32_dims_0_1() { - %0 = iree.unfoldable_constant dense<[ + %0 = util.unfoldable_constant dense<[ [[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]]> : tensor<4x2x3xi32> - %1 = iree.unfoldable_constant dense<0> : tensor<i32> + %1 = util.unfoldable_constant dense<0> : tensor<i32> %res = "mhlo.reduce"(%0, %1) ( { ^bb0(%arg0: tensor<i32>, %arg1: tensor<i32>): // no predecessors %3 = "mhlo.add"(%arg0, %arg1) : (tensor<i32>, tensor<i32>) -> tensor<i32> @@ -118,12 +118,12 @@ } func @reduce_sum_4x2x3xi32_dims_0_1_2() { - %0 = iree.unfoldable_constant dense<[ + %0 = util.unfoldable_constant dense<[ [[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]]> : tensor<4x2x3xi32> - %1 = iree.unfoldable_constant dense<0> : tensor<i32> + %1 = util.unfoldable_constant dense<0> : tensor<i32> %res = "mhlo.reduce"(%0, %1) ( { ^bb0(%arg0: tensor<i32>, %arg1: tensor<i32>): // no predecessors %3 = "mhlo.add"(%arg0, %arg1) : (tensor<i32>, tensor<i32>) -> tensor<i32> @@ -135,8 +135,8 @@ // Float sum values from [1.0, 10.0] func @reduce_sum_1x10xf32() { - %0 = iree.unfoldable_constant dense<[[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0]]> : tensor<1x10xf32> - %1 = iree.unfoldable_constant dense<0.0> : tensor<f32> + %0 = util.unfoldable_constant dense<[[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 
10.0]]> : tensor<1x10xf32> + %1 = util.unfoldable_constant dense<0.0> : tensor<f32> %res = "mhlo.reduce"(%0, %1) ( { ^bb0(%arg0: tensor<f32>, %arg1: tensor<f32>): // no predecessors %3 = "mhlo.add"(%arg0, %arg1) : (tensor<f32>, tensor<f32>) -> tensor<f32> @@ -148,8 +148,8 @@ // Float max values from [1.0, 10.0] func @reduce_max_1x10xf32() { - %0 = iree.unfoldable_constant dense<[[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0]]> : tensor<1x10xf32> - %1 = iree.unfoldable_constant dense<0.0> : tensor<f32> + %0 = util.unfoldable_constant dense<[[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0]]> : tensor<1x10xf32> + %1 = util.unfoldable_constant dense<0.0> : tensor<f32> %res = "mhlo.reduce"(%0, %1) ( { ^bb0(%arg0: tensor<f32>, %arg1: tensor<f32>): // no predecessors @@ -163,8 +163,8 @@ // Float min values, along multiple dimensions. Expected to just be a reshape in this case. func @reduce_min_5x1x1xf32() { - %0 = iree.unfoldable_constant dense<[[[1.0]],[[2.0]],[[3.0]],[[4.0]],[[5.0]]]> : tensor<5x1x1xf32> - %1 = iree.unfoldable_constant dense<999.0> : tensor<f32> + %0 = util.unfoldable_constant dense<[[[1.0]],[[2.0]],[[3.0]],[[4.0]],[[5.0]]]> : tensor<5x1x1xf32> + %1 = util.unfoldable_constant dense<999.0> : tensor<f32> %res = "mhlo.reduce"(%0, %1) ( { ^bb0(%arg0: tensor<f32>, %arg1: tensor<f32>): // no predecessors %3 = "mhlo.minimum"(%arg0, %arg1) : (tensor<f32>, tensor<f32>) -> tensor<f32> @@ -178,8 +178,8 @@ // https://www.tensorflow.org/xla/operation_semantics#reduce func @reduce_sum_2x3xf32_dim0() { - %0 = iree.unfoldable_constant dense<[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]> : tensor<2x3xf32> - %1 = iree.unfoldable_constant dense<0.0> : tensor<f32> + %0 = util.unfoldable_constant dense<[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]> : tensor<2x3xf32> + %1 = util.unfoldable_constant dense<0.0> : tensor<f32> %res = "mhlo.reduce"(%0, %1) ( { ^bb0(%arg0: tensor<f32>, %arg1: tensor<f32>): // no predecessors %3 = "mhlo.add"(%arg0, %arg1) : (tensor<f32>, tensor<f32>) -> 
tensor<f32> @@ -190,8 +190,8 @@ } func @reduce_sum_2x3xf32_dim1() { - %0 = iree.unfoldable_constant dense<[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]> : tensor<2x3xf32> - %1 = iree.unfoldable_constant dense<0.0> : tensor<f32> + %0 = util.unfoldable_constant dense<[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]> : tensor<2x3xf32> + %1 = util.unfoldable_constant dense<0.0> : tensor<f32> %res = "mhlo.reduce"(%0, %1) ( { ^bb0(%arg0: tensor<f32>, %arg1: tensor<f32>): // no predecessors %3 = "mhlo.add"(%arg0, %arg1) : (tensor<f32>, tensor<f32>) -> tensor<f32> @@ -202,12 +202,12 @@ } func @reduce_sum_4x2x3xf32_dim0() { - %0 = iree.unfoldable_constant dense<[ + %0 = util.unfoldable_constant dense<[ [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]]> : tensor<4x2x3xf32> - %1 = iree.unfoldable_constant dense<0.0> : tensor<f32> + %1 = util.unfoldable_constant dense<0.0> : tensor<f32> %res = "mhlo.reduce"(%0, %1) ( { ^bb0(%arg0: tensor<f32>, %arg1: tensor<f32>): // no predecessors %3 = "mhlo.add"(%arg0, %arg1) : (tensor<f32>, tensor<f32>) -> tensor<f32> @@ -218,12 +218,12 @@ } func @reduce_sum_4x2x3xf32_dim1() { - %0 = iree.unfoldable_constant dense<[ + %0 = util.unfoldable_constant dense<[ [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]]> : tensor<4x2x3xf32> - %1 = iree.unfoldable_constant dense<0.0> : tensor<f32> + %1 = util.unfoldable_constant dense<0.0> : tensor<f32> %res = "mhlo.reduce"(%0, %1) ( { ^bb0(%arg0: tensor<f32>, %arg1: tensor<f32>): // no predecessors %3 = "mhlo.add"(%arg0, %arg1) : (tensor<f32>, tensor<f32>) -> tensor<f32> @@ -238,12 +238,12 @@ } func @reduce_sum_4x2x3xf32_dim2() { - %0 = iree.unfoldable_constant dense<[ + %0 = util.unfoldable_constant dense<[ [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], [[1.0, 2.0, 3.0], [4.0, 5.0, 
6.0]]]> : tensor<4x2x3xf32> - %1 = iree.unfoldable_constant dense<0.0> : tensor<f32> + %1 = util.unfoldable_constant dense<0.0> : tensor<f32> %res = "mhlo.reduce"(%0, %1) ( { ^bb0(%arg0: tensor<f32>, %arg1: tensor<f32>): // no predecessors %3 = "mhlo.add"(%arg0, %arg1) : (tensor<f32>, tensor<f32>) -> tensor<f32> @@ -258,12 +258,12 @@ } func @reduce_sum_4x2x3xf32_dims_0_1() { - %0 = iree.unfoldable_constant dense<[ + %0 = util.unfoldable_constant dense<[ [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]]> : tensor<4x2x3xf32> - %1 = iree.unfoldable_constant dense<0.0> : tensor<f32> + %1 = util.unfoldable_constant dense<0.0> : tensor<f32> %res = "mhlo.reduce"(%0, %1) ( { ^bb0(%arg0: tensor<f32>, %arg1: tensor<f32>): // no predecessors %3 = "mhlo.add"(%arg0, %arg1) : (tensor<f32>, tensor<f32>) -> tensor<f32> @@ -274,12 +274,12 @@ } func @reduce_sum_4x2x3xf32_dims_0_1_2() { - %0 = iree.unfoldable_constant dense<[ + %0 = util.unfoldable_constant dense<[ [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]]> : tensor<4x2x3xf32> - %1 = iree.unfoldable_constant dense<0.0> : tensor<f32> + %1 = util.unfoldable_constant dense<0.0> : tensor<f32> %res = "mhlo.reduce"(%0, %1) ( { ^bb0(%arg0: tensor<f32>, %arg1: tensor<f32>): // no predecessors %3 = "mhlo.add"(%arg0, %arg1) : (tensor<f32>, tensor<f32>) -> tensor<f32> @@ -292,8 +292,8 @@ func @reducemulti_result() { %cst0 = mhlo.constant dense<-2147483648> : tensor<i32> %cst1 = mhlo.constant dense<0> : tensor<i32> - %arg0 = iree.unfoldable_constant dense<[[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14], [15, 16], [17, 18]]> : tensor<9x2xi32> - %arg1 = iree.unfoldable_constant dense<[[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11], [12, 13], [14, 15], [16, 17]]> : tensor<9x2xi32> + %arg0 = util.unfoldable_constant dense<[[1, 2], [3, 4], [5, 
6], [7, 8], [9, 10], [11, 12], [13, 14], [15, 16], [17, 18]]> : tensor<9x2xi32> + %arg1 = util.unfoldable_constant dense<[[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11], [12, 13], [14, 15], [16, 17]]> : tensor<9x2xi32> %res0, %res1 = "mhlo.reduce"(%arg0, %arg1, %cst0, %cst1) ( { ^bb0(%arg2: tensor<i32>, %arg3: tensor<i32>, %arg4: tensor<i32>, %arg5: tensor<i32>): // no predecessors %0 = "mhlo.compare"(%arg2, %arg4) {comparison_direction = "GE"} : (tensor<i32>, tensor<i32>) -> tensor<i1>
diff --git a/iree/test/e2e/xla_ops/reduce_window.mlir b/iree/test/e2e/xla_ops/reduce_window.mlir index fc48fc6..08efbc2 100644 --- a/iree/test/e2e/xla_ops/reduce_window.mlir +++ b/iree/test/e2e/xla_ops/reduce_window.mlir
@@ -1,9 +1,9 @@ func @reduce_window_nonoverlapping_1x4x6x1xf32() { - %0 = iree.unfoldable_constant dense<[[[[ 1.0], [ 2.0], [ 3.0], [ 4.0], [ 5.0], [ 6.0]], + %0 = util.unfoldable_constant dense<[[[[ 1.0], [ 2.0], [ 3.0], [ 4.0], [ 5.0], [ 6.0]], [[ 7.0], [ 8.0], [ 9.0], [10.0], [11.0], [12.0]], [[13.0], [14.0], [15.0], [16.0], [17.0], [18.0]], [[19.0], [20.0], [21.0], [22.0], [23.0], [24.0]]]]> : tensor<1x4x6x1xf32> - %1 = iree.unfoldable_constant dense<0.0> : tensor<f32> + %1 = util.unfoldable_constant dense<0.0> : tensor<f32> %res = "mhlo.reduce_window"(%0, %1) ( { ^bb0(%arg0: tensor<f32>, %arg1: tensor<f32>): // no predecessors %3 = "mhlo.add"(%arg0, %arg1) : (tensor<f32>, tensor<f32>) -> tensor<f32> @@ -15,11 +15,11 @@ } func @reduce_window_overlapping_4x6xf32() { - %0 = iree.unfoldable_constant dense<[[[[ 1.0], [ 2.0], [ 3.0], [ 4.0], [ 5.0], [ 6.0]], + %0 = util.unfoldable_constant dense<[[[[ 1.0], [ 2.0], [ 3.0], [ 4.0], [ 5.0], [ 6.0]], [[ 7.0], [ 8.0], [ 9.0], [10.0], [11.0], [12.0]], [[13.0], [14.0], [15.0], [16.0], [17.0], [18.0]], [[19.0], [20.0], [21.0], [22.0], [23.0], [24.0]]]]> : tensor<1x4x6x1xf32> - %1 = iree.unfoldable_constant dense<0.0> : tensor<f32> + %1 = util.unfoldable_constant dense<0.0> : tensor<f32> %res = "mhlo.reduce_window"(%0, %1) ( { ^bb0(%arg0: tensor<f32>, %arg1: tensor<f32>): // no predecessors %3 = "mhlo.add"(%arg0, %arg1) : (tensor<f32>, tensor<f32>) -> tensor<f32> @@ -34,11 +34,11 @@ } func @reduce_window_max_4x6xf32() { - %0 = iree.unfoldable_constant dense<[[[[ 1.0], [ 2.0], [ 3.0], [ 4.0], [ 5.0], [ 6.0]], + %0 = util.unfoldable_constant dense<[[[[ 1.0], [ 2.0], [ 3.0], [ 4.0], [ 5.0], [ 6.0]], [[ 7.0], [ 8.0], [ 9.0], [10.0], [11.0], [12.0]], [[13.0], [14.0], [15.0], [16.0], [17.0], [18.0]], [[19.0], [20.0], [21.0], [22.0], [23.0], [24.0]]]]> : tensor<1x4x6x1xf32> - %1 = iree.unfoldable_constant dense<0.0> : tensor<f32> + %1 = util.unfoldable_constant dense<0.0> : tensor<f32> %res = "mhlo.reduce_window"(%0, %1) ( { 
^bb0(%arg0: tensor<f32>, %arg1: tensor<f32>): // no predecessors %3 = "mhlo.maximum"(%arg0, %arg1) : (tensor<f32>, tensor<f32>) -> tensor<f32> @@ -50,11 +50,11 @@ } func @reduce_window_min_4x6xf32() { - %0 = iree.unfoldable_constant dense<[[[[ 1.0], [ 2.0], [ 3.0], [ 4.0], [ 5.0], [ 6.0]], + %0 = util.unfoldable_constant dense<[[[[ 1.0], [ 2.0], [ 3.0], [ 4.0], [ 5.0], [ 6.0]], [[ 7.0], [ 8.0], [ 9.0], [10.0], [11.0], [12.0]], [[13.0], [14.0], [15.0], [16.0], [17.0], [18.0]], [[19.0], [20.0], [21.0], [22.0], [23.0], [24.0]]]]> : tensor<1x4x6x1xf32> - %1 = iree.unfoldable_constant dense<14.0> : tensor<f32> + %1 = util.unfoldable_constant dense<14.0> : tensor<f32> %res = "mhlo.reduce_window"(%0, %1) ( { ^bb0(%arg0: tensor<f32>, %arg1: tensor<f32>): // no predecessors %3 = "mhlo.minimum"(%arg0, %arg1) : (tensor<f32>, tensor<f32>) -> tensor<f32>
diff --git a/iree/test/e2e/xla_ops/remainder.mlir b/iree/test/e2e/xla_ops/remainder.mlir index 094bb6a..fccff23 100644 --- a/iree/test/e2e/xla_ops/remainder.mlir +++ b/iree/test/e2e/xla_ops/remainder.mlir
@@ -1,62 +1,62 @@ func @scalar() { - %input1 = iree.unfoldable_constant dense<16.0> : tensor<f32> - %input2 = iree.unfoldable_constant dense<7.0> : tensor<f32> + %input1 = util.unfoldable_constant dense<16.0> : tensor<f32> + %input2 = util.unfoldable_constant dense<7.0> : tensor<f32> %result = "mhlo.remainder"(%input1, %input2) : (tensor<f32>, tensor<f32>) -> tensor<f32> check.expect_almost_eq_const(%result, dense<2.0> : tensor<f32>) : tensor<f32> return } func @tensor() { - %input1 = iree.unfoldable_constant dense<[16.0, 17.0, 18.0]> : tensor<3xf32> - %input2 = iree.unfoldable_constant dense<[7.0, 8.0, 9.0]> : tensor<3xf32> + %input1 = util.unfoldable_constant dense<[16.0, 17.0, 18.0]> : tensor<3xf32> + %input2 = util.unfoldable_constant dense<[7.0, 8.0, 9.0]> : tensor<3xf32> %result = "mhlo.remainder"(%input1, %input2) : (tensor<3xf32>, tensor<3xf32>) -> tensor<3xf32> check.expect_almost_eq_const(%result, dense<[2.0, 1.0, 0.0]> : tensor<3xf32>) : tensor<3xf32> return } func @negative_den() { - %input1 = iree.unfoldable_constant dense<16.0> : tensor<f32> - %input2 = iree.unfoldable_constant dense<-7.0> : tensor<f32> + %input1 = util.unfoldable_constant dense<16.0> : tensor<f32> + %input2 = util.unfoldable_constant dense<-7.0> : tensor<f32> %result = "mhlo.remainder"(%input1, %input2) : (tensor<f32>, tensor<f32>) -> tensor<f32> check.expect_almost_eq_const(%result, dense<2.0> : tensor<f32>) : tensor<f32> return } func @negative_num() { - %input1 = iree.unfoldable_constant dense<-16.0> : tensor<f32> - %input2 = iree.unfoldable_constant dense<7.0> : tensor<f32> + %input1 = util.unfoldable_constant dense<-16.0> : tensor<f32> + %input2 = util.unfoldable_constant dense<7.0> : tensor<f32> %result = "mhlo.remainder"(%input1, %input2) : (tensor<f32>, tensor<f32>) -> tensor<f32> check.expect_almost_eq_const(%result, dense<-2.0> : tensor<f32>) : tensor<f32> return } func @scalar_int() { - %input1 = iree.unfoldable_constant dense<16> : tensor<i32> - %input2 = 
iree.unfoldable_constant dense<7> : tensor<i32> + %input1 = util.unfoldable_constant dense<16> : tensor<i32> + %input2 = util.unfoldable_constant dense<7> : tensor<i32> %result = "mhlo.remainder"(%input1, %input2) : (tensor<i32>, tensor<i32>) -> tensor<i32> check.expect_eq_const(%result, dense<2> : tensor<i32>) : tensor<i32> return } func @tensor_int() { - %input1 = iree.unfoldable_constant dense<[16, 17, 18]> : tensor<3xi32> - %input2 = iree.unfoldable_constant dense<[7, 8, 9]> : tensor<3xi32> + %input1 = util.unfoldable_constant dense<[16, 17, 18]> : tensor<3xi32> + %input2 = util.unfoldable_constant dense<[7, 8, 9]> : tensor<3xi32> %result = "mhlo.remainder"(%input1, %input2) : (tensor<3xi32>, tensor<3xi32>) -> tensor<3xi32> check.expect_eq_const(%result, dense<[2, 1, 0]> : tensor<3xi32>) : tensor<3xi32> return } func @negative_den_int() { - %input1 = iree.unfoldable_constant dense<16> : tensor<i32> - %input2 = iree.unfoldable_constant dense<-7> : tensor<i32> + %input1 = util.unfoldable_constant dense<16> : tensor<i32> + %input2 = util.unfoldable_constant dense<-7> : tensor<i32> %result = "mhlo.remainder"(%input1, %input2) : (tensor<i32>, tensor<i32>) -> tensor<i32> check.expect_eq_const(%result, dense<2> : tensor<i32>) : tensor<i32> return } func @negative_num_int() { - %input1 = iree.unfoldable_constant dense<-16> : tensor<i32> - %input2 = iree.unfoldable_constant dense<7> : tensor<i32> + %input1 = util.unfoldable_constant dense<-16> : tensor<i32> + %input2 = util.unfoldable_constant dense<7> : tensor<i32> %result = "mhlo.remainder"(%input1, %input2) : (tensor<i32>, tensor<i32>) -> tensor<i32> check.expect_eq_const(%result, dense<-2> : tensor<i32>) : tensor<i32> return
diff --git a/iree/test/e2e/xla_ops/reshape.mlir b/iree/test/e2e/xla_ops/reshape.mlir index 8ca7cf8..692de3d 100644 --- a/iree/test/e2e/xla_ops/reshape.mlir +++ b/iree/test/e2e/xla_ops/reshape.mlir
@@ -1,5 +1,5 @@ func @reshape_1D_2D() { - %input = iree.unfoldable_constant dense<[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]> : tensor<12xi32> + %input = util.unfoldable_constant dense<[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]> : tensor<12xi32> %result = "mhlo.reshape"(%input) : (tensor<12xi32>) -> tensor<3x4xi32> check.expect_eq_const(%result, dense<[ [1, 2, 3, 4], @@ -9,7 +9,7 @@ } // func @reshape_1D_3D() { -// %input = iree.unfoldable_constant dense<[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]> : tensor<12xi32> +// %input = util.unfoldable_constant dense<[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]> : tensor<12xi32> // %result = "mhlo.reshape"(%input) : (tensor<12xi32>) -> tensor<2x2x3xi32> // check.expect_eq_const(%result, dense<[ // [[1, 2, 3], [4, 5, 6]], @@ -18,14 +18,14 @@ // } // func @reshape_2D_3D() { -// %input = iree.unfoldable_constant dense<[[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]> : tensor<2x6xi32> +// %input = util.unfoldable_constant dense<[[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]> : tensor<2x6xi32> // %result = "mhlo.reshape"(%input) : (tensor<2x6xi32>) -> tensor<2x1x6xi32> // check.expect_eq_const(%result, dense<[[[1, 2, 3, 4, 5, 6]], [[7, 8, 9, 10, 11, 12]]]> : tensor<2x1x6xi32>) : tensor<2x1x6xi32> // return // } // func @reshape_3D_1D() { -// %input = iree.unfoldable_constant dense<[[[1, 2, 3, 4, 5, 6]], [[7, 8, 9, 10, 11, 12]]]> : tensor<2x1x6xi32> +// %input = util.unfoldable_constant dense<[[[1, 2, 3, 4, 5, 6]], [[7, 8, 9, 10, 11, 12]]]> : tensor<2x1x6xi32> // %result = "mhlo.reshape"(%input) : (tensor<2x1x6xi32>) -> tensor<2x6xi32> // check.expect_eq_const(%result, dense<[[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]> : tensor<2x6xi32>) : tensor<2x6xi32> // return
diff --git a/iree/test/e2e/xla_ops/reverse.mlir b/iree/test/e2e/xla_ops/reverse.mlir index 30e89f0..58c8726 100644 --- a/iree/test/e2e/xla_ops/reverse.mlir +++ b/iree/test/e2e/xla_ops/reverse.mlir
@@ -1,5 +1,5 @@ func @xla_reverse() { - %t1 = iree.unfoldable_constant dense<[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]> : tensor<2x3xf32> + %t1 = util.unfoldable_constant dense<[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]> : tensor<2x3xf32> %dim0 = "mhlo.reverse"(%t1) {dimensions = dense<0> : tensor<1xi64>} : (tensor<2x3xf32>) -> tensor<2x3xf32> check.expect_almost_eq_const(
diff --git a/iree/test/e2e/xla_ops/round.mlir b/iree/test/e2e/xla_ops/round.mlir index 7ce990c..d2fd24d 100644 --- a/iree/test/e2e/xla_ops/round.mlir +++ b/iree/test/e2e/xla_ops/round.mlir
@@ -1,5 +1,5 @@ func @tensor() { - %input = iree.unfoldable_constant dense<[-0.7, -0.5, -0.2, 0.0, 0.2, 0.5, 0.7]> : tensor<7xf32> + %input = util.unfoldable_constant dense<[-0.7, -0.5, -0.2, 0.0, 0.2, 0.5, 0.7]> : tensor<7xf32> %result = "mhlo.round_nearest_afz"(%input) : (tensor<7xf32>) -> tensor<7xf32> check.expect_almost_eq_const(%result, dense<[-1.0, -1.0, 0.0, 0.0, 0.0, 1.0, 1.0]> : tensor<7xf32>) : tensor<7xf32> return
diff --git a/iree/test/e2e/xla_ops/rsqrt.mlir b/iree/test/e2e/xla_ops/rsqrt.mlir index 1af3940..e7888de 100644 --- a/iree/test/e2e/xla_ops/rsqrt.mlir +++ b/iree/test/e2e/xla_ops/rsqrt.mlir
@@ -1,12 +1,12 @@ func @tensor() { - %input = iree.unfoldable_constant dense<[1.0, 2.0, 3.0, 4.0]> : tensor<4xf32> + %input = util.unfoldable_constant dense<[1.0, 2.0, 3.0, 4.0]> : tensor<4xf32> %result = "mhlo.rsqrt"(%input) : (tensor<4xf32>) -> tensor<4xf32> check.expect_almost_eq_const(%result, dense<[1.0, 0.707107, 0.57735, 0.5]> : tensor<4xf32>) : tensor<4xf32> return } func @scalar() { - %input = iree.unfoldable_constant dense<16.0> : tensor<f32> + %input = util.unfoldable_constant dense<16.0> : tensor<f32> %result = "mhlo.rsqrt"(%input) : (tensor<f32>) -> tensor<f32> check.expect_almost_eq_const(%result, dense<0.25> : tensor<f32>) : tensor<f32> return
diff --git a/iree/test/e2e/xla_ops/scatter.mlir b/iree/test/e2e/xla_ops/scatter.mlir index e973830..3f1fa9b 100644 --- a/iree/test/e2e/xla_ops/scatter.mlir +++ b/iree/test/e2e/xla_ops/scatter.mlir
@@ -1,7 +1,7 @@ func @scatter_update_scalar_1D() { - %arg0 = iree.unfoldable_constant dense<0> : tensor<8xi32> - %arg1 = iree.unfoldable_constant dense<[[1], [3], [4], [7]]> : tensor<4x1xi32> - %arg2 = iree.unfoldable_constant dense<[9, 10, 11, 12]> : tensor<4xi32> + %arg0 = util.unfoldable_constant dense<0> : tensor<8xi32> + %arg1 = util.unfoldable_constant dense<[[1], [3], [4], [7]]> : tensor<4x1xi32> + %arg2 = util.unfoldable_constant dense<[9, 10, 11, 12]> : tensor<4xi32> %0 = "mhlo.scatter"(%arg0, %arg1, %arg2) ( { ^bb0(%arg3: tensor<i32>, %arg4: tensor<i32>): // no predecessors "mhlo.return"(%arg4) : (tensor<i32>) -> () @@ -20,9 +20,9 @@ } func @scatter_update_scalar_2D() { - %arg0 = iree.unfoldable_constant dense<0> : tensor<4x3xi32> - %arg1 = iree.unfoldable_constant dense<[[0, 0], [1, 1], [2, 2]]> : tensor<3x2xi32> - %arg2 = iree.unfoldable_constant dense<[1, 2, 3]> : tensor<3xi32> + %arg0 = util.unfoldable_constant dense<0> : tensor<4x3xi32> + %arg1 = util.unfoldable_constant dense<[[0, 0], [1, 1], [2, 2]]> : tensor<3x2xi32> + %arg2 = util.unfoldable_constant dense<[1, 2, 3]> : tensor<3xi32> %0 = "mhlo.scatter"(%arg0, %arg1, %arg2) ( { ^bb0(%arg3: tensor<i32>, %arg4: tensor<i32>): // no predecessors "mhlo.return"(%arg4) : (tensor<i32>) -> () @@ -43,9 +43,9 @@ } func @scatter_update_slice_2D() { - %arg0 = iree.unfoldable_constant dense<0> : tensor<6x3xi32> - %arg1 = iree.unfoldable_constant dense<[[2], [4]]> : tensor<2x1xi32> - %arg2 = iree.unfoldable_constant dense<[[1, 2, 3], + %arg0 = util.unfoldable_constant dense<0> : tensor<6x3xi32> + %arg1 = util.unfoldable_constant dense<[[2], [4]]> : tensor<2x1xi32> + %arg2 = util.unfoldable_constant dense<[[1, 2, 3], [4, 5, 6]]> : tensor<2x3xi32> %0 = "mhlo.scatter"(%arg0, %arg1, %arg2) ( { ^bb0(%arg3: tensor<i32>, %arg4: tensor<i32>): // no predecessors @@ -70,9 +70,9 @@ } func @scatter_add_slice_2D() { - %arg0 = iree.unfoldable_constant dense<1> : tensor<6x3xi32> - %arg1 = iree.unfoldable_constant dense<[[2], 
[4]]> : tensor<2x1xi32> - %arg2 = iree.unfoldable_constant dense<[[1, 2, 3], + %arg0 = util.unfoldable_constant dense<1> : tensor<6x3xi32> + %arg1 = util.unfoldable_constant dense<[[2], [4]]> : tensor<2x1xi32> + %arg2 = util.unfoldable_constant dense<[[1, 2, 3], [4, 5, 6]]> : tensor<2x3xi32> %0 = "mhlo.scatter"(%arg0, %arg1, %arg2) ( { ^bb0(%arg3: tensor<i32>, %arg4: tensor<i32>): // no predecessors @@ -98,8 +98,8 @@ } func @scatter_1D_large() { - %original = iree.unfoldable_constant dense<1> : tensor<1400xi32> - %update = iree.unfoldable_constant dense<2> : tensor<1400xi32> + %original = util.unfoldable_constant dense<1> : tensor<1400xi32> + %update = util.unfoldable_constant dense<2> : tensor<1400xi32> %init = linalg.init_tensor [1400] : tensor<1400xi32> %indices = linalg.generic { indexing_maps = [affine_map<(d0) -> (d0)>], @@ -130,8 +130,8 @@ } func @scatter_2D_large() { - %original = iree.unfoldable_constant dense<1> : tensor<200x300xi32> - %update = iree.unfoldable_constant dense<2> : tensor<200x300xi32> + %original = util.unfoldable_constant dense<1> : tensor<200x300xi32> + %update = util.unfoldable_constant dense<2> : tensor<200x300xi32> %init = linalg.init_tensor [200] : tensor<200xi32> %indices = linalg.generic { indexing_maps = [affine_map<(d0) -> (d0)>],
diff --git a/iree/test/e2e/xla_ops/scatter_dynamic.mlir b/iree/test/e2e/xla_ops/scatter_dynamic.mlir index 437ffd8..caed10b 100644 --- a/iree/test/e2e/xla_ops/scatter_dynamic.mlir +++ b/iree/test/e2e/xla_ops/scatter_dynamic.mlir
@@ -1,7 +1,7 @@ func @scatter_add_slice_2D_dynamic_num_updates() { - %arg0 = iree.unfoldable_constant dense<1> : tensor<6x3xi32> - %arg1 = iree.dynamic_shape_constant dense<[[2], [4]]> : tensor<2x1xi32> -> tensor<?x1xi32> - %arg2 = iree.dynamic_shape_constant dense<[[1, 2, 3], + %arg0 = util.unfoldable_constant dense<1> : tensor<6x3xi32> + %arg1 = util.dynamic_shape_constant dense<[[2], [4]]> : tensor<2x1xi32> -> tensor<?x1xi32> + %arg2 = util.dynamic_shape_constant dense<[[1, 2, 3], [4, 5, 6]]> : tensor<2x3xi32> -> tensor<?x3xi32> %0 = "mhlo.scatter"(%arg0, %arg1, %arg2) ( { ^bb0(%arg3: tensor<i32>, %arg4: tensor<i32>): // no predecessors
diff --git a/iree/test/e2e/xla_ops/select.mlir b/iree/test/e2e/xla_ops/select.mlir index a4f8c85..d2cff05 100644 --- a/iree/test/e2e/xla_ops/select.mlir +++ b/iree/test/e2e/xla_ops/select.mlir
@@ -1,10 +1,10 @@ func @select() { // TODO(b/132205704) support i1 in constants and function signatures. - %input = iree.unfoldable_constant dense<[1, 0, 1, 0]> : tensor<4xi32> - %zeros = iree.unfoldable_constant dense<0> : tensor<4xi32> + %input = util.unfoldable_constant dense<[1, 0, 1, 0]> : tensor<4xi32> + %zeros = util.unfoldable_constant dense<0> : tensor<4xi32> %cond = "mhlo.compare"(%input, %zeros) {comparison_direction = "GT"} : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi1> - %lhs = iree.unfoldable_constant dense<[1, 2, 3, 4]> : tensor<4xi32> - %rhs = iree.unfoldable_constant dense<[5, 6, 7, 8]> : tensor<4xi32> + %lhs = util.unfoldable_constant dense<[1, 2, 3, 4]> : tensor<4xi32> + %rhs = util.unfoldable_constant dense<[5, 6, 7, 8]> : tensor<4xi32> %result = "mhlo.select"(%cond, %lhs, %rhs) : (tensor<4xi1>, tensor<4xi32>, tensor<4xi32>) -> tensor<4xi32> check.expect_eq_const(%result, dense<[1,6, 3, 8]> : tensor<4xi32>) : tensor<4xi32> return
diff --git a/iree/test/e2e/xla_ops/sine.mlir b/iree/test/e2e/xla_ops/sine.mlir index 0f717c4..973f45e 100644 --- a/iree/test/e2e/xla_ops/sine.mlir +++ b/iree/test/e2e/xla_ops/sine.mlir
@@ -1,12 +1,12 @@ func @tensor() { - %input = iree.unfoldable_constant dense<[0.0, 1.0, 1.5, 2.0]> : tensor<4xf32> + %input = util.unfoldable_constant dense<[0.0, 1.0, 1.5, 2.0]> : tensor<4xf32> %result = "mhlo.sine"(%input) : (tensor<4xf32>) -> tensor<4xf32> check.expect_almost_eq_const(%result, dense<[0.0, 0.8415, 0.9975, 0.9093]> : tensor<4xf32>) : tensor<4xf32> return } func @scalar() { - %input = iree.unfoldable_constant dense<3.0> : tensor<f32> + %input = util.unfoldable_constant dense<3.0> : tensor<f32> %result = "mhlo.sine"(%input) : (tensor<f32>) -> tensor<f32> check.expect_almost_eq_const(%result, dense<0.14112> : tensor<f32>) : tensor<f32> return
diff --git a/iree/test/e2e/xla_ops/slice.mlir b/iree/test/e2e/xla_ops/slice.mlir index 3a4fe4e..a89b149 100644 --- a/iree/test/e2e/xla_ops/slice.mlir +++ b/iree/test/e2e/xla_ops/slice.mlir
@@ -1,5 +1,5 @@ func @slice_whole_buffer() { - %input = iree.unfoldable_constant dense<[ + %input = util.unfoldable_constant dense<[ [01, 02, 03, 04], [05, 06, 07, 08], [09, 10, 11, 12]]> : tensor<3x4xi32> @@ -16,7 +16,7 @@ } func @slice_whole_stride() { - %input = iree.unfoldable_constant dense<[ + %input = util.unfoldable_constant dense<[ [01, 02, 03, 04], [05, 06, 07, 08], [09, 10, 11, 12]]> : tensor<3x4xi32> @@ -30,7 +30,7 @@ } func @slice_stride_part() { - %input = iree.unfoldable_constant dense<[ + %input = util.unfoldable_constant dense<[ [01, 02, 03, 04], [05, 06, 07, 08], [09, 10, 11, 12]]> : tensor<3x4xi32> @@ -44,7 +44,7 @@ } func @slice_multi_stride() { - %input = iree.unfoldable_constant dense<[ + %input = util.unfoldable_constant dense<[ [01, 02, 03, 04], [05, 06, 07, 08], [09, 10, 11, 12]]> : tensor<3x4xi32>
diff --git a/iree/test/e2e/xla_ops/sort.mlir b/iree/test/e2e/xla_ops/sort.mlir index 0b88a9e..58813ea 100644 --- a/iree/test/e2e/xla_ops/sort.mlir +++ b/iree/test/e2e/xla_ops/sort.mlir
@@ -1,5 +1,5 @@ func @sort1D() { - %input = iree.unfoldable_constant dense<[3, 2, 1, 4]> : tensor<4xi32> + %input = util.unfoldable_constant dense<[3, 2, 1, 4]> : tensor<4xi32> %sort = "mhlo.sort"(%input) ( { ^bb0(%arg1: tensor<i32>, %arg2: tensor<i32>): // no predecessors @@ -12,7 +12,7 @@ } func @sort2D() { - %input = iree.unfoldable_constant dense<[[1, 2, 3, 4], + %input = util.unfoldable_constant dense<[[1, 2, 3, 4], [4, 3, 2, 1]]> : tensor<2x4xi32> %sort = "mhlo.sort"(%input) ( { @@ -26,7 +26,7 @@ } func @sort3D() { - %input = iree.unfoldable_constant dense<[[[1, 2, 3, 4], + %input = util.unfoldable_constant dense<[[[1, 2, 3, 4], [4, 3, 2, 1]]]> : tensor<1x2x4xi32> %sort = "mhlo.sort"(%input) ( { @@ -40,7 +40,7 @@ } func @sort_to_decreasing_seq() { - %input = iree.unfoldable_constant dense<[3, 2, 1, 4]> : tensor<4xi32> + %input = util.unfoldable_constant dense<[3, 2, 1, 4]> : tensor<4xi32> %sort = "mhlo.sort"(%input) ( { ^bb0(%arg1: tensor<i32>, %arg2: tensor<i32>): // no predecessors
diff --git a/iree/test/e2e/xla_ops/sqrt.mlir b/iree/test/e2e/xla_ops/sqrt.mlir index 209c556..ae0770a 100644 --- a/iree/test/e2e/xla_ops/sqrt.mlir +++ b/iree/test/e2e/xla_ops/sqrt.mlir
@@ -1,12 +1,12 @@ func @tensor() { - %input = iree.unfoldable_constant dense<[1.0, 2.0, 3.0, 4.0]> : tensor<4xf32> + %input = util.unfoldable_constant dense<[1.0, 2.0, 3.0, 4.0]> : tensor<4xf32> %result = "mhlo.sqrt"(%input) : (tensor<4xf32>) -> tensor<4xf32> check.expect_almost_eq_const(%result, dense<[1.0, 1.4142, 1.7321, 2.0]> : tensor<4xf32>) : tensor<4xf32> return } func @scalar() { - %input = iree.unfoldable_constant dense<16.0> : tensor<f32> + %input = util.unfoldable_constant dense<16.0> : tensor<f32> %result = "mhlo.sqrt"(%input) : (tensor<f32>) -> tensor<f32> check.expect_almost_eq_const(%result, dense<4.0> : tensor<f32>) : tensor<f32> return
diff --git a/iree/test/e2e/xla_ops/subtract.mlir b/iree/test/e2e/xla_ops/subtract.mlir index 54a3640..1ae3581 100644 --- a/iree/test/e2e/xla_ops/subtract.mlir +++ b/iree/test/e2e/xla_ops/subtract.mlir
@@ -1,14 +1,14 @@ func @i32() { - %0 = iree.unfoldable_constant dense<[5, 6, 3, 4]> : tensor<4xi32> - %1 = iree.unfoldable_constant dense<[1, 4, 7, 6]> : tensor<4xi32> + %0 = util.unfoldable_constant dense<[5, 6, 3, 4]> : tensor<4xi32> + %1 = util.unfoldable_constant dense<[1, 4, 7, 6]> : tensor<4xi32> %result = "mhlo.subtract"(%0, %1) : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi32> check.expect_eq_const(%result, dense<[4, 2, -4, -2]> : tensor<4xi32>) : tensor<4xi32> return } func @f32() { - %0 = iree.unfoldable_constant dense<[5.0, 6.0, 3.0, 4.0]> : tensor<4xf32> - %1 = iree.unfoldable_constant dense<[1.0, 4.0, 7.0, 6.0]> : tensor<4xf32> + %0 = util.unfoldable_constant dense<[5.0, 6.0, 3.0, 4.0]> : tensor<4xf32> + %1 = util.unfoldable_constant dense<[1.0, 4.0, 7.0, 6.0]> : tensor<4xf32> %result = "mhlo.subtract"(%0, %1) : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32> check.expect_almost_eq_const(%result, dense<[4.0, 2.0, -4.0, -2.0]> : tensor<4xf32>) : tensor<4xf32> return
diff --git a/iree/test/e2e/xla_ops/tanh.mlir b/iree/test/e2e/xla_ops/tanh.mlir index 7e1742a..26c003c 100644 --- a/iree/test/e2e/xla_ops/tanh.mlir +++ b/iree/test/e2e/xla_ops/tanh.mlir
@@ -1,5 +1,5 @@ func @tanh() { - %input = iree.unfoldable_constant dense< + %input = util.unfoldable_constant dense< [[-100.0, -5.0, -0.5, 1.0], [ 1.2, 2.0, 3.0, 100.0]]> : tensor<2x4xf32> %result = "mhlo.tanh"(%input) : (tensor<2x4xf32>) -> tensor<2x4xf32>
diff --git a/iree/test/e2e/xla_ops/torch_index_select.mlir b/iree/test/e2e/xla_ops/torch_index_select.mlir index ba755d5..1f4eee2 100644 --- a/iree/test/e2e/xla_ops/torch_index_select.mlir +++ b/iree/test/e2e/xla_ops/torch_index_select.mlir
@@ -1,11 +1,11 @@ func @torch_select_index_0() { - %input = iree.unfoldable_constant dense<[ + %input = util.unfoldable_constant dense<[ [[01, 02, 03, 04, 05]], [[06, 07, 08, 09, 10]], [[11, 12, 13, 14, 15]], [[16, 17, 18, 19, 20]], [[21, 22, 23, 24, 25]]]> : tensor<5x1x5xi32> - %indices = iree.unfoldable_constant dense<[0, 2]> : tensor<2xi32> + %indices = util.unfoldable_constant dense<[0, 2]> : tensor<2xi32> %res = "mhlo.torch_index_select"(%input, %indices) { dim = 0 : i64, batch_dims = 0 : i64 @@ -15,11 +15,11 @@ } func @torch_select_index_1() { - %input = iree.unfoldable_constant dense<[ + %input = util.unfoldable_constant dense<[ [[ 1, 2],[ 3, 4]], [[ 5, 6],[ 7, 8]], [[ 9, 10],[11, 12]]]> : tensor<3x2x2xi32> - %indices = iree.unfoldable_constant dense<[0, 1]> : tensor<2xi32> + %indices = util.unfoldable_constant dense<[0, 1]> : tensor<2xi32> %res = "mhlo.torch_index_select"(%input, %indices) { dim = 1 : i64, batch_dims = 0 : i64 @@ -29,13 +29,13 @@ } func @torch_select_index_2() { - %input = iree.unfoldable_constant dense<[ + %input = util.unfoldable_constant dense<[ [[01, 02, 03, 04, 05]], [[06, 07, 08, 09, 10]], [[11, 12, 13, 14, 15]], [[16, 17, 18, 19, 20]], [[21, 22, 23, 24, 25]]]> : tensor<5x1x5xi32> - %indices = iree.unfoldable_constant dense<0> : tensor<i32> + %indices = util.unfoldable_constant dense<0> : tensor<i32> %res = "mhlo.torch_index_select"(%input, %indices) { dim = 0 : i64, batch_dims = 0 : i64
diff --git a/iree/test/e2e/xla_ops/transpose.mlir b/iree/test/e2e/xla_ops/transpose.mlir index c125bcd..db67a29 100644 --- a/iree/test/e2e/xla_ops/transpose.mlir +++ b/iree/test/e2e/xla_ops/transpose.mlir
@@ -1,5 +1,5 @@ func @transpose_2d() { - %input = iree.unfoldable_constant dense<[[1, 2, 3], + %input = util.unfoldable_constant dense<[[1, 2, 3], [4, 5, 6]]> : tensor<2x3xi32> %0 = "mhlo.transpose"(%input) { permutation = dense<[1, 0]> : tensor<2xi64> @@ -11,7 +11,7 @@ } func @transpose_3d() { - %input = iree.unfoldable_constant dense<[[[ 1, 2, 3], + %input = util.unfoldable_constant dense<[[[ 1, 2, 3], [ 4, 5, 6]], [[ 7, 8, 9], [10, 11, 12]]]> : tensor<2x2x3xi32>
diff --git a/iree/test/e2e/xla_ops/while.mlir b/iree/test/e2e/xla_ops/while.mlir index c384b93..d4884c2 100644 --- a/iree/test/e2e/xla_ops/while.mlir +++ b/iree/test/e2e/xla_ops/while.mlir
@@ -1,7 +1,7 @@ // NOTE: this has already been legalized to CFG form in the TF import tools. func @while() { - %start = iree.unfoldable_constant dense<1> : tensor<i32> - %bound = iree.unfoldable_constant dense<3> : tensor<i32> + %start = util.unfoldable_constant dense<1> : tensor<i32> + %bound = util.unfoldable_constant dense<3> : tensor<i32> %cst_1 = constant dense<4> : tensor<i32> br ^bb1(%start : tensor<i32>) ^bb1(%2: tensor<i32>):
diff --git a/iree/test/microbenchmarks/mhlo_conv.mlir b/iree/test/microbenchmarks/mhlo_conv.mlir index 90a7f93..e75343f 100644 --- a/iree/test/microbenchmarks/mhlo_conv.mlir +++ b/iree/test/microbenchmarks/mhlo_conv.mlir
@@ -9,8 +9,8 @@ //===----------------------------------------------------------------------===// func @conv_244_112_3x3_3x32() -> tensor<1x112x112x32xf32> { - %input = iree.unfoldable_constant dense<1.0> : tensor<1x224x224x3xf32> - %filter = iree.unfoldable_constant dense<1.0> : tensor<3x3x3x32xf32> + %input = util.unfoldable_constant dense<1.0> : tensor<1x224x224x3xf32> + %filter = util.unfoldable_constant dense<1.0> : tensor<3x3x3x32xf32> %0 = "mhlo.convolution"(%input, %filter) { batch_group_count = 1 : i64, dimension_numbers = { @@ -33,8 +33,8 @@ } func @conv_112_112_1x1_32x64() -> tensor<1x112x112x64xf32> { - %input = iree.unfoldable_constant dense<1.0> : tensor<1x112x112x32xf32> - %filter = iree.unfoldable_constant dense<1.0> : tensor<1x1x32x64xf32> + %input = util.unfoldable_constant dense<1.0> : tensor<1x112x112x32xf32> + %filter = util.unfoldable_constant dense<1.0> : tensor<1x1x32x64xf32> %0 = "mhlo.convolution"(%input, %filter) { batch_group_count = 1 : i64, dimension_numbers = { @@ -57,8 +57,8 @@ } func @conv_7_7_1x1_1024x1024() -> tensor<1x7x7x1024xf32> { - %input = iree.unfoldable_constant dense<1.0> : tensor<1x7x7x1024xf32> - %filter = iree.unfoldable_constant dense<1.0> : tensor<1x1x1024x1024xf32> + %input = util.unfoldable_constant dense<1.0> : tensor<1x7x7x1024xf32> + %filter = util.unfoldable_constant dense<1.0> : tensor<1x1x1024x1024xf32> %0 = "mhlo.convolution"(%input, %filter) { batch_group_count = 1 : i64, dimension_numbers = { @@ -92,8 +92,8 @@ //===----------------------------------------------------------------------===// func @depthwise_conv_15x1_1x1_15x1_1x1024_1024() -> tensor<1x1x1x1024xf32> { - %input = iree.unfoldable_constant dense<1.0> : tensor<1x15x1x1024xf32> - %filter = iree.unfoldable_constant dense<1.0> : tensor<15x1x1x1024xf32> + %input = util.unfoldable_constant dense<1.0> : tensor<1x15x1x1024xf32> + %filter = util.unfoldable_constant dense<1.0> : tensor<15x1x1x1024xf32> %res = "mhlo.convolution"(%input, %filter) { 
batch_group_count = 1 : i64, dimension_numbers = { @@ -116,8 +116,8 @@ } func @depthwise_conv_15x1_1x1_15x1_1x512_512() -> tensor<1x1x1x512xf32> { - %input = iree.unfoldable_constant dense<1.0> : tensor<1x15x1x512xf32> - %filter = iree.unfoldable_constant dense<1.0> : tensor<15x1x1x512xf32> + %input = util.unfoldable_constant dense<1.0> : tensor<1x15x1x512xf32> + %filter = util.unfoldable_constant dense<1.0> : tensor<15x1x1x512xf32> %res = "mhlo.convolution"(%input, %filter) { batch_group_count = 1 : i64, dimension_numbers = { @@ -140,8 +140,8 @@ } func @depthwise_conv_16x1_2x1_16x1_1x512_512() -> tensor<1x2x1x512xf32> { - %input = iree.unfoldable_constant dense<1.0> : tensor<1x16x1x512xf32> - %filter = iree.unfoldable_constant dense<1.0> : tensor<15x1x1x512xf32> + %input = util.unfoldable_constant dense<1.0> : tensor<1x16x1x512xf32> + %filter = util.unfoldable_constant dense<1.0> : tensor<15x1x1x512xf32> %res = "mhlo.convolution"(%input, %filter) { batch_group_count = 1 : i64, dimension_numbers = {
diff --git a/iree/test/microbenchmarks/mhlo_dot.mlir b/iree/test/microbenchmarks/mhlo_dot.mlir index 5fe154f..81d6b2c 100644 --- a/iree/test/microbenchmarks/mhlo_dot.mlir +++ b/iree/test/microbenchmarks/mhlo_dot.mlir
@@ -3,43 +3,43 @@ //===----------------------------------------------------------------------===// func @dot_384x384x512() -> tensor<384x512xf32> { - %lhs = iree.unfoldable_constant dense<1.0> : tensor<384x384xf32> - %rhs = iree.unfoldable_constant dense<1.0> : tensor<384x512xf32> + %lhs = util.unfoldable_constant dense<1.0> : tensor<384x384xf32> + %rhs = util.unfoldable_constant dense<1.0> : tensor<384x512xf32> %0 = "mhlo.dot"(%lhs, %rhs) : (tensor<384x384xf32>, tensor<384x512xf32>) -> tensor<384x512xf32> return %0: tensor<384x512xf32> } func @dot_384x128x128() -> tensor<384x128xf32> { - %lhs = iree.unfoldable_constant dense<1.0> : tensor<384x128xf32> - %rhs = iree.unfoldable_constant dense<1.0> : tensor<128x128xf32> + %lhs = util.unfoldable_constant dense<1.0> : tensor<384x128xf32> + %rhs = util.unfoldable_constant dense<1.0> : tensor<128x128xf32> %0 = "mhlo.dot"(%lhs, %rhs) : (tensor<384x128xf32>, tensor<128x128xf32>) -> tensor<384x128xf32> return %0 : tensor<384x128xf32> } func @dot_384x128x512() -> tensor<384x512xf32> { - %lhs = iree.unfoldable_constant dense<1.0> : tensor<384x128xf32> - %rhs = iree.unfoldable_constant dense<1.0> : tensor<128x512xf32> + %lhs = util.unfoldable_constant dense<1.0> : tensor<384x128xf32> + %rhs = util.unfoldable_constant dense<1.0> : tensor<128x512xf32> %0 = "mhlo.dot"(%lhs, %rhs) : (tensor<384x128xf32>, tensor<128x512xf32>) -> tensor<384x512xf32> return %0 : tensor<384x512xf32> } func @dot_384x512x128() -> tensor<384x128xf32> { - %lhs = iree.unfoldable_constant dense<1.0> : tensor<384x512xf32> - %rhs = iree.unfoldable_constant dense<1.0> : tensor<512x128xf32> + %lhs = util.unfoldable_constant dense<1.0> : tensor<384x512xf32> + %rhs = util.unfoldable_constant dense<1.0> : tensor<512x128xf32> %0 = "mhlo.dot"(%lhs, %rhs) : (tensor<384x512xf32>, tensor<512x128xf32>) -> tensor<384x128xf32> return %0 : tensor<384x128xf32> } func @dot_384x512x2() -> tensor<384x2xf32> { - %lhs = iree.unfoldable_constant dense<1.0> : tensor<384x512xf32> - 
%rhs = iree.unfoldable_constant dense<1.0> : tensor<512x2xf32> + %lhs = util.unfoldable_constant dense<1.0> : tensor<384x512xf32> + %rhs = util.unfoldable_constant dense<1.0> : tensor<512x2xf32> %0 = "mhlo.dot"(%lhs, %rhs) : (tensor<384x512xf32>, tensor<512x2xf32>) -> tensor<384x2xf32> return %0 : tensor<384x2xf32> } func @dot_384x384x32() -> tensor<384x32xf32> { - %lhs = iree.unfoldable_constant dense<1.0> : tensor<384x384xf32> - %rhs = iree.unfoldable_constant dense<1.0> : tensor<384x32xf32> + %lhs = util.unfoldable_constant dense<1.0> : tensor<384x384xf32> + %rhs = util.unfoldable_constant dense<1.0> : tensor<384x32xf32> %0 = "mhlo.dot"(%lhs, %rhs) : (tensor<384x384xf32>, tensor<384x32xf32>) -> tensor<384x32xf32> return %0 : tensor<384x32xf32> } @@ -49,127 +49,127 @@ //===----------------------------------------------------------------------===// func @dot_1x1024x1024() -> tensor<1x1024xf32> { - %lhs = iree.unfoldable_constant dense<1.0> : tensor<1x1024xf32> - %rhs = iree.unfoldable_constant dense<1.0> : tensor<1024x1024xf32> + %lhs = util.unfoldable_constant dense<1.0> : tensor<1x1024xf32> + %rhs = util.unfoldable_constant dense<1.0> : tensor<1024x1024xf32> %0 = "mhlo.dot"(%lhs, %rhs) : (tensor<1x1024xf32>, tensor<1024x1024xf32>) -> tensor<1x1024xf32> return %0 : tensor<1x1024xf32> } func @dot_1x1024x2048() -> tensor<1x2048xf32> { - %lhs = iree.unfoldable_constant dense<1.0> : tensor<1x1024xf32> - %rhs = iree.unfoldable_constant dense<1.0> : tensor<1024x2048xf32> + %lhs = util.unfoldable_constant dense<1.0> : tensor<1x1024xf32> + %rhs = util.unfoldable_constant dense<1.0> : tensor<1024x2048xf32> %0 = "mhlo.dot"(%lhs, %rhs) : (tensor<1x1024xf32>, tensor<1024x2048xf32>) -> tensor<1x2048xf32> return %0 : tensor<1x2048xf32> } func @dot_1x1024x3072() -> tensor<1x3072xf32> { - %lhs = iree.unfoldable_constant dense<1.0> : tensor<1x1024xf32> - %rhs = iree.unfoldable_constant dense<1.0> : tensor<1024x3072xf32> + %lhs = util.unfoldable_constant dense<1.0> : 
tensor<1x1024xf32> + %rhs = util.unfoldable_constant dense<1.0> : tensor<1024x3072xf32> %0 = "mhlo.dot"(%lhs, %rhs) : (tensor<1x1024xf32>, tensor<1024x3072xf32>) -> tensor<1x3072xf32> return %0 : tensor<1x3072xf32> } func @dot_1x1024x512() -> tensor<1x512xf32> { - %lhs = iree.unfoldable_constant dense<1.0> : tensor<1x1024xf32> - %rhs = iree.unfoldable_constant dense<1.0> : tensor<1024x512xf32> + %lhs = util.unfoldable_constant dense<1.0> : tensor<1x1024xf32> + %rhs = util.unfoldable_constant dense<1.0> : tensor<1024x512xf32> %0 = "mhlo.dot"(%lhs, %rhs) : (tensor<1x1024xf32>, tensor<1024x512xf32>) -> tensor<1x512xf32> return %0 : tensor<1x512xf32> } func @dot_1x128x2() -> tensor<1x2xf32> { - %lhs = iree.unfoldable_constant dense<1.0> : tensor<1x128xf32> - %rhs = iree.unfoldable_constant dense<1.0> : tensor<128x2xf32> + %lhs = util.unfoldable_constant dense<1.0> : tensor<1x128xf32> + %rhs = util.unfoldable_constant dense<1.0> : tensor<128x2xf32> %0 = "mhlo.dot"(%lhs, %rhs) : (tensor<1x128xf32>, tensor<128x2xf32>) -> tensor<1x2xf32> return %0 : tensor<1x2xf32> } func @dot_1x256x512() -> tensor<1x512xf32> { - %lhs = iree.unfoldable_constant dense<1.0> : tensor<1x256xf32> - %rhs = iree.unfoldable_constant dense<1.0> : tensor<256x512xf32> + %lhs = util.unfoldable_constant dense<1.0> : tensor<1x256xf32> + %rhs = util.unfoldable_constant dense<1.0> : tensor<256x512xf32> %0 = "mhlo.dot"(%lhs, %rhs) : (tensor<1x256xf32>, tensor<256x512xf32>) -> tensor<1x512xf32> return %0 : tensor<1x512xf32> } func @dot_1x3072x1024() -> tensor<1x1024xf32> { - %lhs = iree.unfoldable_constant dense<1.0> : tensor<1x3072xf32> - %rhs = iree.unfoldable_constant dense<1.0> : tensor<3072x1024xf32> + %lhs = util.unfoldable_constant dense<1.0> : tensor<1x3072xf32> + %rhs = util.unfoldable_constant dense<1.0> : tensor<3072x1024xf32> %0 = "mhlo.dot"(%lhs, %rhs) : (tensor<1x3072xf32>, tensor<3072x1024xf32>) -> tensor<1x1024xf32> return %0 : tensor<1x1024xf32> } func @dot_1x3072x512() -> tensor<1x512xf32> 
{ - %lhs = iree.unfoldable_constant dense<1.0> : tensor<1x3072xf32> - %rhs = iree.unfoldable_constant dense<1.0> : tensor<3072x512xf32> + %lhs = util.unfoldable_constant dense<1.0> : tensor<1x3072xf32> + %rhs = util.unfoldable_constant dense<1.0> : tensor<3072x512xf32> %0 = "mhlo.dot"(%lhs, %rhs) : (tensor<1x3072xf32>, tensor<3072x512xf32>) -> tensor<1x512xf32> return %0 : tensor<1x512xf32> } func @dot_1x512x1024() -> tensor<1x1024xf32> { - %lhs = iree.unfoldable_constant dense<1.0> : tensor<1x512xf32> - %rhs = iree.unfoldable_constant dense<1.0> : tensor<512x1024xf32> + %lhs = util.unfoldable_constant dense<1.0> : tensor<1x512xf32> + %rhs = util.unfoldable_constant dense<1.0> : tensor<512x1024xf32> %0 = "mhlo.dot"(%lhs, %rhs) : (tensor<1x512xf32>, tensor<512x1024xf32>) -> tensor<1x1024xf32> return %0 : tensor<1x1024xf32> } func @dot_1x512x3072() -> tensor<1x3072xf32> { - %lhs = iree.unfoldable_constant dense<1.0> : tensor<1x512xf32> - %rhs = iree.unfoldable_constant dense<1.0> : tensor<512x3072xf32> + %lhs = util.unfoldable_constant dense<1.0> : tensor<1x512xf32> + %rhs = util.unfoldable_constant dense<1.0> : tensor<512x3072xf32> %0 = "mhlo.dot"(%lhs, %rhs) : (tensor<1x512xf32>, tensor<512x3072xf32>) -> tensor<1x3072xf32> return %0 : tensor<1x3072xf32> } func @dot_1x512x512() -> tensor<1x512xf32> { - %lhs = iree.unfoldable_constant dense<1.0> : tensor<1x512xf32> - %rhs = iree.unfoldable_constant dense<1.0> : tensor<512x512xf32> + %lhs = util.unfoldable_constant dense<1.0> : tensor<1x512xf32> + %rhs = util.unfoldable_constant dense<1.0> : tensor<512x512xf32> %0 = "mhlo.dot"(%lhs, %rhs) : (tensor<1x512xf32>, tensor<512x512xf32>) -> tensor<1x512xf32> return %0 : tensor<1x512xf32> } func @dot_1x528x128() -> tensor<1x128xf32> { - %lhs = iree.unfoldable_constant dense<1.0> : tensor<1x528xf32> - %rhs = iree.unfoldable_constant dense<1.0> : tensor<528x128xf32> + %lhs = util.unfoldable_constant dense<1.0> : tensor<1x528xf32> + %rhs = util.unfoldable_constant dense<1.0> : 
tensor<528x128xf32> %0 = "mhlo.dot"(%lhs, %rhs) : (tensor<1x528xf32>, tensor<528x128xf32>) -> tensor<1x128xf32> return %0 : tensor<1x128xf32> } func @dot_2x3072x512() -> tensor<2x512xf32> { - %lhs = iree.unfoldable_constant dense<1.0> : tensor<2x3072xf32> - %rhs = iree.unfoldable_constant dense<1.0> : tensor<3072x512xf32> + %lhs = util.unfoldable_constant dense<1.0> : tensor<2x3072xf32> + %rhs = util.unfoldable_constant dense<1.0> : tensor<3072x512xf32> %0 = "mhlo.dot"(%lhs, %rhs) : (tensor<2x3072xf32>, tensor<3072x512xf32>) -> tensor<2x512xf32> return %0 : tensor<2x512xf32> } func @dot_2x512x1024() -> tensor<2x1024xf32> { - %lhs = iree.unfoldable_constant dense<1.0> : tensor<2x512xf32> - %rhs = iree.unfoldable_constant dense<1.0> : tensor<512x1024xf32> + %lhs = util.unfoldable_constant dense<1.0> : tensor<2x512xf32> + %rhs = util.unfoldable_constant dense<1.0> : tensor<512x1024xf32> %0 = "mhlo.dot"(%lhs, %rhs) : (tensor<2x512xf32>, tensor<512x1024xf32>) -> tensor<2x1024xf32> return %0 : tensor<2x1024xf32> } func @dot_2x512x3072() -> tensor<2x3072xf32> { - %lhs = iree.unfoldable_constant dense<1.0> : tensor<2x512xf32> - %rhs = iree.unfoldable_constant dense<1.0> : tensor<512x3072xf32> + %lhs = util.unfoldable_constant dense<1.0> : tensor<2x512xf32> + %rhs = util.unfoldable_constant dense<1.0> : tensor<512x3072xf32> %0 = "mhlo.dot"(%lhs, %rhs) : (tensor<2x512xf32>, tensor<512x3072xf32>) -> tensor<2x3072xf32> return %0 : tensor<2x3072xf32> } func @dot_2x512x512() -> tensor<2x512xf32> { - %lhs = iree.unfoldable_constant dense<1.0> : tensor<2x512xf32> - %rhs = iree.unfoldable_constant dense<1.0> : tensor<512x512xf32> + %lhs = util.unfoldable_constant dense<1.0> : tensor<2x512xf32> + %rhs = util.unfoldable_constant dense<1.0> : tensor<512x512xf32> %0 = "mhlo.dot"(%lhs, %rhs) : (tensor<2x512xf32>, tensor<512x512xf32>) -> tensor<2x512xf32> return %0 : tensor<2x512xf32> } func @dot_2x528x512() -> tensor<2x512xf32> { - %lhs = iree.unfoldable_constant dense<1.0> : 
tensor<2x528xf32> - %rhs = iree.unfoldable_constant dense<1.0> : tensor<528x512xf32> + %lhs = util.unfoldable_constant dense<1.0> : tensor<2x528xf32> + %rhs = util.unfoldable_constant dense<1.0> : tensor<528x512xf32> %0 = "mhlo.dot"(%lhs, %rhs) : (tensor<2x528xf32>, tensor<528x512xf32>) -> tensor<2x512xf32> return %0 : tensor<2x512xf32> } func @dot_6x513x128() -> tensor<6x128xf32> { - %lhs = iree.unfoldable_constant dense<1.0> : tensor<6x513xf32> - %rhs = iree.unfoldable_constant dense<1.0> : tensor<513x128xf32> + %lhs = util.unfoldable_constant dense<1.0> : tensor<6x513xf32> + %rhs = util.unfoldable_constant dense<1.0> : tensor<513x128xf32> %0 = "mhlo.dot"(%lhs, %rhs) : (tensor<6x513xf32>, tensor<513x128xf32>) -> tensor<6x128xf32> return %0 : tensor<6x128xf32> }
diff --git a/iree/test/microbenchmarks/mhlo_dot_general.mlir b/iree/test/microbenchmarks/mhlo_dot_general.mlir index 6f45412..c347c57 100644 --- a/iree/test/microbenchmarks/mhlo_dot_general.mlir +++ b/iree/test/microbenchmarks/mhlo_dot_general.mlir
@@ -2,8 +2,8 @@ // https://github.com/google/iree/blob/main/integrations/tensorflow/e2e/mobile_bert_squad_test.py func @dot_general_4x384x32x384() -> tensor<4x384x384xf32> { - %lhs = iree.unfoldable_constant dense<1.0> : tensor<4x384x32xf32> - %rhs = iree.unfoldable_constant dense<1.0> : tensor<4x32x384xf32> + %lhs = util.unfoldable_constant dense<1.0> : tensor<4x384x32xf32> + %rhs = util.unfoldable_constant dense<1.0> : tensor<4x32x384xf32> %0 = "mhlo.dot_general"(%lhs, %rhs) { dot_dimension_numbers = { lhs_batching_dimensions = dense<0> : tensor<1xi64>, @@ -16,8 +16,8 @@ } func @dot_general_4x384x384x32() -> tensor<4x384x32xf32> { - %lhs = iree.unfoldable_constant dense<1.0> : tensor<4x384x384xf32> - %rhs = iree.unfoldable_constant dense<1.0> : tensor<4x384x32xf32> + %lhs = util.unfoldable_constant dense<1.0> : tensor<4x384x384xf32> + %rhs = util.unfoldable_constant dense<1.0> : tensor<4x384x32xf32> %0 = "mhlo.dot_general"(%lhs, %rhs) { dot_dimension_numbers = { lhs_batching_dimensions = dense<0> : tensor<1xi64>,
diff --git a/iree/test/microbenchmarks/mhlo_fft_abs.mlir b/iree/test/microbenchmarks/mhlo_fft_abs.mlir index f60b8fc..a06d31f 100644 --- a/iree/test/microbenchmarks/mhlo_fft_abs.mlir +++ b/iree/test/microbenchmarks/mhlo_fft_abs.mlir
@@ -3,7 +3,7 @@ //===----------------------------------------------------------------------===// func @rfft_abs_6x1024() -> tensor<6x513xf32> { - %input = iree.unfoldable_constant dense<1.0> : tensor<6x1024xf32> + %input = util.unfoldable_constant dense<1.0> : tensor<6x1024xf32> %0 = "mhlo.fft"(%input) { fft_length = dense<1024> : tensor<1xi64>, fft_type = "RFFT"
diff --git a/iree/tools/BUILD b/iree/tools/BUILD index 4ca84ad..81ba81d 100644 --- a/iree/tools/BUILD +++ b/iree/tools/BUILD
@@ -104,14 +104,14 @@ "//iree/compiler/Dialect/Flow/Transforms", "//iree/compiler/Dialect/HAL/IR:HALDialect", "//iree/compiler/Dialect/HAL/Transforms", - "//iree/compiler/Dialect/IREE/IR", - "//iree/compiler/Dialect/IREE/Transforms", "//iree/compiler/Dialect/LinalgExt/IR", "//iree/compiler/Dialect/LinalgExt/Transforms", "//iree/compiler/Dialect/Modules/VMVX/IR:VMVXDialect", "//iree/compiler/Dialect/Modules/VMVX/Transforms", "//iree/compiler/Dialect/Shape/IR", "//iree/compiler/Dialect/Shape/Transforms", + "//iree/compiler/Dialect/Util/IR", + "//iree/compiler/Dialect/Util/Transforms", "//iree/compiler/Dialect/VM/Analysis", "//iree/compiler/Dialect/VM/IR", "//iree/compiler/Dialect/VM/Transforms", @@ -333,7 +333,7 @@ cc_binary( name = "iree-tblgen", srcs = [ - "//iree/compiler/Dialect/IREE/Tools:GenSrcs", + "//iree/compiler/Dialect/Util/Tools:GenSrcs", "//iree/compiler/Dialect/VM/Tools:GenSrcs", ], tags = ["hostonly"],
diff --git a/iree/tools/CMakeLists.txt b/iree/tools/CMakeLists.txt index 1217d8e..0764f85 100644 --- a/iree/tools/CMakeLists.txt +++ b/iree/tools/CMakeLists.txt
@@ -178,7 +178,7 @@ iree-tblgen SRCS "${IREE_ROOT_DIR}/third_party/llvm-project/mlir/tools/mlir-tblgen/mlir-tblgen.cpp" - "${IREE_SOURCE_DIR}/iree/compiler/Dialect/IREE/Tools/StructAttrGen.cpp" + "${IREE_SOURCE_DIR}/iree/compiler/Dialect/Util/Tools/StructAttrGen.cpp" "${IREE_SOURCE_DIR}/iree/compiler/Dialect/VM/Tools/VMOpEncoderGen.cpp" "${IREE_SOURCE_DIR}/iree/compiler/Dialect/VM/Tools/VMOpTableGen.cpp" DEPS @@ -203,14 +203,14 @@ iree::compiler::Dialect::Flow::Transforms iree::compiler::Dialect::HAL::IR::HALDialect iree::compiler::Dialect::HAL::Transforms - iree::compiler::Dialect::IREE::IR - iree::compiler::Dialect::IREE::Transforms iree::compiler::Dialect::LinalgExt::IR iree::compiler::Dialect::LinalgExt::Transforms iree::compiler::Dialect::Modules::VMVX::IR::VMVXDialect iree::compiler::Dialect::Modules::VMVX::Transforms iree::compiler::Dialect::Shape::IR iree::compiler::Dialect::Shape::Transforms + iree::compiler::Dialect::Util::IR + iree::compiler::Dialect::Util::Transforms iree::compiler::Dialect::VM::Analysis iree::compiler::Dialect::VM::IR iree::compiler::Dialect::VM::Transforms @@ -462,12 +462,10 @@ # does not support (https://gitlab.kitware.com/cmake/cmake/-/issues/18627). add_executable(iree::tools::IreeFileCheck ALIAS iree_tools_IreeFileCheck) - if(${IREE_MLIR_DEP_MODE} STREQUAL "BUNDLED") - add_custom_target(BundledLLVMFileCheck ALL - COMMAND ${CMAKE_COMMAND} -E create_symlink $<TARGET_FILE:FileCheck> FileCheck - DEPENDS FileCheck - ) - endif() + add_custom_target(BundledLLVMFileCheck ALL + COMMAND ${CMAKE_COMMAND} -E create_symlink $<TARGET_FILE:FileCheck> FileCheck + DEPENDS FileCheck + ) # lld install - required by the compiler to link codegen executables. install(
diff --git a/iree/tools/init_iree_dialects.h b/iree/tools/init_iree_dialects.h index c3c607e..200a30b 100644 --- a/iree/tools/init_iree_dialects.h +++ b/iree/tools/init_iree_dialects.h
@@ -14,10 +14,10 @@ #include "iree/compiler/Dialect/Flow/IR/FlowDialect.h" #include "iree/compiler/Dialect/HAL/IR/HALDialect.h" -#include "iree/compiler/Dialect/IREE/IR/IREEDialect.h" #include "iree/compiler/Dialect/LinalgExt/IR/LinalgExtDialect.h" #include "iree/compiler/Dialect/Modules/VMVX/IR/VMVXDialect.h" #include "iree/compiler/Dialect/Shape/IR/ShapeDialect.h" +#include "iree/compiler/Dialect/Util/IR/UtilDialect.h" #include "iree/compiler/Dialect/VM/IR/VMDialect.h" #include "iree/compiler/Dialect/Vulkan/IR/VulkanDialect.h" #include "mlir/IR/Dialect.h" @@ -31,7 +31,7 @@ registry.insert<IREE::Flow::FlowDialect, IREE::HAL::HALDialect, ShapeDialect, - IREEDialect, + IREE::Util::UtilDialect, linalg_ext::LinalgExtDialect, IREE::VM::VMDialect, IREE::VMVX::VMVXDialect,
diff --git a/iree/tools/init_iree_passes.h b/iree/tools/init_iree_passes.h index 20bd549..7c8daa3 100644 --- a/iree/tools/init_iree_passes.h +++ b/iree/tools/init_iree_passes.h
@@ -18,10 +18,10 @@ #include "iree/compiler/Bindings/TFLite/Transforms/Passes.h" #include "iree/compiler/Dialect/Flow/Transforms/Passes.h" #include "iree/compiler/Dialect/HAL/Transforms/Passes.h" -#include "iree/compiler/Dialect/IREE/Transforms/Passes.h" #include "iree/compiler/Dialect/LinalgExt/Transforms/Passes.h" #include "iree/compiler/Dialect/Modules/VMVX/Transforms/Passes.h" #include "iree/compiler/Dialect/Shape/Transforms/Passes.h" +#include "iree/compiler/Dialect/Util/Transforms/Passes.h" #include "iree/compiler/Dialect/VM/Analysis/TestPasses.h" #include "iree/compiler/Dialect/VM/Transforms/Passes.h" #include "iree/compiler/InputConversion/Common/Passes.h" @@ -45,10 +45,10 @@ registerTOSAConversionPasses(); linalg_ext::registerLinalgExtPasses(); + Shape::registerShapePasses(); IREE::Flow::registerFlowPasses(); IREE::HAL::registerHALPasses(); - IREE::registerTransformPasses(); - Shape::registerShapePasses(); + IREE::Util::registerTransformPasses(); IREE::VM::registerVMPasses(); IREE::VM::registerVMAnalysisTestPasses(); IREE::VM::registerVMTestPasses();
diff --git a/iree/tools/test/benchmark_flags.txt b/iree/tools/test/benchmark_flags.txt index 015254f..b48d3bb 100644 --- a/iree/tools/test/benchmark_flags.txt +++ b/iree/tools/test/benchmark_flags.txt
@@ -6,13 +6,13 @@ module { // LIST-BENCHMARKS: BM_foo1 func @foo1() -> tensor<4xf32> { - %input = iree.unfoldable_constant dense<[0.0, 1.0, 2.0, 4.0]> : tensor<4xf32> + %input = util.unfoldable_constant dense<[0.0, 1.0, 2.0, 4.0]> : tensor<4xf32> %result = "mhlo.exponential"(%input) : (tensor<4xf32>) -> tensor<4xf32> return %result : tensor<4xf32> } // LIST-BENCHMARKS: BM_foo2 func @foo2() -> tensor<4xf32> { - %input = iree.unfoldable_constant dense<[0.0, 1.0, 2.0, 4.0]> : tensor<4xf32> + %input = util.unfoldable_constant dense<[0.0, 1.0, 2.0, 4.0]> : tensor<4xf32> %result = "mhlo.abs"(%input) : (tensor<4xf32>) -> tensor<4xf32> return %result : tensor<4xf32> }
diff --git a/iree/tools/test/multiple_exported_functions.mlir b/iree/tools/test/multiple_exported_functions.mlir index 7acd64c..8ba005e 100644 --- a/iree/tools/test/multiple_exported_functions.mlir +++ b/iree/tools/test/multiple_exported_functions.mlir
@@ -3,12 +3,12 @@ module { func @foo1() -> tensor<4xf32> { - %input = iree.unfoldable_constant dense<[0.0, 1.0, 2.0, 4.0]> : tensor<4xf32> + %input = util.unfoldable_constant dense<[0.0, 1.0, 2.0, 4.0]> : tensor<4xf32> %result = "mhlo.exponential"(%input) : (tensor<4xf32>) -> tensor<4xf32> return %result : tensor<4xf32> } func @foo2() -> tensor<4xf32> { - %input = iree.unfoldable_constant dense<[0.0, 1.0, 2.0, 4.0]> : tensor<4xf32> + %input = util.unfoldable_constant dense<[0.0, 1.0, 2.0, 4.0]> : tensor<4xf32> %result = "mhlo.abs"(%input) : (tensor<4xf32>) -> tensor<4xf32> return %result : tensor<4xf32> }
diff --git a/iree/tools/test/repeated_return.mlir b/iree/tools/test/repeated_return.mlir index 048585e..f26aa44 100644 --- a/iree/tools/test/repeated_return.mlir +++ b/iree/tools/test/repeated_return.mlir
@@ -6,7 +6,7 @@ // CHECK-LABEL: EXEC @many_tensor func @many_tensor() -> (tensor<2x2xf32>, tensor<2x2xf32>, tensor<2x2xf32>, tensor<2x2xf32>, tensor<2x2xf32>, tensor<2x2xf32>) { - %res = iree.unfoldable_constant + %res = util.unfoldable_constant dense<[[1.0, 2.0], [3.0, 4.0]]> : tensor<2x2xf32> return %res, %res, %res, %res, %res, %res : tensor<2x2xf32>, tensor<2x2xf32>, tensor<2x2xf32>, tensor<2x2xf32>,
diff --git a/iree/tools/utils/trace_replay.c b/iree/tools/utils/trace_replay.c index 7f91909..6c40dc4 100644 --- a/iree/tools/utils/trace_replay.c +++ b/iree/tools/utils/trace_replay.c
@@ -6,6 +6,7 @@ #include "iree/tools/utils/trace_replay.h" +#include <ctype.h> #include <inttypes.h> #include <stdio.h> #include <stdlib.h> @@ -324,18 +325,29 @@ // - 2 // - 3 // ``` +// or +// ```yaml +// shape: 1x2x3 +// ``` static iree_status_t iree_trace_replay_parse_hal_shape( iree_trace_replay_t* replay, yaml_document_t* document, - yaml_node_t* shape_node, size_t shape_capacity, iree_hal_dim_t* shape, - size_t* out_shape_rank) { - size_t shape_rank = 0; + yaml_node_t* shape_node, iree_host_size_t shape_capacity, + iree_hal_dim_t* shape, iree_host_size_t* out_shape_rank) { + iree_host_size_t shape_rank = 0; *out_shape_rank = shape_rank; if (!shape_node) return iree_ok_status(); - if (shape_node->type != YAML_SEQUENCE_NODE) { + + if (shape_node->type == YAML_SCALAR_NODE) { + // Short-hand using the canonical shape parser (4x8). + return iree_hal_parse_shape(iree_yaml_node_as_string(shape_node), + shape_capacity, shape, out_shape_rank); + } else if (shape_node->type != YAML_SEQUENCE_NODE) { return iree_make_status(IREE_STATUS_INVALID_ARGUMENT, - "(%zu): expected sequence node for shape", + "(%zu): expected scalar or sequence node for shape", shape_node->start_mark.line); } + + // Shape dimension list: for (yaml_node_item_t* item = shape_node->data.sequence.items.start; item != shape_node->data.sequence.items.top; ++item) { yaml_node_t* dim_node = yaml_document_get_node(document, *item); @@ -363,6 +375,43 @@ return iree_ok_status(); } +// Parses an element type. 
+// +// ```yaml +// element_type: 50331680 +// ``` +// or +// ```yaml +// element_type: f32 +// ``` +static iree_status_t iree_trace_replay_parse_hal_element_type( + iree_trace_replay_t* replay, yaml_document_t* document, + yaml_node_t* element_type_node, iree_hal_element_type_t* out_element_type) { + *out_element_type = IREE_HAL_ELEMENT_TYPE_NONE; + + iree_string_view_t element_type_str = + iree_yaml_node_as_string(element_type_node); + if (iree_string_view_is_empty(element_type_str)) { + return iree_make_status(IREE_STATUS_INVALID_ARGUMENT, + "(%zu): element type missing", + element_type_node->start_mark.line); + } + + // If the first character is a digit then interpret as a %d type. + if (isdigit(element_type_str.data[0])) { + static_assert(sizeof(*out_element_type) == sizeof(uint32_t), "4 bytes"); + if (!iree_string_view_atoi_uint32(element_type_str, out_element_type)) { + return iree_make_status(IREE_STATUS_OUT_OF_RANGE, + "(%zu): invalid element type", + element_type_node->start_mark.line); + } + return iree_ok_status(); + } + + // Parse as a canonical element type. + return iree_hal_parse_element_type(element_type_str, out_element_type); +} + // Parses a serialized !hal.buffer into |buffer|. // // ```yaml @@ -371,7 +420,8 @@ // ``` static iree_status_t iree_trace_replay_parse_hal_buffer( iree_trace_replay_t* replay, yaml_document_t* document, - yaml_node_t* contents_node, iree_hal_buffer_t* buffer) { + yaml_node_t* contents_node, iree_hal_element_type_t element_type, + iree_hal_buffer_t* buffer) { if (!contents_node) { // Empty contents = zero fill. 
return iree_ok_status(); @@ -380,23 +430,26 @@ "(%zu): expected scalar node for buffer contents", contents_node->start_mark.line); } - iree_string_view_t value = iree_make_string_view( - contents_node->data.scalar.value, contents_node->data.scalar.length); - value = iree_string_view_trim(value); + iree_string_view_t value = + iree_string_view_trim(iree_yaml_node_as_string(contents_node)); + iree_hal_buffer_mapping_t mapping; + IREE_RETURN_IF_ERROR( + iree_hal_buffer_map_range(buffer, IREE_HAL_MEMORY_ACCESS_DISCARD_WRITE, 0, + IREE_WHOLE_BUFFER, &mapping)); + iree_status_t status = iree_ok_status(); if (strcmp(contents_node->tag, "tag:yaml.org,2002:binary") == 0) { - iree_hal_buffer_mapping_t mapping; - IREE_RETURN_IF_ERROR( - iree_hal_buffer_map_range(buffer, IREE_HAL_MEMORY_ACCESS_DISCARD_WRITE, - 0, IREE_WHOLE_BUFFER, &mapping)); - iree_status_t status = iree_yaml_base64_decode(value, mapping.contents); - iree_hal_buffer_unmap_range(&mapping); - return status; + status = iree_yaml_base64_decode(value, mapping.contents); + } else if (strcmp(contents_node->tag, "tag:yaml.org,2002:str") == 0) { + status = + iree_hal_parse_buffer_elements(value, element_type, mapping.contents); + } else { + status = iree_make_status( + IREE_STATUS_UNIMPLEMENTED, "(%zu): unimplemented buffer encoding '%s'", + contents_node->start_mark.line, contents_node->tag); } - - return iree_make_status(IREE_STATUS_UNIMPLEMENTED, - "(%zu): unimplemented buffer encoding '%s'", - contents_node->start_mark.line, contents_node->tag); + iree_hal_buffer_unmap_range(&mapping); + return status; } // Parses a !hal.buffer_view and appends it to |target_list|. 
@@ -416,19 +469,14 @@ document, value_node, iree_make_cstring_view("element_type"), &element_type_node)); iree_hal_element_type_t element_type = IREE_HAL_ELEMENT_TYPE_NONE; - static_assert(sizeof(element_type) == sizeof(uint32_t), "4 bytes"); - if (!iree_string_view_atoi_uint32(iree_yaml_node_as_string(element_type_node), - &element_type)) { - return iree_make_status(IREE_STATUS_OUT_OF_RANGE, - "(%zu): invalid element type", - element_type_node->start_mark.line); - } + IREE_RETURN_IF_ERROR(iree_trace_replay_parse_hal_element_type( + replay, document, element_type_node, &element_type)); yaml_node_t* shape_node = NULL; IREE_RETURN_IF_ERROR(iree_yaml_mapping_try_find( document, value_node, iree_make_cstring_view("shape"), &shape_node)); iree_hal_dim_t shape[16]; - size_t shape_rank = 0; + iree_host_size_t shape_rank = 0; IREE_RETURN_IF_ERROR(iree_trace_replay_parse_hal_shape( replay, document, shape_node, IREE_ARRAYSIZE(shape), shape, &shape_rank)); @@ -447,7 +495,7 @@ IREE_HAL_MEMORY_TYPE_DEVICE_LOCAL | IREE_HAL_MEMORY_TYPE_HOST_VISIBLE, IREE_HAL_BUFFER_USAGE_ALL, allocation_size, &buffer)); iree_status_t status = iree_trace_replay_parse_hal_buffer( - replay, document, contents_node, buffer); + replay, document, contents_node, element_type, buffer); if (!iree_status_is_ok(status)) { iree_hal_buffer_release(buffer); return status; @@ -465,6 +513,25 @@ return status; } +// Parses a !hal.buffer_view in tensor form and appends it to |target_list|. 
+// +// ```yaml +// !tensor 4xf32=[0 1 2 3] +// ``` +static iree_status_t iree_trace_replay_parse_inline_hal_buffer_view( + iree_trace_replay_t* replay, yaml_document_t* document, + yaml_node_t* value_node, iree_vm_list_t* target_list) { + iree_hal_buffer_view_t* buffer_view = NULL; + IREE_RETURN_IF_ERROR(iree_hal_buffer_view_parse( + iree_yaml_node_as_string(value_node), + iree_hal_device_allocator(replay->device), &buffer_view)); + iree_vm_ref_t buffer_view_ref = iree_hal_buffer_view_move_ref(buffer_view); + iree_status_t status = + iree_vm_list_push_ref_move(target_list, &buffer_view_ref); + iree_vm_ref_release(&buffer_view_ref); + return status; +} + // Parses a typed item from |value_node| and appends it to |target_list|. // // ```yaml @@ -473,10 +540,19 @@ // - type: value // i8: 7 // ``` +// or +// ```yaml +// !hal.buffer_view 4xf32=[0 1 2 3] +// ``` static iree_status_t iree_trace_replay_parse_item(iree_trace_replay_t* replay, yaml_document_t* document, yaml_node_t* value_node, iree_vm_list_t* target_list) { + if (strcmp(value_node->tag, "!hal.buffer_view") == 0) { + return iree_trace_replay_parse_inline_hal_buffer_view( + replay, document, value_node, target_list); + } + yaml_node_t* type_node = NULL; IREE_RETURN_IF_ERROR(iree_yaml_mapping_find( document, value_node, iree_make_cstring_view("type"), &type_node));
diff --git a/iree/vm/BUILD b/iree/vm/BUILD index 2e1127e..9c200dc 100644 --- a/iree/vm/BUILD +++ b/iree/vm/BUILD
@@ -211,7 +211,7 @@ # tblgen = "//iree/tools:iree-tblgen", # td_file = "//iree/compiler/Dialect/VM/IR:VMOps.td", # td_srcs = [ -# "//iree/compiler/Dialect/IREE/IR:td_files", +# "//iree/compiler/Dialect/Util/IR:td_files", # "//iree/compiler/Dialect/VM/IR:td_files", # "@llvm-project//mlir:OpBaseTdFiles", # "@llvm-project//mlir:include/mlir/IR/SymbolInterfaces.td",
diff --git a/iree/vm/bytecode_dispatch.c b/iree/vm/bytecode_dispatch.c index 8dfca72..e290a95 100644 --- a/iree/vm/bytecode_dispatch.c +++ b/iree/vm/bytecode_dispatch.c
@@ -1540,8 +1540,8 @@ // Extension trampolines //===------------------------------------------------------------------===// - BEGIN_DISPATCH_PREFIX(PrefixExtI64, EXT_I64) { #if IREE_VM_EXT_I64_ENABLE + BEGIN_DISPATCH_PREFIX(PrefixExtI64, EXT_I64) { //===----------------------------------------------------------------===// // ExtI64: Globals //===----------------------------------------------------------------===// @@ -1802,15 +1802,14 @@ IREE_RETURN_IF_ERROR(iree_vm_buffer_write_elements( &value, buffer, offset, 1, sizeof(uint64_t))); }); - -#else - return iree_make_status(IREE_STATUS_UNIMPLEMENTED); -#endif // IREE_VM_EXT_I64_ENABLE } END_DISPATCH_PREFIX(); +#else + UNHANDLED_DISPATCH_PREFIX(PrefixExtI64, EXT_I64); +#endif // IREE_VM_EXT_I64_ENABLE - BEGIN_DISPATCH_PREFIX(PrefixExtF32, EXT_F32) { #if IREE_VM_EXT_F32_ENABLE + BEGIN_DISPATCH_PREFIX(PrefixExtF32, EXT_F32) { //===----------------------------------------------------------------===// // ExtF32: Globals //===----------------------------------------------------------------===// @@ -2077,12 +2076,11 @@ IREE_RETURN_IF_ERROR(iree_vm_buffer_write_elements( &value, buffer, offset, 1, sizeof(float))); }); - -#else - return iree_make_status(IREE_STATUS_UNIMPLEMENTED); -#endif // IREE_VM_EXT_F32_ENABLE } END_DISPATCH_PREFIX(); +#else + UNHANDLED_DISPATCH_PREFIX(PrefixExtF32, EXT_F32); +#endif // IREE_VM_EXT_F32_ENABLE DISPATCH_OP(CORE, PrefixExtF64, { return iree_make_status(IREE_STATUS_UNIMPLEMENTED); });
diff --git a/iree/vm/bytecode_dispatch_util.h b/iree/vm/bytecode_dispatch_util.h index 81bb1e6..bce7fe8 100644 --- a/iree/vm/bytecode_dispatch_util.h +++ b/iree/vm/bytecode_dispatch_util.h
@@ -329,7 +329,12 @@ VMCHECK(0); \ return iree_make_status(IREE_STATUS_UNIMPLEMENTED, "unhandled opcode"); \ } -#define DISPATCH_UNHANDLED_EXT() +#define UNHANDLED_DISPATCH_PREFIX(op_name, ext) \ + _dispatch_CORE_##op_name : { \ + VMCHECK(0); \ + return iree_make_status(IREE_STATUS_UNIMPLEMENTED, \ + "unhandled dispatch extension " #ext); \ + } #define DISPATCH_OP(ext, op_name, body) \ _dispatch_##ext##_##op_name : IREE_DISPATCH_LOG_OPCODE(#op_name); \ @@ -359,11 +364,11 @@ return iree_make_status(IREE_STATUS_UNIMPLEMENTED, \ "unhandled core opcode"); \ } -#define DISPATCH_UNHANDLED_EXT \ - () default : { \ - VMCHECK(0); \ - return iree_make_status(IREE_STATUS_UNIMPLEMENTED, \ - "unhandled extension opcode"); \ +#define UNHANDLED_DISPATCH_PREFIX(op_name, ext) \ + case IREE_VM_OP_CORE_##op_name: { \ + VMCHECK(0); \ + return iree_make_status(IREE_STATUS_UNIMPLEMENTED, \ + "unhandled dispatch extension " #ext); \ } #define DISPATCH_OP(ext, op_name, body) \
diff --git a/iree/vm/module.c b/iree/vm/module.c index e4da546..5228d4f 100644 --- a/iree/vm/module.c +++ b/iree/vm/module.c
@@ -288,10 +288,8 @@ IREE_API_EXPORT iree_string_view_t iree_vm_function_reflection_attr( const iree_vm_function_t* function, iree_string_view_t key) { - IREE_TRACE_ZONE_BEGIN(z0); iree_vm_module_t* module = function->module; if (!module->get_function_reflection_attr) { - IREE_TRACE_ZONE_END(z0); return iree_string_view_empty(); } for (int index = 0;; ++index) { @@ -303,12 +301,10 @@ iree_status_ignore(status); break; } - if (iree_string_view_compare(key, index_key) == 0) { - IREE_TRACE_ZONE_END(z0); + if (iree_string_view_equal(key, index_key)) { return index_value; } } - IREE_TRACE_ZONE_END(z0); return iree_string_view_empty(); }
diff --git a/iree/vm/native_module_packing.h b/iree/vm/native_module_packing.h index 2a2b820..db98523 100644 --- a/iree/vm/native_module_packing.h +++ b/iree/vm/native_module_packing.h
@@ -428,7 +428,7 @@ } }; -// An `iree.byte_buffer` containing a string. +// An `util.byte_buffer` containing a string. // The string view is aliased directly into the underlying byte buffer. template <> struct ParamUnpack<iree_string_view_t> {
diff --git a/iree/vm/ref.c b/iree/vm/ref.c index ca3f472..623e42d 100644 --- a/iree/vm/ref.c +++ b/iree/vm/ref.c
@@ -98,8 +98,8 @@ iree_vm_ref_lookup_registered_type(iree_string_view_t full_name) { for (int i = 1; i <= IREE_VM_MAX_TYPE_ID; ++i) { if (!iree_vm_ref_type_descriptors[i]) break; - if (iree_string_view_compare(iree_vm_ref_type_descriptors[i]->type_name, - full_name) == 0) { + if (iree_string_view_equal(iree_vm_ref_type_descriptors[i]->type_name, + full_name)) { return iree_vm_ref_type_descriptors[i]; } }
diff --git a/iree/vm/test/arithmetic_ops.mlir b/iree/vm/test/arithmetic_ops.mlir index 525bf45..db247ee 100644 --- a/iree/vm/test/arithmetic_ops.mlir +++ b/iree/vm/test/arithmetic_ops.mlir
@@ -7,7 +7,7 @@ vm.export @test_add_i32 vm.func @test_add_i32() { %c1 = vm.const.i32 1 : i32 - %c1dno = iree.do_not_optimize(%c1) : i32 + %c1dno = util.do_not_optimize(%c1) : i32 %v = vm.add.i32 %c1dno, %c1dno : i32 %c2 = vm.const.i32 2 : i32 vm.check.eq %v, %c2, "1+1=2" : i32 @@ -17,9 +17,9 @@ vm.export @test_sub_i32 vm.func @test_sub_i32() { %c1 = vm.const.i32 3 : i32 - %c1dno = iree.do_not_optimize(%c1) : i32 + %c1dno = util.do_not_optimize(%c1) : i32 %c2 = vm.const.i32 2 : i32 - %c2dno = iree.do_not_optimize(%c2) : i32 + %c2dno = util.do_not_optimize(%c2) : i32 %v = vm.sub.i32 %c1dno, %c2dno : i32 %c3 = vm.const.i32 1 : i32 vm.check.eq %v, %c3, "3-2=1" : i32 @@ -29,7 +29,7 @@ vm.export @test_mul_i32 vm.func @test_mul_i32() { %c1 = vm.const.i32 2 : i32 - %c1dno = iree.do_not_optimize(%c1) : i32 + %c1dno = util.do_not_optimize(%c1) : i32 %v = vm.mul.i32 %c1dno, %c1dno : i32 %c2 = vm.const.i32 4 : i32 vm.check.eq %v, %c2, "2*2=4" : i32 @@ -39,9 +39,9 @@ vm.export @test_div_i32s vm.func @test_div_i32s() { %c1 = vm.const.i32 4 : i32 - %c1dno = iree.do_not_optimize(%c1) : i32 + %c1dno = util.do_not_optimize(%c1) : i32 %c2 = vm.const.i32 -2 : i32 - %c2dno = iree.do_not_optimize(%c2) : i32 + %c2dno = util.do_not_optimize(%c2) : i32 %v = vm.div.i32.s %c1dno, %c2dno : i32 %c3 = vm.const.i32 -2 : i32 vm.check.eq %v, %c3, "4/-2=-2" : i32 @@ -51,9 +51,9 @@ vm.export @test_div_i32u vm.func @test_div_i32u() { %c1 = vm.const.i32 4 : i32 - %c1dno = iree.do_not_optimize(%c1) : i32 + %c1dno = util.do_not_optimize(%c1) : i32 %c2 = vm.const.i32 2 : i32 - %c2dno = iree.do_not_optimize(%c2) : i32 + %c2dno = util.do_not_optimize(%c2) : i32 %v = vm.div.i32.u %c1dno, %c2dno : i32 %c3 = vm.const.i32 2 : i32 vm.check.eq %v, %c3, "4/2=2" : i32 @@ -63,9 +63,9 @@ vm.export @test_rem_i32s vm.func @test_rem_i32s() { %c1 = vm.const.i32 -3 : i32 - %c1dno = iree.do_not_optimize(%c1) : i32 + %c1dno = util.do_not_optimize(%c1) : i32 %c2 = vm.const.i32 -2 : i32 - %c2dno = iree.do_not_optimize(%c2) : 
i32 + %c2dno = util.do_not_optimize(%c2) : i32 %v = vm.rem.i32.s %c1dno, %c2dno : i32 %c3 = vm.const.i32 -1 : i32 vm.check.eq %v, %c3, "-3%-2=-1" : i32 @@ -75,9 +75,9 @@ vm.export @test_rem_i32u vm.func @test_rem_i32u() { %c1 = vm.const.i32 3 : i32 - %c1dno = iree.do_not_optimize(%c1) : i32 + %c1dno = util.do_not_optimize(%c1) : i32 %c2 = vm.const.i32 2 : i32 - %c2dno = iree.do_not_optimize(%c2) : i32 + %c2dno = util.do_not_optimize(%c2) : i32 %v = vm.rem.i32.u %c1dno, %c2dno : i32 %c3 = vm.const.i32 1 : i32 vm.check.eq %v, %c3, "3%2=1" : i32 @@ -87,11 +87,11 @@ vm.export @test_fma_i32 vm.func @test_fma_i32() { %c2 = vm.const.i32 2 : i32 - %c2dno = iree.do_not_optimize(%c2) : i32 + %c2dno = util.do_not_optimize(%c2) : i32 %c3 = vm.const.i32 3 : i32 - %c3dno = iree.do_not_optimize(%c3) : i32 + %c3dno = util.do_not_optimize(%c3) : i32 %c5 = vm.const.i32 5 : i32 - %c5dno = iree.do_not_optimize(%c5) : i32 + %c5dno = util.do_not_optimize(%c5) : i32 %v = vm.fma.i32 %c2dno, %c3dno, %c5dno : i32 %c11 = vm.const.i32 11 : i32 vm.check.eq %v, %c11, "2*3+5=11" : i32 @@ -101,7 +101,7 @@ vm.export @test_not_i32 vm.func @test_not_i32() { %c1 = vm.const.i32 0 : i32 - %c1dno = iree.do_not_optimize(%c1) : i32 + %c1dno = util.do_not_optimize(%c1) : i32 %v = vm.not.i32 %c1dno : i32 %c2 = vm.const.i32 -1 : i32 vm.check.eq %v, %c2, "~0=-1" : i32 @@ -111,9 +111,9 @@ vm.export @test_and_i32 vm.func @test_and_i32() { %c1 = vm.const.i32 5 : i32 - %c1dno = iree.do_not_optimize(%c1) : i32 + %c1dno = util.do_not_optimize(%c1) : i32 %c2 = vm.const.i32 3 : i32 - %c2dno = iree.do_not_optimize(%c2) : i32 + %c2dno = util.do_not_optimize(%c2) : i32 %v = vm.and.i32 %c1dno, %c2dno : i32 %c3 = vm.const.i32 1 : i32 vm.check.eq %v, %c3, "5&3=1" : i32 @@ -123,9 +123,9 @@ vm.export @test_or_i32 vm.func @test_or_i32() { %c1 = vm.const.i32 5 : i32 - %c1dno = iree.do_not_optimize(%c1) : i32 + %c1dno = util.do_not_optimize(%c1) : i32 %c2 = vm.const.i32 3 : i32 - %c2dno = iree.do_not_optimize(%c2) : i32 + 
%c2dno = util.do_not_optimize(%c2) : i32 %v = vm.or.i32 %c1dno, %c2dno : i32 %c3 = vm.const.i32 7 : i32 vm.check.eq %v, %c3, "5|3=7" : i32 @@ -135,9 +135,9 @@ vm.export @test_xor_i32 vm.func @test_xor_i32() { %c1 = vm.const.i32 5 : i32 - %c1dno = iree.do_not_optimize(%c1) : i32 + %c1dno = util.do_not_optimize(%c1) : i32 %c2 = vm.const.i32 3 : i32 - %c2dno = iree.do_not_optimize(%c2) : i32 + %c2dno = util.do_not_optimize(%c2) : i32 %v = vm.xor.i32 %c1dno, %c2dno : i32 %c3 = vm.const.i32 6 : i32 vm.check.eq %v, %c3, "5^3=6" : i32
diff --git a/iree/vm/test/arithmetic_ops_f32.mlir b/iree/vm/test/arithmetic_ops_f32.mlir index f3c2e11..5d69f37 100644 --- a/iree/vm/test/arithmetic_ops_f32.mlir +++ b/iree/vm/test/arithmetic_ops_f32.mlir
@@ -7,7 +7,7 @@ vm.export @test_add_f32 vm.func @test_add_f32() { %c1 = vm.const.f32 1.5 : f32 - %c1dno = iree.do_not_optimize(%c1) : f32 + %c1dno = util.do_not_optimize(%c1) : f32 %v = vm.add.f32 %c1dno, %c1dno : f32 %c2 = vm.const.f32 3.0 : f32 vm.check.eq %v, %c2, "1.5+1.5=3" : f32 @@ -17,9 +17,9 @@ vm.export @test_sub_f32 vm.func @test_sub_f32() { %c1 = vm.const.f32 3.0 : f32 - %c1dno = iree.do_not_optimize(%c1) : f32 + %c1dno = util.do_not_optimize(%c1) : f32 %c2 = vm.const.f32 2.5 : f32 - %c2dno = iree.do_not_optimize(%c2) : f32 + %c2dno = util.do_not_optimize(%c2) : f32 %v = vm.sub.f32 %c1dno, %c2dno : f32 %c3 = vm.const.f32 0.5 : f32 vm.check.eq %v, %c3, "3.0-2.5=0.5" : f32 @@ -29,7 +29,7 @@ vm.export @test_mul_f32 vm.func @test_mul_f32() { %c1 = vm.const.f32 2.5 : f32 - %c1dno = iree.do_not_optimize(%c1) : f32 + %c1dno = util.do_not_optimize(%c1) : f32 %v = vm.mul.f32 %c1dno, %c1dno : f32 %c2 = vm.const.f32 6.25 : f32 vm.check.eq %v, %c2, "2.5*2.5=6.25" : f32 @@ -39,9 +39,9 @@ vm.export @test_div_f32 vm.func @test_div_f32() { %c1 = vm.const.f32 4.0 : f32 - %c1dno = iree.do_not_optimize(%c1) : f32 + %c1dno = util.do_not_optimize(%c1) : f32 %c2 = vm.const.f32 -2.0 : f32 - %c2dno = iree.do_not_optimize(%c2) : f32 + %c2dno = util.do_not_optimize(%c2) : f32 %v = vm.div.f32 %c1dno, %c2dno : f32 %c3 = vm.const.f32 -2.0 : f32 vm.check.eq %v, %c3, "4.0/-2.0=-2.0" : f32 @@ -51,9 +51,9 @@ vm.export @test_rem_f32 vm.func @test_rem_f32() { %c1 = vm.const.f32 -3.0 : f32 - %c1dno = iree.do_not_optimize(%c1) : f32 + %c1dno = util.do_not_optimize(%c1) : f32 %c2 = vm.const.f32 -2.0 : f32 - %c2dno = iree.do_not_optimize(%c2) : f32 + %c2dno = util.do_not_optimize(%c2) : f32 %v = vm.rem.f32 %c1dno, %c2dno : f32 %c3 = vm.const.f32 1.0 : f32 vm.check.eq %v, %c3, "-3.0%-2.0=1.0" : f32 @@ -63,11 +63,11 @@ vm.export @test_fma_f32 vm.func @test_fma_f32() { %c2 = vm.const.f32 2.0 : f32 - %c2dno = iree.do_not_optimize(%c2) : f32 + %c2dno = util.do_not_optimize(%c2) : f32 %c3 = 
vm.const.f32 3.0 : f32 - %c3dno = iree.do_not_optimize(%c3) : f32 + %c3dno = util.do_not_optimize(%c3) : f32 %c5 = vm.const.f32 5.0 : f32 - %c5dno = iree.do_not_optimize(%c5) : f32 + %c5dno = util.do_not_optimize(%c5) : f32 %v = vm.fma.f32 %c2dno, %c3dno, %c5dno : f32 %c11 = vm.const.f32 11.0 : f32 vm.check.eq %v, %c11, "2.0*3.0+5.0=11.0" : f32 @@ -77,7 +77,7 @@ vm.export @test_abs_f32 vm.func @test_abs_f32() { %c1 = vm.const.f32 -1.0 : f32 - %c1dno = iree.do_not_optimize(%c1) : f32 + %c1dno = util.do_not_optimize(%c1) : f32 %v = vm.abs.f32 %c1dno : f32 %c2 = vm.const.f32 1.0 : f32 vm.check.eq %v, %c2, "abs(-1.0)=1.0" : f32 @@ -87,7 +87,7 @@ vm.export @test_neg_f32 vm.func @test_neg_f32() { %c1 = vm.const.f32 -1.0 : f32 - %c1dno = iree.do_not_optimize(%c1) : f32 + %c1dno = util.do_not_optimize(%c1) : f32 %v = vm.neg.f32 %c1dno : f32 %c2 = vm.const.f32 1.0 : f32 vm.check.eq %v, %c2, "neg(-1.0)=1.0" : f32 @@ -97,7 +97,7 @@ vm.export @test_ceil_f32 vm.func @test_ceil_f32() { %c1 = vm.const.f32 1.5 : f32 - %c1dno = iree.do_not_optimize(%c1) : f32 + %c1dno = util.do_not_optimize(%c1) : f32 %v = vm.ceil.f32 %c1dno : f32 %c2 = vm.const.f32 2.0 : f32 vm.check.eq %v, %c2, "ceil(1.5)=2.0" : f32 @@ -107,7 +107,7 @@ vm.export @test_floor_f32 vm.func @test_floor_f32() { %c1 = vm.const.f32 1.5 : f32 - %c1dno = iree.do_not_optimize(%c1) : f32 + %c1dno = util.do_not_optimize(%c1) : f32 %v = vm.floor.f32 %c1dno : f32 %c2 = vm.const.f32 1.0 : f32 vm.check.eq %v, %c2, "floor(1.5)=1.0" : f32 @@ -117,7 +117,7 @@ vm.export @test_atan_f32 vm.func @test_atan_f32() { %c1 = vm.const.f32 1.0 : f32 - %c1dno = iree.do_not_optimize(%c1) : f32 + %c1dno = util.do_not_optimize(%c1) : f32 %v = vm.atan.f32 %c1dno : f32 %c2 = vm.const.f32 0.7853981633974483: f32 vm.check.eq %v, %c2, "atan(1.0)=0.7853981633974483" : f32 @@ -127,9 +127,9 @@ vm.export @test_atan2_f32 vm.func @test_atan2_f32() { %c1 = vm.const.f32 1.0 : f32 - %c1dno = iree.do_not_optimize(%c1) : f32 + %c1dno = util.do_not_optimize(%c1) : 
f32 %c2 = vm.const.f32 0.0 : f32 - %c2dno = iree.do_not_optimize(%c2) : f32 + %c2dno = util.do_not_optimize(%c2) : f32 %v = vm.atan2.f32 %c1dno, %c2dno : f32 %c3 = vm.const.f32 1.5707963267948966 : f32 vm.check.eq %v, %c3, "atan2(1.0,0.0)=1.5707963267948966" : f32 @@ -139,7 +139,7 @@ vm.export @test_cos_f32 vm.func @test_cos_f32() { %c1 = vm.const.f32 0.5 : f32 - %c1dno = iree.do_not_optimize(%c1) : f32 + %c1dno = util.do_not_optimize(%c1) : f32 %v = vm.cos.f32 %c1dno : f32 %c2 = vm.const.f32 0.8775825618903728: f32 vm.check.eq %v, %c2, "cos(0.5)=0.8775825618903728" : f32 @@ -149,7 +149,7 @@ vm.export @test_sin_f32 vm.func @test_sin_f32() { %c1 = vm.const.f32 0.5 : f32 - %c1dno = iree.do_not_optimize(%c1) : f32 + %c1dno = util.do_not_optimize(%c1) : f32 %v = vm.sin.f32 %c1dno : f32 %c2 = vm.const.f32 0.479425538604203: f32 vm.check.eq %v, %c2, "sin(0.5)=0.479425538604203" : f32 @@ -159,7 +159,7 @@ vm.export @test_exp_f32 vm.func @test_exp_f32() { %c1 = vm.const.f32 1.0 : f32 - %c1dno = iree.do_not_optimize(%c1) : f32 + %c1dno = util.do_not_optimize(%c1) : f32 %v = vm.exp.f32 %c1dno : f32 %c2 = vm.const.f32 2.718281828459045: f32 vm.check.eq %v, %c2, "exp(1.0)=2.718281828459045" : f32 @@ -169,7 +169,7 @@ vm.export @test_exp2_f32 vm.func @test_exp2_f32() { %c1 = vm.const.f32 2.0 : f32 - %c1dno = iree.do_not_optimize(%c1) : f32 + %c1dno = util.do_not_optimize(%c1) : f32 %v = vm.exp2.f32 %c1dno : f32 %c2 = vm.const.f32 4.0: f32 vm.check.eq %v, %c2, "exp(2.0)=4.0" : f32 @@ -179,7 +179,7 @@ vm.export @test_expm1_f32 vm.func @test_expm1_f32() { %c1 = vm.const.f32 2.0 : f32 - %c1dno = iree.do_not_optimize(%c1) : f32 + %c1dno = util.do_not_optimize(%c1) : f32 %v = vm.expm1.f32 %c1dno : f32 %c2 = vm.const.f32 6.38905609893065: f32 vm.check.eq %v, %c2, "expm1(2.0)=6.38905609893065" : f32 @@ -189,7 +189,7 @@ vm.export @test_log_f32 vm.func @test_log_f32() { %c1 = vm.const.f32 10.0 : f32 - %c1dno = iree.do_not_optimize(%c1) : f32 + %c1dno = util.do_not_optimize(%c1) : f32 %v = 
vm.log.f32 %c1dno : f32 %c2 = vm.const.f32 2.302585092994046: f32 vm.check.eq %v, %c2, "log(10.0)=2.302585092994046" : f32 @@ -199,7 +199,7 @@ vm.export @test_log10_f32 vm.func @test_log10_f32() { %c1 = vm.const.f32 10.0 : f32 - %c1dno = iree.do_not_optimize(%c1) : f32 + %c1dno = util.do_not_optimize(%c1) : f32 %v = vm.log10.f32 %c1dno : f32 %c2 = vm.const.f32 1.0: f32 vm.check.eq %v, %c2, "log10(10.0)=1.0" : f32 @@ -209,7 +209,7 @@ vm.export @test_log1p_f32 vm.func @test_log1p_f32() { %c1 = vm.const.f32 10.0 : f32 - %c1dno = iree.do_not_optimize(%c1) : f32 + %c1dno = util.do_not_optimize(%c1) : f32 %v = vm.log1p.f32 %c1dno : f32 %c2 = vm.const.f32 2.3978952727983707: f32 vm.check.eq %v, %c2, "log1p(10.0)=2.3978952727983707" : f32 @@ -219,7 +219,7 @@ vm.export @test_log2_f32 vm.func @test_log2_f32() { %c1 = vm.const.f32 10.0 : f32 - %c1dno = iree.do_not_optimize(%c1) : f32 + %c1dno = util.do_not_optimize(%c1) : f32 %v = vm.log2.f32 %c1dno : f32 %c2 = vm.const.f32 3.321928094887362: f32 vm.check.eq %v, %c2, "log2(10.0)=3.321928094887362" : f32 @@ -229,9 +229,9 @@ vm.export @test_pow_f32 vm.func @test_pow_f32() { %c1 = vm.const.f32 3.0 : f32 - %c1dno = iree.do_not_optimize(%c1) : f32 + %c1dno = util.do_not_optimize(%c1) : f32 %c2 = vm.const.f32 2.0 : f32 - %c2dno = iree.do_not_optimize(%c2) : f32 + %c2dno = util.do_not_optimize(%c2) : f32 %v = vm.pow.f32 %c1dno, %c2dno : f32 %c3 = vm.const.f32 9.0 : f32 vm.check.eq %v, %c3, "pow(3.0,2.0)=9.0" : f32 @@ -241,7 +241,7 @@ vm.export @test_rsqrt_f32 vm.func @test_rsqrt_f32() { %c1 = vm.const.f32 4.0 : f32 - %c1dno = iree.do_not_optimize(%c1) : f32 + %c1dno = util.do_not_optimize(%c1) : f32 %v = vm.rsqrt.f32 %c1dno : f32 %c2 = vm.const.f32 0.5: f32 vm.check.eq %v, %c2, "rsqrt(4.0)=0.5" : f32 @@ -251,7 +251,7 @@ vm.export @test_sqrt_f32 vm.func @test_sqrt_f32() { %c1 = vm.const.f32 4.0 : f32 - %c1dno = iree.do_not_optimize(%c1) : f32 + %c1dno = util.do_not_optimize(%c1) : f32 %v = vm.sqrt.f32 %c1dno : f32 %c2 = vm.const.f32 
2.0: f32 vm.check.eq %v, %c2, "sqrt(4.0)=2.0" : f32 @@ -261,7 +261,7 @@ vm.export @test_tanh_f32 vm.func @test_tanh_f32() { %c1 = vm.const.f32 0.5 : f32 - %c1dno = iree.do_not_optimize(%c1) : f32 + %c1dno = util.do_not_optimize(%c1) : f32 %v = vm.tanh.f32 %c1dno : f32 %c2 = vm.const.f32 0.46211715726000974: f32 vm.check.eq %v, %c2, "tanh(0.5)=0.46211715726000974" : f32
diff --git a/iree/vm/test/arithmetic_ops_i64.mlir b/iree/vm/test/arithmetic_ops_i64.mlir index 253639c..e572420 100644 --- a/iree/vm/test/arithmetic_ops_i64.mlir +++ b/iree/vm/test/arithmetic_ops_i64.mlir
@@ -7,7 +7,7 @@ vm.export @test_add_i64 vm.func @test_add_i64() { %c1 = vm.const.i64 1 : i64 - %c1dno = iree.do_not_optimize(%c1) : i64 + %c1dno = util.do_not_optimize(%c1) : i64 %v = vm.add.i64 %c1dno, %c1dno : i64 %c2 = vm.const.i64 2 : i64 vm.check.eq %v, %c2, "1+1=2" : i64 @@ -17,9 +17,9 @@ vm.export @test_sub_i64 vm.func @test_sub_i64() { %c1 = vm.const.i64 3 : i64 - %c1dno = iree.do_not_optimize(%c1) : i64 + %c1dno = util.do_not_optimize(%c1) : i64 %c2 = vm.const.i64 2 : i64 - %c2dno = iree.do_not_optimize(%c2) : i64 + %c2dno = util.do_not_optimize(%c2) : i64 %v = vm.sub.i64 %c1dno, %c2dno : i64 %c3 = vm.const.i64 1 : i64 vm.check.eq %v, %c3, "3-2=1" : i64 @@ -29,7 +29,7 @@ vm.export @test_mul_i64 vm.func @test_mul_i64() { %c1 = vm.const.i64 2 : i64 - %c1dno = iree.do_not_optimize(%c1) : i64 + %c1dno = util.do_not_optimize(%c1) : i64 %v = vm.mul.i64 %c1dno, %c1dno : i64 %c2 = vm.const.i64 4 : i64 vm.check.eq %v, %c2, "2*2=4" : i64 @@ -39,9 +39,9 @@ vm.export @test_div_i64s vm.func @test_div_i64s() { %c1 = vm.const.i64 4 : i64 - %c1dno = iree.do_not_optimize(%c1) : i64 + %c1dno = util.do_not_optimize(%c1) : i64 %c2 = vm.const.i64 -2 : i64 - %c2dno = iree.do_not_optimize(%c2) : i64 + %c2dno = util.do_not_optimize(%c2) : i64 %v = vm.div.i64.s %c1dno, %c2dno : i64 %c3 = vm.const.i64 -2 : i64 vm.check.eq %v, %c3, "4/-2=-2" : i64 @@ -51,9 +51,9 @@ vm.export @test_div_i64u vm.func @test_div_i64u() { %c1 = vm.const.i64 4 : i64 - %c1dno = iree.do_not_optimize(%c1) : i64 + %c1dno = util.do_not_optimize(%c1) : i64 %c2 = vm.const.i64 2 : i64 - %c2dno = iree.do_not_optimize(%c2) : i64 + %c2dno = util.do_not_optimize(%c2) : i64 %v = vm.div.i64.u %c1dno, %c2dno : i64 %c3 = vm.const.i64 2 : i64 vm.check.eq %v, %c3, "4/2=2" : i64 @@ -63,9 +63,9 @@ vm.export @test_rem_i64s vm.func @test_rem_i64s() { %c1 = vm.const.i64 -3 : i64 - %c1dno = iree.do_not_optimize(%c1) : i64 + %c1dno = util.do_not_optimize(%c1) : i64 %c2 = vm.const.i64 -2 : i64 - %c2dno = iree.do_not_optimize(%c2) : 
i64 + %c2dno = util.do_not_optimize(%c2) : i64 %v = vm.rem.i64.s %c1dno, %c2dno : i64 %c3 = vm.const.i64 -1 : i64 vm.check.eq %v, %c3, "-3%-2=-1" : i64 @@ -75,9 +75,9 @@ vm.export @test_rem_i64u vm.func @test_rem_i64u() { %c1 = vm.const.i64 3 : i64 - %c1dno = iree.do_not_optimize(%c1) : i64 + %c1dno = util.do_not_optimize(%c1) : i64 %c2 = vm.const.i64 2 : i64 - %c2dno = iree.do_not_optimize(%c2) : i64 + %c2dno = util.do_not_optimize(%c2) : i64 %v = vm.rem.i64.u %c1dno, %c2dno : i64 %c3 = vm.const.i64 1 : i64 vm.check.eq %v, %c3, "3%2=1" : i64 @@ -87,11 +87,11 @@ vm.export @test_fma_i64 vm.func @test_fma_i64() { %c2 = vm.const.i64 2 : i64 - %c2dno = iree.do_not_optimize(%c2) : i64 + %c2dno = util.do_not_optimize(%c2) : i64 %c3 = vm.const.i64 3 : i64 - %c3dno = iree.do_not_optimize(%c3) : i64 + %c3dno = util.do_not_optimize(%c3) : i64 %c5 = vm.const.i64 5 : i64 - %c5dno = iree.do_not_optimize(%c5) : i64 + %c5dno = util.do_not_optimize(%c5) : i64 %v = vm.fma.i64 %c2dno, %c3dno, %c5dno : i64 %c11 = vm.const.i64 11 : i64 vm.check.eq %v, %c11, "2*3+5=11" : i64 @@ -101,7 +101,7 @@ vm.export @test_not_i64 vm.func @test_not_i64() { %c1 = vm.const.i64 0 : i64 - %c1dno = iree.do_not_optimize(%c1) : i64 + %c1dno = util.do_not_optimize(%c1) : i64 %v = vm.not.i64 %c1dno : i64 %c2 = vm.const.i64 -1 : i64 vm.check.eq %v, %c2, "~0=-1" : i64 @@ -111,9 +111,9 @@ vm.export @test_and_i64 vm.func @test_and_i64() { %c1 = vm.const.i64 5 : i64 - %c1dno = iree.do_not_optimize(%c1) : i64 + %c1dno = util.do_not_optimize(%c1) : i64 %c2 = vm.const.i64 3 : i64 - %c2dno = iree.do_not_optimize(%c2) : i64 + %c2dno = util.do_not_optimize(%c2) : i64 %v = vm.and.i64 %c1dno, %c2dno : i64 %c3 = vm.const.i64 1 : i64 vm.check.eq %v, %c3, "5&3=1" : i64 @@ -123,9 +123,9 @@ vm.export @test_or_i64 vm.func @test_or_i64() { %c1 = vm.const.i64 5 : i64 - %c1dno = iree.do_not_optimize(%c1) : i64 + %c1dno = util.do_not_optimize(%c1) : i64 %c2 = vm.const.i64 3 : i64 - %c2dno = iree.do_not_optimize(%c2) : i64 + 
%c2dno = util.do_not_optimize(%c2) : i64 %v = vm.or.i64 %c1dno, %c2dno : i64 %c3 = vm.const.i64 7 : i64 vm.check.eq %v, %c3, "5|3=7" : i64 @@ -135,9 +135,9 @@ vm.export @test_xor_i64 vm.func @test_xor_i64() { %c1 = vm.const.i64 5 : i64 - %c1dno = iree.do_not_optimize(%c1) : i64 + %c1dno = util.do_not_optimize(%c1) : i64 %c2 = vm.const.i64 3 : i64 - %c2dno = iree.do_not_optimize(%c2) : i64 + %c2dno = util.do_not_optimize(%c2) : i64 %v = vm.xor.i64 %c1dno, %c2dno : i64 %c3 = vm.const.i64 6 : i64 vm.check.eq %v, %c3, "5^3=6" : i64
diff --git a/iree/vm/test/assignment_ops.mlir b/iree/vm/test/assignment_ops.mlir index d479392..7b3d282 100644 --- a/iree/vm/test/assignment_ops.mlir +++ b/iree/vm/test/assignment_ops.mlir
@@ -7,9 +7,9 @@ vm.export @test_select_i32 vm.func @test_select_i32() { %c0 = vm.const.i32 0 : i32 - %c0dno = iree.do_not_optimize(%c0) : i32 + %c0dno = util.do_not_optimize(%c0) : i32 %c1 = vm.const.i32 1 : i32 - %c1dno = iree.do_not_optimize(%c1) : i32 + %c1dno = util.do_not_optimize(%c1) : i32 %v1 = vm.select.i32 %c0dno, %c0dno, %c1dno : i32 vm.check.eq %v1, %c1, "0 ? 0 : 1 = 1" : i32 %v2 = vm.select.i32 %c1dno, %c0dno, %c1dno : i32 @@ -24,7 +24,7 @@ %c1 = vm.const.i32 1 : i32 %list1 = vm.list.alloc %c1 : (i32) -> !vm.list<i8> %cond = vm.const.i32 0 : i32 - %cond_dno = iree.do_not_optimize(%cond) : i32 + %cond_dno = util.do_not_optimize(%cond) : i32 %list = vm.select.ref %cond_dno, %list0, %list1 : !vm.list<i8> vm.check.eq %list, %list1, "0 ? list0 : list1 = list1" : !vm.list<i8> vm.return
diff --git a/iree/vm/test/assignment_ops_f32.mlir b/iree/vm/test/assignment_ops_f32.mlir index e06f650..dd6c547 100644 --- a/iree/vm/test/assignment_ops_f32.mlir +++ b/iree/vm/test/assignment_ops_f32.mlir
@@ -7,9 +7,9 @@ vm.export @test_select_f32 vm.func @test_select_f32() { %c0 = vm.const.i32 0 : i32 - %c0dno = iree.do_not_optimize(%c0) : i32 + %c0dno = util.do_not_optimize(%c0) : i32 %c1 = vm.const.i32 1 : i32 - %c1dno = iree.do_not_optimize(%c1) : i32 + %c1dno = util.do_not_optimize(%c1) : i32 %c2 = vm.const.f32 0.0 : f32 %c3 = vm.const.f32 1.0 : f32 %v1 = vm.select.f32 %c0dno, %c2, %c3 : f32
diff --git a/iree/vm/test/assignment_ops_i64.mlir b/iree/vm/test/assignment_ops_i64.mlir index 00370ab..6e39382 100644 --- a/iree/vm/test/assignment_ops_i64.mlir +++ b/iree/vm/test/assignment_ops_i64.mlir
@@ -7,9 +7,9 @@ vm.export @test_select_i64 vm.func @test_select_i64() { %c0 = vm.const.i32 0 : i32 - %c0dno = iree.do_not_optimize(%c0) : i32 + %c0dno = util.do_not_optimize(%c0) : i32 %c1 = vm.const.i32 1 : i32 - %c1dno = iree.do_not_optimize(%c1) : i32 + %c1dno = util.do_not_optimize(%c1) : i32 %c2 = vm.const.i64 0 : i64 %c3 = vm.const.i64 1 : i64 %v1 = vm.select.i64 %c0dno, %c2, %c3 : i64
diff --git a/iree/vm/test/buffer_ops.mlir b/iree/vm/test/buffer_ops.mlir index 315017c..1b44078 100644 --- a/iree/vm/test/buffer_ops.mlir +++ b/iree/vm/test/buffer_ops.mlir
@@ -16,8 +16,8 @@ vm.func private @test_compare() { %rodata_a = vm.const.ref.rodata @rodata_cmp_3xi32_a : !vm.buffer %rodata_b = vm.const.ref.rodata @rodata_cmp_3xi32_b : !vm.buffer - %rodata_a_dno = iree.do_not_optimize(%rodata_a) : !vm.buffer - %rodata_b_dno = iree.do_not_optimize(%rodata_b) : !vm.buffer + %rodata_a_dno = util.do_not_optimize(%rodata_a) : !vm.buffer + %rodata_b_dno = util.do_not_optimize(%rodata_b) : !vm.buffer %c0 = vm.const.i32 0 : i32 %length = vm.buffer.length %rodata_a_dno : !vm.buffer -> i32 @@ -36,8 +36,8 @@ vm.func private @test_compare_empty() { %rodata_a = vm.const.ref.rodata @rodata_cmp_3xi32_a : !vm.buffer %rodata_b = vm.const.ref.rodata @rodata_cmp_3xi32_b : !vm.buffer - %rodata_a_dno = iree.do_not_optimize(%rodata_a) : !vm.buffer - %rodata_b_dno = iree.do_not_optimize(%rodata_b) : !vm.buffer + %rodata_a_dno = util.do_not_optimize(%rodata_a) : !vm.buffer + %rodata_b_dno = util.do_not_optimize(%rodata_b) : !vm.buffer %c0 = vm.const.i32 0 : i32 %c2 = vm.const.i32 2 : i32 @@ -57,7 +57,7 @@ vm.func private @test_alloc() { %c128 = vm.const.i32 128 : i32 %buf = vm.buffer.alloc %c128 : !vm.buffer - %buf_dno = iree.do_not_optimize(%buf) : !vm.buffer + %buf_dno = util.do_not_optimize(%buf) : !vm.buffer vm.check.nz %buf_dno, "!null" : !vm.buffer %buf_length = vm.buffer.length %buf_dno : !vm.buffer -> i32 @@ -71,7 +71,7 @@ vm.func private @test_alloc_empty() { %c0 = vm.const.i32 0 : i32 %buf = vm.buffer.alloc %c0 : !vm.buffer - %buf_dno = iree.do_not_optimize(%buf) : !vm.buffer + %buf_dno = util.do_not_optimize(%buf) : !vm.buffer vm.check.nz %buf_dno, "!null" : !vm.buffer %buf_length = vm.buffer.length %buf_dno : !vm.buffer -> i32 @@ -94,7 +94,7 @@ %c4 = vm.const.i32 4 : i32 %c8 = vm.const.i32 8 : i32 %buf = vm.buffer.clone %rodata, %c4, %c8 : !vm.buffer -> !vm.buffer - %buf_dno = iree.do_not_optimize(%buf) : !vm.buffer + %buf_dno = util.do_not_optimize(%buf) : !vm.buffer vm.check.nz %buf_dno, "!null" : !vm.buffer // Compare the cloned range to 
the original. @@ -111,14 +111,14 @@ // Allocate source zero-length buffer. %c0 = vm.const.i32 0 : i32 %buf0 = vm.buffer.alloc %c0 : !vm.buffer - %buf0_dno = iree.do_not_optimize(%buf0) : !vm.buffer + %buf0_dno = util.do_not_optimize(%buf0) : !vm.buffer vm.check.nz %buf0_dno, "!null" : !vm.buffer %buf0_length = vm.buffer.length %buf0_dno : !vm.buffer -> i32 vm.check.eq %c0, %buf0_length, "buffer length == 0" : i32 // Clone it all (or, clone nothing?). %buf1 = vm.buffer.clone %buf0_dno, %c0, %c0 : !vm.buffer -> !vm.buffer - %buf1_dno = iree.do_not_optimize(%buf1) : !vm.buffer + %buf1_dno = util.do_not_optimize(%buf1) : !vm.buffer vm.check.nz %buf1_dno, "!null" : !vm.buffer %buf1_length = vm.buffer.length %buf1_dno : !vm.buffer -> i32 vm.check.eq %c0, %buf1_length, "buffer length == 0" : i32 @@ -131,7 +131,7 @@ vm.func private @fail_clone_out_of_range() { // Fetch source .rodata blob. %rodata = vm.const.ref.rodata @rodata_3xi32 : !vm.buffer - %rodata_dno = iree.do_not_optimize(%rodata) : !vm.buffer + %rodata_dno = util.do_not_optimize(%rodata) : !vm.buffer vm.check.nz %rodata_dno, "!null" : !vm.buffer // Try to clone off the end of the buffer. @@ -155,7 +155,7 @@ // Allocate target buffer. %buf = vm.buffer.alloc %rodata_length : !vm.buffer - %buf_dno = iree.do_not_optimize(%buf) : !vm.buffer + %buf_dno = util.do_not_optimize(%buf) : !vm.buffer vm.check.nz %buf_dno, "!null" : !vm.buffer // Copy the entire contents. @@ -177,7 +177,7 @@ // Allocate target buffer. %c4 = vm.const.i32 4 : i32 %buf = vm.buffer.alloc %c4 : !vm.buffer - %buf_dno = iree.do_not_optimize(%buf) : !vm.buffer + %buf_dno = util.do_not_optimize(%buf) : !vm.buffer vm.check.nz %buf_dno, "!null" : !vm.buffer // Copy the middle 4-byte element. 
@@ -199,7 +199,7 @@ %rodata = vm.const.ref.rodata @rodata_3xi32 : !vm.buffer %c128 = vm.const.i32 128 : i32 %buf = vm.buffer.alloc %c128 : !vm.buffer - %buf_dno = iree.do_not_optimize(%buf) : !vm.buffer + %buf_dno = util.do_not_optimize(%buf) : !vm.buffer vm.check.nz %buf_dno, "!null" : !vm.buffer // Try to clone off the end of the source buffer. @@ -215,7 +215,7 @@ %rodata = vm.const.ref.rodata @rodata_3xi32 : !vm.buffer %c128 = vm.const.i32 128 : i32 %buf = vm.buffer.alloc %c128 : !vm.buffer - %buf_dno = iree.do_not_optimize(%buf) : !vm.buffer + %buf_dno = util.do_not_optimize(%buf) : !vm.buffer vm.check.nz %buf_dno, "!null" : !vm.buffer // Try to clone off the end of the source buffer. @@ -233,7 +233,7 @@ %rodata_length = vm.buffer.length %rodata : !vm.buffer -> i32 %c8 = vm.const.i32 8 : i32 %buf = vm.buffer.alloc %c8 : !vm.buffer - %buf_dno = iree.do_not_optimize(%buf) : !vm.buffer + %buf_dno = util.do_not_optimize(%buf) : !vm.buffer vm.check.nz %buf_dno, "!null" : !vm.buffer // Try to clone off the end of the target buffer. @@ -249,7 +249,7 @@ %rodata = vm.const.ref.rodata @rodata_3xi32 : !vm.buffer %c8 = vm.const.i32 8 : i32 %buf = vm.buffer.alloc %c8 : !vm.buffer - %buf_dno = iree.do_not_optimize(%buf) : !vm.buffer + %buf_dno = util.do_not_optimize(%buf) : !vm.buffer vm.check.nz %buf_dno, "!null" : !vm.buffer // Try to clone off the end of the target buffer. @@ -271,7 +271,7 @@ // Allocate zeroed buffer. %c8 = vm.const.i32 8 : i32 %buf = vm.buffer.alloc %c8 : !vm.buffer - %buf_dno = iree.do_not_optimize(%buf) : !vm.buffer + %buf_dno = util.do_not_optimize(%buf) : !vm.buffer vm.check.nz %buf_dno, "!null" : !vm.buffer // Fill the middle two elements. @@ -297,7 +297,7 @@ // Allocate zeroed buffer. %c8 = vm.const.i32 8 : i32 %buf = vm.buffer.alloc %c8 : !vm.buffer - %buf_dno = iree.do_not_optimize(%buf) : !vm.buffer + %buf_dno = util.do_not_optimize(%buf) : !vm.buffer // Try filling from offset 1, which is not i16-aligned. 
%c1 = vm.const.i32 1 : i32 @@ -323,7 +323,7 @@ // Allocate zeroed buffer. %c8 = vm.const.i32 8 : i32 %buf = vm.buffer.alloc %c8 : !vm.buffer - %buf_dno = iree.do_not_optimize(%buf) : !vm.buffer + %buf_dno = util.do_not_optimize(%buf) : !vm.buffer // Try filling for length 1, which is not i16-aligned. %c0 = vm.const.i32 0 : i32 @@ -518,11 +518,11 @@ vm.export @test_store_i8 attributes {emitc.exclude} vm.func private @test_store_i8() { %ref = vm.const.ref.rodata @test_store_i8_ref : !vm.buffer - %ref_dno = iree.do_not_optimize(%ref) : !vm.buffer + %ref_dno = util.do_not_optimize(%ref) : !vm.buffer %ref_length = vm.buffer.length %ref_dno : !vm.buffer -> i32 %buf = vm.buffer.alloc %ref_length : !vm.buffer - %buf_dno = iree.do_not_optimize(%buf) : !vm.buffer + %buf_dno = util.do_not_optimize(%buf) : !vm.buffer %c0 = vm.const.i32 0 : i32 %e0 = vm.const.i32 0 : i32 @@ -551,11 +551,11 @@ vm.export @test_store_i16 attributes {emitc.exclude} vm.func private @test_store_i16() { %ref = vm.const.ref.rodata @test_store_i16_ref : !vm.buffer - %ref_dno = iree.do_not_optimize(%ref) : !vm.buffer + %ref_dno = util.do_not_optimize(%ref) : !vm.buffer %ref_length = vm.buffer.length %ref_dno : !vm.buffer -> i32 %buf = vm.buffer.alloc %ref_length : !vm.buffer - %buf_dno = iree.do_not_optimize(%buf) : !vm.buffer + %buf_dno = util.do_not_optimize(%buf) : !vm.buffer %c0 = vm.const.i32 0 : i32 %e0 = vm.const.i32 0 : i32 @@ -584,11 +584,11 @@ vm.export @test_store_i32 attributes {emitc.exclude} vm.func private @test_store_i32() { %ref = vm.const.ref.rodata @test_store_i32_ref : !vm.buffer - %ref_dno = iree.do_not_optimize(%ref) : !vm.buffer + %ref_dno = util.do_not_optimize(%ref) : !vm.buffer %ref_length = vm.buffer.length %ref_dno : !vm.buffer -> i32 %buf = vm.buffer.alloc %ref_length : !vm.buffer - %buf_dno = iree.do_not_optimize(%buf) : !vm.buffer + %buf_dno = util.do_not_optimize(%buf) : !vm.buffer %c0 = vm.const.i32 0 : i32 %e0 = vm.const.i32 0 : i32 @@ -617,7 +617,7 @@ vm.func private 
@test_store_i32_unaligned() { %c12 = vm.const.i32 12 : i32 %buf = vm.buffer.alloc %c12 : !vm.buffer - %buf_dno = iree.do_not_optimize(%buf) : !vm.buffer + %buf_dno = util.do_not_optimize(%buf) : !vm.buffer // Byte offset 5 rounded to byte offset 4 (element 1). %c5 = vm.const.i32 5 : i32
diff --git a/iree/vm/test/comparison_ops.mlir b/iree/vm/test/comparison_ops.mlir index 7161cb6..ac171a2 100644 --- a/iree/vm/test/comparison_ops.mlir +++ b/iree/vm/test/comparison_ops.mlir
@@ -7,9 +7,9 @@ vm.export @test_cmp_lt_s_0 vm.func @test_cmp_lt_s_0() { %lhs = vm.const.i32 2 : i32 - %lhs_dno = iree.do_not_optimize(%lhs) : i32 + %lhs_dno = util.do_not_optimize(%lhs) : i32 %rhs = vm.const.i32 -2 : i32 - %rhs_dno = iree.do_not_optimize(%rhs) : i32 + %rhs_dno = util.do_not_optimize(%rhs) : i32 %actual = vm.cmp.lt.i32.s %lhs_dno, %rhs_dno : i32 %expected = vm.const.i32 0 : i32 vm.check.eq %actual, %expected, "2 < -2" : i32 @@ -19,9 +19,9 @@ vm.export @test_cmp_lt_s_1 vm.func @test_cmp_lt_s_1() { %lhs = vm.const.i32 -2 : i32 - %lhs_dno = iree.do_not_optimize(%lhs) : i32 + %lhs_dno = util.do_not_optimize(%lhs) : i32 %rhs = vm.const.i32 2 : i32 - %rhs_dno = iree.do_not_optimize(%rhs) : i32 + %rhs_dno = util.do_not_optimize(%rhs) : i32 %actual = vm.cmp.lt.i32.s %lhs_dno, %rhs_dno : i32 %expected = vm.const.i32 1 : i32 vm.check.eq %actual, %expected, "-2 < 2" : i32 @@ -32,9 +32,9 @@ vm.export @test_cmp_lt_s_2 vm.func @test_cmp_lt_s_2() { %lhs = vm.const.i32 4294967295 : i32 - %lhs_dno = iree.do_not_optimize(%lhs) : i32 + %lhs_dno = util.do_not_optimize(%lhs) : i32 %rhs = vm.const.i32 2 : i32 - %rhs_dno = iree.do_not_optimize(%rhs) : i32 + %rhs_dno = util.do_not_optimize(%rhs) : i32 %actual = vm.cmp.lt.i32.s %lhs_dno, %rhs_dno : i32 %expected = vm.const.i32 1 : i32 vm.check.eq %actual, %expected, "4294967295 (UINT_MAX) < 2" : i32 @@ -48,9 +48,9 @@ vm.export @test_cmp_lt_u_0 vm.func @test_cmp_lt_u_0() { %lhs = vm.const.i32 2 : i32 - %lhs_dno = iree.do_not_optimize(%lhs) : i32 + %lhs_dno = util.do_not_optimize(%lhs) : i32 %rhs = vm.const.i32 -2 : i32 - %rhs_dno = iree.do_not_optimize(%rhs) : i32 + %rhs_dno = util.do_not_optimize(%rhs) : i32 %actual = vm.cmp.lt.i32.u %lhs_dno, %rhs_dno : i32 %expected = vm.const.i32 1 : i32 vm.check.eq %actual, %expected, "2 < -2 (as unsigned)" : i32 @@ -60,9 +60,9 @@ vm.export @test_cmp_lt_u_1 vm.func @test_cmp_lt_u_1() { %lhs = vm.const.i32 -2 : i32 - %lhs_dno = iree.do_not_optimize(%lhs) : i32 + %lhs_dno = 
util.do_not_optimize(%lhs) : i32 %rhs = vm.const.i32 2 : i32 - %rhs_dno = iree.do_not_optimize(%rhs) : i32 + %rhs_dno = util.do_not_optimize(%rhs) : i32 %actual = vm.cmp.lt.i32.u %lhs_dno, %rhs_dno : i32 %expected = vm.const.i32 0 : i32 vm.check.eq %actual, %expected, "-2 < 2 (as unsigned)" : i32 @@ -72,9 +72,9 @@ vm.export @test_cmp_lt_u_2 vm.func @test_cmp_lt_u_2() { %lhs = vm.const.i32 4294967295 : i32 - %lhs_dno = iree.do_not_optimize(%lhs) : i32 + %lhs_dno = util.do_not_optimize(%lhs) : i32 %rhs = vm.const.i32 2 : i32 - %rhs_dno = iree.do_not_optimize(%rhs) : i32 + %rhs_dno = util.do_not_optimize(%rhs) : i32 %actual = vm.cmp.lt.i32.u %lhs_dno, %rhs_dno : i32 %expected = vm.const.i32 0 : i32 vm.check.eq %actual, %expected, "4294967295 (UINT_MAX) < 2 (as unsigned)" : i32 @@ -94,9 +94,9 @@ %false = vm.const.i32 0 : i32 %cn2 = vm.const.i32 -2 : i32 - %cn2_dno = iree.do_not_optimize(%cn2) : i32 + %cn2_dno = util.do_not_optimize(%cn2) : i32 %c2 = vm.const.i32 2 : i32 - %c2_dno = iree.do_not_optimize(%c2) : i32 + %c2_dno = util.do_not_optimize(%c2) : i32 %cmp_0 = vm.cmp.lte.i32.s %cn2_dno, %c2_dno : i32 vm.check.eq %cmp_0, %true, "-2 <= 2" : i32 @@ -121,9 +121,9 @@ %false = vm.const.i32 0 : i32 %cn2 = vm.const.i32 -2 : i32 - %cn2_dno = iree.do_not_optimize(%cn2) : i32 + %cn2_dno = util.do_not_optimize(%cn2) : i32 %c2 = vm.const.i32 2 : i32 - %c2_dno = iree.do_not_optimize(%c2) : i32 + %c2_dno = util.do_not_optimize(%c2) : i32 %cmp_0 = vm.cmp.gt.i32.s %cn2_dno, %c2_dno : i32 vm.check.eq %cmp_0, %false, "-2 > 2" : i32 @@ -148,9 +148,9 @@ %false = vm.const.i32 0 : i32 %cn2 = vm.const.i32 -2 : i32 - %cn2_dno = iree.do_not_optimize(%cn2) : i32 + %cn2_dno = util.do_not_optimize(%cn2) : i32 %c2 = vm.const.i32 2 : i32 - %c2_dno = iree.do_not_optimize(%c2) : i32 + %c2_dno = util.do_not_optimize(%c2) : i32 %cmp_0 = vm.cmp.gte.i32.s %cn2_dno, %c2_dno : i32 vm.check.eq %cmp_0, %false, "-2 >= 2" : i32
diff --git a/iree/vm/test/comparison_ops_f32.mlir b/iree/vm/test/comparison_ops_f32.mlir index 337cc08..962d2dc 100644 --- a/iree/vm/test/comparison_ops_f32.mlir +++ b/iree/vm/test/comparison_ops_f32.mlir
@@ -7,9 +7,9 @@ vm.export @test_cmp_lt_0_f32 vm.func @test_cmp_lt_0_f32() { %lhs = vm.const.f32 4.0 : f32 - %lhs_dno = iree.do_not_optimize(%lhs) : f32 + %lhs_dno = util.do_not_optimize(%lhs) : f32 %rhs = vm.const.f32 -4.0 : f32 - %rhs_dno = iree.do_not_optimize(%rhs) : f32 + %rhs_dno = util.do_not_optimize(%rhs) : f32 %actual = vm.cmp.lt.f32.o %lhs_dno, %rhs_dno : f32 %expected = vm.const.i32 0 : i32 vm.check.eq %actual, %expected, "4.0 < -4.0" : i32 @@ -19,9 +19,9 @@ vm.export @test_cmp_lt_1_f32 vm.func @test_cmp_lt_1_f32() { %lhs = vm.const.f32 -4.0 : f32 - %lhs_dno = iree.do_not_optimize(%lhs) : f32 + %lhs_dno = util.do_not_optimize(%lhs) : f32 %rhs = vm.const.f32 4.0 : f32 - %rhs_dno = iree.do_not_optimize(%rhs) : f32 + %rhs_dno = util.do_not_optimize(%rhs) : f32 %actual = vm.cmp.lt.f32.o %lhs_dno, %rhs_dno : f32 %expected = vm.const.i32 1 : i32 vm.check.eq %actual, %expected, "-4.0 < 4.0" : i32 @@ -41,9 +41,9 @@ %false = vm.const.i32 0 : i32 %cn2 = vm.const.f32 -2.0 : f32 - %cn2_dno = iree.do_not_optimize(%cn2) : f32 + %cn2_dno = util.do_not_optimize(%cn2) : f32 %c2 = vm.const.f32 2.0 : f32 - %c2_dno = iree.do_not_optimize(%c2) : f32 + %c2_dno = util.do_not_optimize(%c2) : f32 %cmp_0 = vm.cmp.lte.f32.o %cn2_dno, %c2_dno : f32 vm.check.eq %cmp_0, %true, "-2 <= 2" : i32 @@ -61,9 +61,9 @@ %false = vm.const.i32 0 : i32 %cn2 = vm.const.f32 -2.0 : f32 - %cn2_dno = iree.do_not_optimize(%cn2) : f32 + %cn2_dno = util.do_not_optimize(%cn2) : f32 %c2 = vm.const.f32 2.0 : f32 - %c2_dno = iree.do_not_optimize(%c2) : f32 + %c2_dno = util.do_not_optimize(%c2) : f32 %cmp_0 = vm.cmp.gt.f32.o %cn2_dno, %c2_dno : f32 vm.check.eq %cmp_0, %false, "-2 > 2" : i32 @@ -81,9 +81,9 @@ %false = vm.const.i32 0 : i32 %cn2 = vm.const.f32 -2.0 : f32 - %cn2_dno = iree.do_not_optimize(%cn2) : f32 + %cn2_dno = util.do_not_optimize(%cn2) : f32 %c2 = vm.const.f32 2.0 : f32 - %c2_dno = iree.do_not_optimize(%c2) : f32 + %c2_dno = util.do_not_optimize(%c2) : f32 %cmp_0 = vm.cmp.gte.f32.o %cn2_dno, 
%c2_dno : f32 vm.check.eq %cmp_0, %false, "-2 >= 2" : i32
diff --git a/iree/vm/test/comparison_ops_i64.mlir b/iree/vm/test/comparison_ops_i64.mlir index 0f87162..0da02bf 100644 --- a/iree/vm/test/comparison_ops_i64.mlir +++ b/iree/vm/test/comparison_ops_i64.mlir
@@ -7,9 +7,9 @@ vm.export @test_cmp_lt_s_0_i64 vm.func @test_cmp_lt_s_0_i64() { %lhs = vm.const.i64 4294967295 : i64 - %lhs_dno = iree.do_not_optimize(%lhs) : i64 + %lhs_dno = util.do_not_optimize(%lhs) : i64 %rhs = vm.const.i64 -4294967295 : i64 - %rhs_dno = iree.do_not_optimize(%rhs) : i64 + %rhs_dno = util.do_not_optimize(%rhs) : i64 %actual = vm.cmp.lt.i64.s %lhs_dno, %rhs_dno : i64 %expected = vm.const.i32 0 : i32 vm.check.eq %actual, %expected, "4294967295 (UINT_MAX) < -4294967295 (UINT_MAX)" : i32 @@ -19,9 +19,9 @@ vm.export @test_cmp_lt_s_1_i64 vm.func @test_cmp_lt_s_1_i64() { %lhs = vm.const.i64 -4294967295 : i64 - %lhs_dno = iree.do_not_optimize(%lhs) : i64 + %lhs_dno = util.do_not_optimize(%lhs) : i64 %rhs = vm.const.i64 4294967295 : i64 - %rhs_dno = iree.do_not_optimize(%rhs) : i64 + %rhs_dno = util.do_not_optimize(%rhs) : i64 %actual = vm.cmp.lt.i64.s %lhs_dno, %rhs_dno : i64 %expected = vm.const.i32 1 : i32 vm.check.eq %actual, %expected, "-4294967295 (UINT_MAX) < 4294967295 (UINT_MAX)" : i32 @@ -32,9 +32,9 @@ vm.export @test_cmp_lt_s_2_i64 vm.func @test_cmp_lt_s_2_i64() { %lhs = vm.const.i64 18446744073709551615 : i64 - %lhs_dno = iree.do_not_optimize(%lhs) : i64 + %lhs_dno = util.do_not_optimize(%lhs) : i64 %rhs = vm.const.i64 2 : i64 - %rhs_dno = iree.do_not_optimize(%rhs) : i64 + %rhs_dno = util.do_not_optimize(%rhs) : i64 %actual = vm.cmp.lt.i64.s %lhs_dno, %rhs_dno : i64 %expected = vm.const.i32 1 : i32 vm.check.eq %actual, %expected, "18446744073709551615 (ULONG_MAX) < 2" : i32 @@ -48,9 +48,9 @@ vm.export @test_cmp_lt_u_0_i64 vm.func @test_cmp_lt_u_0_i64() { %lhs = vm.const.i64 2 : i64 - %lhs_dno = iree.do_not_optimize(%lhs) : i64 + %lhs_dno = util.do_not_optimize(%lhs) : i64 %rhs = vm.const.i64 -2 : i64 - %rhs_dno = iree.do_not_optimize(%rhs) : i64 + %rhs_dno = util.do_not_optimize(%rhs) : i64 %actual = vm.cmp.lt.i64.u %lhs_dno, %rhs_dno : i64 %expected = vm.const.i32 1 : i32 vm.check.eq %actual, %expected, "2 < -2 (as unsigned)" : i32 @@ 
-60,9 +60,9 @@ vm.export @test_cmp_lt_u_1_i64 vm.func @test_cmp_lt_u_1_i64() { %lhs = vm.const.i64 -2 : i64 - %lhs_dno = iree.do_not_optimize(%lhs) : i64 + %lhs_dno = util.do_not_optimize(%lhs) : i64 %rhs = vm.const.i64 2 : i64 - %rhs_dno = iree.do_not_optimize(%rhs) : i64 + %rhs_dno = util.do_not_optimize(%rhs) : i64 %actual = vm.cmp.lt.i64.u %lhs_dno, %rhs_dno : i64 %expected = vm.const.i32 0 : i32 vm.check.eq %actual, %expected, "-2 < 2 (as unsigned)" : i32 @@ -72,9 +72,9 @@ vm.export @test_cmp_lt_u_2_i64 vm.func @test_cmp_lt_u_2_i64() { %lhs = vm.const.i64 18446744073709551615 : i64 - %lhs_dno = iree.do_not_optimize(%lhs) : i64 + %lhs_dno = util.do_not_optimize(%lhs) : i64 %rhs = vm.const.i64 2 : i64 - %rhs_dno = iree.do_not_optimize(%rhs) : i64 + %rhs_dno = util.do_not_optimize(%rhs) : i64 %actual = vm.cmp.lt.i64.u %lhs_dno, %rhs_dno : i64 %expected = vm.const.i32 0 : i32 vm.check.eq %actual, %expected, "18446744073709551615 (ULONG_MAX) < 2 (as unsigned)" : i32 @@ -94,9 +94,9 @@ %false = vm.const.i32 0 : i32 %cn2 = vm.const.i64 -2 : i64 - %cn2_dno = iree.do_not_optimize(%cn2) : i64 + %cn2_dno = util.do_not_optimize(%cn2) : i64 %c2 = vm.const.i64 2 : i64 - %c2_dno = iree.do_not_optimize(%c2) : i64 + %c2_dno = util.do_not_optimize(%c2) : i64 %cmp_0 = vm.cmp.lte.i64.s %cn2_dno, %c2_dno : i64 vm.check.eq %cmp_0, %true, "-2 <= 2" : i32 @@ -121,9 +121,9 @@ %false = vm.const.i32 0 : i32 %cn2 = vm.const.i64 -2 : i64 - %cn2_dno = iree.do_not_optimize(%cn2) : i64 + %cn2_dno = util.do_not_optimize(%cn2) : i64 %c2 = vm.const.i64 2 : i64 - %c2_dno = iree.do_not_optimize(%c2) : i64 + %c2_dno = util.do_not_optimize(%c2) : i64 %cmp_0 = vm.cmp.gt.i64.s %cn2_dno, %c2_dno : i64 vm.check.eq %cmp_0, %false, "-2 > 2" : i32 @@ -148,9 +148,9 @@ %false = vm.const.i32 0 : i32 %cn2 = vm.const.i64 -2 : i64 - %cn2_dno = iree.do_not_optimize(%cn2) : i64 + %cn2_dno = util.do_not_optimize(%cn2) : i64 %c2 = vm.const.i64 2 : i64 - %c2_dno = iree.do_not_optimize(%c2) : i64 + %c2_dno = 
util.do_not_optimize(%c2) : i64 %cmp_0 = vm.cmp.gte.i64.s %cn2_dno, %c2_dno : i64 vm.check.eq %cmp_0, %false, "-2 >= 2" : i32
diff --git a/iree/vm/test/control_flow_ops.mlir b/iree/vm/test/control_flow_ops.mlir index 785394a..474867d 100644 --- a/iree/vm/test/control_flow_ops.mlir +++ b/iree/vm/test/control_flow_ops.mlir
@@ -26,7 +26,7 @@ vm.export @test_check_eq_always vm.func @test_check_eq_always() { %c1 = vm.const.i32 1 : i32 - %c1dno = iree.do_not_optimize(%c1) : i32 + %c1dno = util.do_not_optimize(%c1) : i32 vm.check.eq %c1, %c1dno, "error!" : i32 vm.return } @@ -35,8 +35,8 @@ vm.func @fail_check_eq_never() { %c1 = vm.const.i32 1 : i32 %c2 = vm.const.i32 2 : i32 - %c1dno = iree.do_not_optimize(%c1) : i32 - %c2dno = iree.do_not_optimize(%c2) : i32 + %c1dno = util.do_not_optimize(%c1) : i32 + %c2dno = util.do_not_optimize(%c2) : i32 vm.check.eq %c1dno, %c2dno, "error!" : i32 vm.return } @@ -48,7 +48,7 @@ vm.export @test_cond_br vm.func @test_cond_br() { %c1 = vm.const.i32 1 : i32 - %c1dno = iree.do_not_optimize(%c1) : i32 + %c1dno = util.do_not_optimize(%c1) : i32 vm.cond_br %c1dno, ^bb1, ^bb2 ^bb1: vm.check.eq %c1dno, %c1dno, "error!" : i32 @@ -61,7 +61,7 @@ vm.export @test_cond_br_int_arg vm.func @test_cond_br_int_arg() { %c1 = vm.const.i32 1 : i32 - %c1dno = iree.do_not_optimize(%c1) : i32 + %c1dno = util.do_not_optimize(%c1) : i32 vm.cond_br %c1dno, ^bb1(%c1dno : i32), ^bb2(%c1dno : i32) ^bb1(%arg1 : i32): vm.check.eq %arg1, %c1dno, "error!" : i32 @@ -75,7 +75,7 @@ vm.export @test_cond_br_ref_arg attributes {emitc.exclude} vm.func private @test_cond_br_ref_arg() { %c1 = vm.const.i32 1 : i32 - %c1dno = iree.do_not_optimize(%c1) : i32 + %c1dno = util.do_not_optimize(%c1) : i32 %ref = vm.const.ref.zero : !vm.ref<?> vm.cond_br %c1dno, ^bb1(%ref : !vm.ref<?>), ^bb2(%ref : !vm.ref<?>) ^bb1(%arg1 : !vm.ref<?>):
diff --git a/iree/vm/test/conversion_ops.mlir b/iree/vm/test/conversion_ops.mlir index 009c9af..3cf741e 100644 --- a/iree/vm/test/conversion_ops.mlir +++ b/iree/vm/test/conversion_ops.mlir
@@ -7,7 +7,7 @@ vm.export @test_trunc_i32_i8 vm.func @test_trunc_i32_i8() { %c1 = vm.const.i32 2147483647 : i32 - %c1dno = iree.do_not_optimize(%c1) : i32 + %c1dno = util.do_not_optimize(%c1) : i32 %v = vm.trunc.i32.i8 %c1dno : i32 -> i32 %c2 = vm.const.i32 255 : i32 vm.check.eq %v, %c2, "truncate unsigned i32 to unsigned i8" : i32 @@ -17,7 +17,7 @@ vm.export @test_trunc_i32_i16 vm.func @test_trunc_i32_i16() { %c1 = vm.const.i32 2147483647 : i32 - %c1dno = iree.do_not_optimize(%c1) : i32 + %c1dno = util.do_not_optimize(%c1) : i32 %v = vm.trunc.i32.i16 %c1dno : i32 -> i32 %c2 = vm.const.i32 65535 : i32 vm.check.eq %v, %c2, "truncate unsigned i32 to unsigned i16" : i32
diff --git a/iree/vm/test/conversion_ops_f32.mlir b/iree/vm/test/conversion_ops_f32.mlir index 4e2b766..93fbb91 100644 --- a/iree/vm/test/conversion_ops_f32.mlir +++ b/iree/vm/test/conversion_ops_f32.mlir
@@ -7,7 +7,7 @@ vm.export @test_cast_si32_f32_int_max vm.func @test_cast_si32_f32_int_max() { %c1 = vm.const.i32 2147483647 : i32 - %c1dno = iree.do_not_optimize(%c1) : i32 + %c1dno = util.do_not_optimize(%c1) : i32 %v = vm.cast.si32.f32 %c1dno : i32 -> f32 %c2 = vm.const.f32 2147483647.0 : f32 vm.check.eq %v, %c2, "cast signed integer to a floating-point value" : f32 @@ -17,7 +17,7 @@ vm.export @test_cast_si32_f32_int_min vm.func @test_cast_si32_f32_int_min() { %c1 = vm.const.i32 -2147483648 : i32 - %c1dno = iree.do_not_optimize(%c1) : i32 + %c1dno = util.do_not_optimize(%c1) : i32 %v = vm.cast.si32.f32 %c1dno : i32 -> f32 %c2 = vm.const.f32 -2147483648.0 : f32 vm.check.eq %v, %c2, "cast signed integer to a floating-point value" : f32 @@ -27,7 +27,7 @@ vm.export @test_cast_ui32_f32_int_max vm.func @test_cast_ui32_f32_int_max() { %c1 = vm.const.i32 4294967295 : i32 - %c1dno = iree.do_not_optimize(%c1) : i32 + %c1dno = util.do_not_optimize(%c1) : i32 %v = vm.cast.ui32.f32 %c1dno : i32 -> f32 %c2 = vm.const.f32 4294967295.0 : f32 vm.check.eq %v, %c2, "cast unsigned integer to a floating-point value" : f32 @@ -37,7 +37,7 @@ vm.export @test_cast_f32_si32_int_max vm.func @test_cast_f32_si32_int_max() { %c1 = vm.const.f32 2147483647.0 : f32 - %c1dno = iree.do_not_optimize(%c1) : f32 + %c1dno = util.do_not_optimize(%c1) : f32 %v = vm.cast.f32.si32 %c1dno : f32 -> i32 %c2 = vm.const.i32 -2147483648 : i32 vm.check.eq %v, %c2, "cast floating-point value to a signed integer" : i32 @@ -47,7 +47,7 @@ vm.export @test_cast_f32_si32_int_min vm.func @test_cast_f32_si32_int_min() { %c1 = vm.const.f32 -2147483648.0 : f32 - %c1dno = iree.do_not_optimize(%c1) : f32 + %c1dno = util.do_not_optimize(%c1) : f32 %v = vm.cast.f32.si32 %c1dno : f32 -> i32 %c2 = vm.const.i32 -2147483648 : i32 vm.check.eq %v, %c2, "cast floating-point value to a signed integer" : i32 @@ -57,7 +57,7 @@ vm.export @test_cast_f32_ui32_int_max vm.func @test_cast_f32_ui32_int_max() { %c1 = vm.const.f32 4294967295.0 : 
f32 - %c1dno = iree.do_not_optimize(%c1) : f32 + %c1dno = util.do_not_optimize(%c1) : f32 %v = vm.cast.f32.ui32 %c1dno : f32 -> i32 %c2 = vm.const.i32 0 : i32 vm.check.eq %v, %c2, "cast floating-point value to an unsigned integer" : i32
diff --git a/iree/vm/test/conversion_ops_i64.mlir b/iree/vm/test/conversion_ops_i64.mlir index b763a67..c23083e 100644 --- a/iree/vm/test/conversion_ops_i64.mlir +++ b/iree/vm/test/conversion_ops_i64.mlir
@@ -7,7 +7,7 @@ vm.export @test_trunc_i64_i32 vm.func @test_trunc_i64_i32() { %c1 = vm.const.i64 9223372036854775807 : i64 - %c1dno = iree.do_not_optimize(%c1) : i64 + %c1dno = util.do_not_optimize(%c1) : i64 %v = vm.trunc.i64.i32 %c1dno : i64 -> i32 %c2 = vm.const.i32 4294967295 : i32 vm.check.eq %v, %c2, "truncate unsigned i64 to unsigned i32" : i32
diff --git a/iree/vm/test/list_ops.mlir b/iree/vm/test/list_ops.mlir index 514de69..098e24c 100644 --- a/iree/vm/test/list_ops.mlir +++ b/iree/vm/test/list_ops.mlir
@@ -12,7 +12,7 @@ %list = vm.list.alloc %c42 : (i32) -> !vm.list<i8> vm.list.reserve %list, %c100 : (!vm.list<i8>, i32) %sz = vm.list.size %list : (!vm.list<i8>) -> i32 - %sz_dno = iree.do_not_optimize(%sz) : i32 + %sz_dno = util.do_not_optimize(%sz) : i32 vm.check.eq %sz_dno, %c0, "list<i8>.empty.size()=0" : i32 vm.return } @@ -125,7 +125,7 @@ %list = vm.list.alloc %c1 : (i32) -> !vm.list<i32> vm.list.resize %list, %c1 : (!vm.list<i32>, i32) %v = vm.list.get.i32 %list, %c1 : (!vm.list<i32>, i32) -> i32 - %v_dno = iree.do_not_optimize(%v) : i32 + %v_dno = util.do_not_optimize(%v) : i32 vm.return }
diff --git a/iree/vm/test/list_variant_ops.mlir b/iree/vm/test/list_variant_ops.mlir index bd1a48d..fcd909d 100644 --- a/iree/vm/test/list_variant_ops.mlir +++ b/iree/vm/test/list_variant_ops.mlir
@@ -97,10 +97,10 @@ vm.func @fail_uninitialized_access() { %c0 = vm.const.i32 0 : i32 %c1 = vm.const.i32 1 : i32 - + %ref = vm.const.ref.rodata @byte_buffer : !vm.buffer %list = vm.list.alloc %c1 : (i32) -> !vm.list<?> - + vm.list.set.ref %list, %c0, %ref : (!vm.list<?>, i32, !vm.buffer) vm.return } @@ -111,9 +111,9 @@ %list = vm.list.alloc %c1 : (i32) -> !vm.list<?> vm.list.resize %list, %c1 : (!vm.list<?>, i32) - + %ref = vm.list.get.ref %list, %c1 : (!vm.list<?>, i32) -> !vm.buffer - %ref_dno = iree.do_not_optimize(%ref) : !vm.buffer + %ref_dno = util.do_not_optimize(%ref) : !vm.buffer vm.return } @@ -121,7 +121,7 @@ vm.func @fail_out_of_bounds_write() { %c0 = vm.const.i32 0 : i32 %c1 = vm.const.i32 1 : i32 - + %ref = vm.const.ref.rodata @byte_buffer : !vm.buffer %list = vm.list.alloc %c1 : (i32) -> !vm.list<?> vm.list.resize %list, %c1 : (!vm.list<?>, i32)
diff --git a/iree/vm/test/ref_ops.mlir b/iree/vm/test/ref_ops.mlir index 27496e2..8b24d04 100644 --- a/iree/vm/test/ref_ops.mlir +++ b/iree/vm/test/ref_ops.mlir
@@ -1,12 +1,12 @@ vm.module @ref_ops { - + vm.rodata private @buffer_i8 dense<[1, 2, 3]> : tensor<3xi8> vm.rodata private @buffer_i32 dense<[1, 2, 3]> : tensor<3xi32> vm.export @test_zero_ref_eq vm.func @test_zero_ref_eq() { %ref = vm.const.ref.zero : !vm.ref<?> - %ref_dno = iree.do_not_optimize(%ref) : !vm.ref<?> + %ref_dno = util.do_not_optimize(%ref) : !vm.ref<?> vm.check.eq %ref_dno, %ref_dno : !vm.ref<?> vm.return } @@ -14,9 +14,9 @@ vm.export @test_ref_eq vm.func @test_ref_eq() { %ref_1 = vm.const.ref.rodata @buffer_i8 : !vm.buffer - %ref_1_dno = iree.do_not_optimize(%ref_1) : !vm.buffer + %ref_1_dno = util.do_not_optimize(%ref_1) : !vm.buffer %ref_2 = vm.const.ref.rodata @buffer_i8 : !vm.buffer - %ref_2_dno = iree.do_not_optimize(%ref_2) : !vm.buffer + %ref_2_dno = util.do_not_optimize(%ref_2) : !vm.buffer vm.check.eq %ref_1_dno, %ref_2_dno : !vm.buffer vm.return } @@ -24,9 +24,9 @@ vm.export @test_ref_ne vm.func @test_ref_ne() { %ref_i8 = vm.const.ref.rodata @buffer_i8 : !vm.buffer - %ref_i8_dno = iree.do_not_optimize(%ref_i8) : !vm.buffer + %ref_i8_dno = util.do_not_optimize(%ref_i8) : !vm.buffer %ref_i32 = vm.const.ref.rodata @buffer_i32 : !vm.buffer - %ref_i32_dno = iree.do_not_optimize(%ref_i32) : !vm.buffer + %ref_i32_dno = util.do_not_optimize(%ref_i32) : !vm.buffer vm.check.ne %ref_i8_dno, %ref_i32_dno : !vm.buffer vm.return } @@ -34,7 +34,7 @@ vm.export @test_ref_nz vm.func @test_ref_nz() { %ref = vm.const.ref.rodata @buffer_i8 : !vm.buffer - %ref_dno = iree.do_not_optimize(%ref) : !vm.buffer + %ref_dno = util.do_not_optimize(%ref) : !vm.buffer vm.check.nz %ref_dno : !vm.buffer vm.return }
diff --git a/iree/vm/test/shift_ops.mlir b/iree/vm/test/shift_ops.mlir index 8a43ee6..f80110f 100644 --- a/iree/vm/test/shift_ops.mlir +++ b/iree/vm/test/shift_ops.mlir
@@ -7,7 +7,7 @@ vm.export @test_shl_i32 vm.func @test_shl_i32() { %c1 = vm.const.i32 1 : i32 - %c1dno = iree.do_not_optimize(%c1) : i32 + %c1dno = util.do_not_optimize(%c1) : i32 %c2 = vm.const.i32 2 : i32 %v = vm.shl.i32 %c1dno, %c2 : i32 %c4 = vm.const.i32 4 : i32 @@ -18,7 +18,7 @@ vm.export @test_shr_i32s vm.func @test_shr_i32s() { %cn1 = vm.const.i32 -1 : i32 - %cn1dno = iree.do_not_optimize(%cn1) : i32 + %cn1dno = util.do_not_optimize(%cn1) : i32 %c2 = vm.const.i32 2 : i32 %v = vm.shr.i32.s %cn1dno, %c2 : i32 vm.check.eq %v, %cn1dno, "-1>>2=-1" : i32 @@ -28,7 +28,7 @@ vm.export @test_shr_i32u vm.func @test_shr_i32u() { %c4 = vm.const.i32 4 : i32 - %c4dno = iree.do_not_optimize(%c4) : i32 + %c4dno = util.do_not_optimize(%c4) : i32 %c2 = vm.const.i32 2 : i32 %v = vm.shr.i32.u %c4dno, %c2 : i32 %c1 = vm.const.i32 1 : i32
diff --git a/iree/vm/test/shift_ops_i64.mlir b/iree/vm/test/shift_ops_i64.mlir index 53a4490..de4ab42 100644 --- a/iree/vm/test/shift_ops_i64.mlir +++ b/iree/vm/test/shift_ops_i64.mlir
@@ -7,7 +7,7 @@ vm.export @test_shl_i64 vm.func @test_shl_i64() { %c1 = vm.const.i64 1 : i64 - %c1dno = iree.do_not_optimize(%c1) : i64 + %c1dno = util.do_not_optimize(%c1) : i64 %shamt = vm.const.i32 2 : i32 %v = vm.shl.i64 %c1dno, %shamt : i64 %c4 = vm.const.i64 4 : i64 @@ -18,7 +18,7 @@ vm.export @test_shr_i64s vm.func @test_shr_i64s() { %c1 = vm.const.i64 -1 : i64 - %c1dno = iree.do_not_optimize(%c1) : i64 + %c1dno = util.do_not_optimize(%c1) : i64 %shamt = vm.const.i32 2 : i32 %v = vm.shr.i64.s %c1dno, %shamt : i64 %cn1 = vm.const.i64 -1 : i64 @@ -29,7 +29,7 @@ vm.export @test_shr_i64u vm.func @test_shr_i64u() { %c4 = vm.const.i64 4 : i64 - %c4dno = iree.do_not_optimize(%c4) : i64 + %c4dno = util.do_not_optimize(%c4) : i64 %shamt = vm.const.i32 2 : i32 %v = vm.shr.i64.u %c4dno, %shamt : i64 %c1 = vm.const.i64 1 : i64
diff --git a/llvm-projects/README.md b/llvm-projects/README.md new file mode 100644 index 0000000..143a4b7 --- /dev/null +++ b/llvm-projects/README.md
@@ -0,0 +1,15 @@ +# IREE LLVM-based projects + +Projects in this tree are targeted for interop with upstream LLVM/MLIR and +related projects. They follow LLVM standards, build system, API design and +packaging conventions. In general they are either: + +* Meant to be used as an `LLVM_EXTERNAL_PROJECT`. +* A standalone project based on the LLVM build system. + +We publish projects here when they are meant to consume or interoperate at a +build/source level with other projects in the ecosystem. + +## Exceptions to LLVM coding standards + +* File headers follow IREE conventions for copyright/license banner.
diff --git a/llvm-projects/iree-compiler-api/.gitignore b/llvm-projects/iree-compiler-api/.gitignore new file mode 100644 index 0000000..a0f5f9d --- /dev/null +++ b/llvm-projects/iree-compiler-api/.gitignore
@@ -0,0 +1,3 @@ +/build/ +/wheels/ +/*.egg-info/
diff --git a/llvm-projects/iree-compiler-api/CMakeLists.txt b/llvm-projects/iree-compiler-api/CMakeLists.txt new file mode 100644 index 0000000..977bf19 --- /dev/null +++ b/llvm-projects/iree-compiler-api/CMakeLists.txt
@@ -0,0 +1,137 @@ +# This is a standalone project for the creation of compiler API and tools +# packages. It is meant to be built directly and aggregates all other projects +# that make up the IREE compiler input surface area. As a standalone +# project intended for direct distribution, it hard-codes a number of important +# CMake settings for producing optimal binaries and packages. +cmake_minimum_required(VERSION 3.13.4) + +if(POLICY CMP0068) + cmake_policy(SET CMP0068 NEW) + set(CMAKE_BUILD_WITH_INSTALL_NAME_DIR ON) +endif() + +if(POLICY CMP0075) + cmake_policy(SET CMP0075 NEW) +endif() + +if(POLICY CMP0077) + cmake_policy(SET CMP0077 NEW) +endif() + +# Honor all visibility presets (please, please, pretty-please). +# This needs to be set as a default because some IREE dependencies specify +# a CMake min version of 3.0, which causes them to set it locally to OLD. +set(CMAKE_POLICY_DEFAULT_CMP0063 NEW) + +project(iree-compiler-backend LANGUAGES C CXX) + +# Directory layout. +# When building in-tree, this directory exists relative to the overall +# repository. However, when building from a source package, sub-directories +# will be populated below this directory. This is the primary place that the +# switch is done. +set(IREE_COMPILER_API_SOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}") +set(IREE_COMPILER_API_BINARY_DIR "${CMAKE_CURRENT_BINARY_DIR}") + +set(IREE_COMPILER_API_INTREE ON) +if(IREE_COMPILER_API_INTREE) + set(LLVM_EXTERNAL_MLIR_IREE_DIALECTS_SOURCE_DIR "${IREE_COMPILER_API_SOURCE_DIR}/../iree-dialects") + set(IREE_MAIN_SOURCE_DIR "${IREE_COMPILER_API_SOURCE_DIR}/../..") + set(LLVM_MAIN_SRC_DIR "${IREE_COMPILER_API_SOURCE_DIR}/../../third_party/llvm-project/llvm") + set(LLVM_EXTERNAL_MLIR_HLO_SOURCE_DIR "${IREE_COMPILER_API_SOURCE_DIR}/../../third_party/mlir-hlo") + +else() + message(FATAL_ERROR "Non intree (source package) not yet supported") +endif() + +# Configuration includes. 
+include(${IREE_MAIN_SOURCE_DIR}/build_tools/cmake/iree_third_party_cmake_options.cmake) + +# Dependent directories. +set(MLIR_MAIN_SRC_DIR ${LLVM_MAIN_SRC_DIR}/../mlir) +set(LLVM_MAIN_BINARY_DIR "${CMAKE_CURRENT_BINARY_DIR}/llvm") +set(MLIR_MAIN_BINARY_DIR "${LLVM_MAIN_BINARY_DIR}/tools/mlir") +set(IREE_MAIN_BINARY_DIR "${CMAKE_CURRENT_BINARY_DIR}/iree") + +# CMake settings. +set(BUILD_SHARED_LIBS OFF CACHE BOOL "" FORCE) +set(CMAKE_PLATFORM_NO_VERSIONED_SONAME ON) +set(CMAKE_VISIBILITY_INLINES_HIDDEN ON) +set(CMAKE_C_VISIBILITY_PRESET "hidden") +set(CMAKE_CXX_VISIBILITY_PRESET "hidden") + +# Required MLIR settings. +# TODO: We require an ordering of mlir-dependent external projects to be +# loaded after mlir. We take advantage of the lexical ordering here by +# prefixing, which feels gross but is stable. Investigate a better mechanism +# upstream. +iree_set_llvm_cmake_options() +set(LLVM_EXTERNAL_PROJECTS "mlir-iree-dialects;mlir-hlo" CACHE STRING "" FORCE) +set(MLIR_ENABLE_BINDINGS_PYTHON ON CACHE BOOL "" FORCE) +set(MLIR_BINDINGS_PYTHON_LOCK_VERSION OFF CACHE BOOL "" FORCE) +# TODO: Conflicting gtest. +set(LLVM_INCLUDE_TESTS OFF CACHE BOOL "" FORCE) +# TODO: Conflicting benchmark. +set(LLVM_INCLUDE_BENCHMARKS OFF CACHE BOOL "" FORCE) + +# Required mlir-hlo settings. +# TODO: Consider removing this upstream and just using the main +# MLIR_ENABLE_BINDINGS_PYTHON option. +set(MHLO_ENABLE_BINDINGS_PYTHON ON CACHE BOOL "" FORCE) + +# TODO: Fix this upstream. Each of these system include hacks is broken in +# a different way, so there is not an easy local fix. They should be removed +# one be one until this project builds. Since this is the first time all of this +# has been mashed together, this is disappointing but not unexpected. 
+macro(add_system_include_hack include_dir) + if("${include_dir}" STREQUAL "") + message(SEND_ERROR "Cannot add empty include dir (will kill tablegen): ${include_dir}") + endif() + include_directories(SYSTEM "${include_dir}") +endmacro() + +add_system_include_hack(${LLVM_MAIN_SRC_DIR}/include) +add_system_include_hack(${LLVM_MAIN_BINARY_DIR}/include) +add_system_include_hack(${MLIR_MAIN_SRC_DIR}/include) +add_system_include_hack(${MLIR_MAIN_BINARY_DIR}/include) +add_system_include_hack(${IREE_MAIN_SOURCE_DIR}) +add_system_include_hack(${IREE_MAIN_BINARY_DIR}) +add_system_include_hack(${LLVM_EXTERNAL_MLIR_IREE_DIALECTS_SOURCE_DIR}/include) +add_system_include_hack(${LLVM_MAIN_BINARY_DIR}/tools/iree-dialects/include) +add_system_include_hack(${LLVM_EXTERNAL_MLIR_HLO_SOURCE_DIR}/include) +add_system_include_hack(${LLVM_MAIN_BINARY_DIR}/tools/mlir-hlo/include) +add_system_include_hack(${IREE_COMPILER_API_SOURCE_DIR}/include) + +function(iree_compiler_target_includes target) + target_include_directories(${target} PUBLIC + $<BUILD_INTERFACE:${IREE_COMPILER_API_SOURCE_DIR}/include> + $<BUILD_INTERFACE:${IREE_COMPILER_API_BINARY_DIR}/include> + ) +endfunction() + +# Common CMake module paths. +list(APPEND CMAKE_MODULE_PATH ${MLIR_MAIN_SRC_DIR}/cmake/modules) +list(APPEND CMAKE_MODULE_PATH ${LLVM_MAIN_SRC_DIR}/cmake/modules) + +# Configure Python3 deps so that everyone downstream of here latches to the same +# thing. +include(MLIRDetectPythonEnv) +find_package(Python3 ${LLVM_MINIMUM_PYTHON_VERSION} + COMPONENTS Interpreter Development NumPy REQUIRED) +mlir_detect_pybind11_install() +find_package(pybind11 2.6 CONFIG REQUIRED) + +# Include LLVM. +message(STATUS "Configuring LLVM from (${LLVM_MAIN_SRC_DIR} into ${LLVM_MAIN_BINARY_DIR})...") +add_subdirectory("${LLVM_MAIN_SRC_DIR}" "${LLVM_MAIN_BINARY_DIR}" EXCLUDE_FROM_ALL) + +# Include IREE. 
+message(STATUS "Configuring IREE from (${IREE_MAIN_SOURCE_DIR} into ${IREE_MAIN_BINARY_DIR}") +add_subdirectory("${IREE_MAIN_SOURCE_DIR}" "${IREE_MAIN_BINARY_DIR}" EXCLUDE_FROM_ALL) + +# Sub-directories. +# Since building outside of the LLVM build system, setup options for local +# sources. +include(HandleLLVMOptions) +add_subdirectory(lib) +add_subdirectory(python)
diff --git a/llvm-projects/iree-compiler-api/README.md b/llvm-projects/iree-compiler-api/README.md new file mode 100644 index 0000000..3503f5b --- /dev/null +++ b/llvm-projects/iree-compiler-api/README.md
@@ -0,0 +1,11 @@ +# IREE Compiler Backend + +This is a top-level project for building public facing API packages that +combine all dependent MLIR-based projects along with IREE's compiler backend +API. + +It exports artifacts: + +* `iree-compiler-backend` Python wheel and source distributions, providing + the `iree.compiler_backend` Python packages. +* Compiler C-API source and binary tarballs (future), with some CLI tools.
diff --git a/llvm-projects/iree-compiler-api/build_tools/build_intree.sh b/llvm-projects/iree-compiler-api/build_tools/build_intree.sh new file mode 100644 index 0000000..a7a950f --- /dev/null +++ b/llvm-projects/iree-compiler-api/build_tools/build_intree.sh
@@ -0,0 +1,22 @@ +#!/bin/bash +# Copyright 2021 The IREE Authors +# +# Licensed under the Apache License v2.0 with LLVM Exceptions. +# See https://llvm.org/LICENSE.txt for license information. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + +# Simple script that does a CMake configure and build of the project. +# This is meant for CI's and project maintainers. + +set -eu -o errtrace + +project_dir="$(cd $(dirname $0)/.. && pwd)" +build_dir="$project_dir/build" + +cmake -GNinja -B"$build_dir" "$project_dir" \ + -DCMAKE_CXX_COMPILER_LAUNCHER=ccache -DCMAKE_C_COMPILER_LAUNCHER=ccache \ + -DCMAKE_BUILD_TYPE=Release \ + "$@" + +cd $build_dir +ninja
diff --git a/llvm-projects/iree-compiler-api/build_tools/build_python_wheels.sh b/llvm-projects/iree-compiler-api/build_tools/build_python_wheels.sh new file mode 100755 index 0000000..96f3a0e --- /dev/null +++ b/llvm-projects/iree-compiler-api/build_tools/build_python_wheels.sh
@@ -0,0 +1,38 @@ +#!/bin/bash +# Copyright 2021 The IREE Authors +# +# Licensed under the Apache License v2.0 with LLVM Exceptions. +# See https://llvm.org/LICENSE.txt for license information. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + +set -e + +if [ -z "$PYTHON" ]; then + PYTHON="$(which python)" +fi +version="$("$PYTHON" --version)" +echo "Using python: $PYTHON (version $version)" + +repo_root="$(cd $(dirname $0)/.. && pwd)" +wheelhouse="$repo_root/wheels" +mkdir -p "$wheelhouse" + +echo "---- BUILDING iree-compiler-api ----" +if [ -x "$(command -v ccache)" ]; then + echo "Using ccache" + export CMAKE_C_COMPILER_LAUNCHER=ccache + export CMAKE_CXX_COMPILER_LAUNCHER=ccache +fi +if [ -x "$(command -v ninja)" ]; then + echo "Using ninja" + export CMAKE_GENERATOR=Ninja +fi +$PYTHON -m pip wheel "${repo_root}" \ + --use-feature=in-tree-build \ + -w "$wheelhouse" -v + +echo "---- INSTALLING iree-compiler-api ----" +$PYTHON -m pip install -f "$wheelhouse" --force-reinstall iree-compiler-api + +echo "---- QUICK SMOKE TEST ----" +$PYTHON $repo_root/build_tools/smoketest.py
diff --git a/llvm-projects/iree-compiler-api/build_tools/smoketest.py b/llvm-projects/iree-compiler-api/build_tools/smoketest.py new file mode 100644 index 0000000..381cc28 --- /dev/null +++ b/llvm-projects/iree-compiler-api/build_tools/smoketest.py
@@ -0,0 +1,52 @@ +# Copyright 2021 The IREE Authors +# +# Licensed under the Apache License v2.0 with LLVM Exceptions. +# See https://llvm.org/LICENSE.txt for license information. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + +import io + +from iree.compiler import ir +from iree.compiler import passmanager +from iree.compiler.dialects import chlo +from iree.compiler.dialects import mhlo +from iree.compiler.dialects import iree as iree_dialect +from iree.compiler.dialects import builtin +from iree.compiler.dialects import std +from iree.compiler.dialects import linalg +from iree.compiler.dialects import linalg +from iree.compiler.dialects import math +from iree.compiler.dialects import memref +from iree.compiler.dialects import shape +from iree.compiler.dialects import tensor +from iree.compiler.dialects import tosa +from iree.compiler.dialects import vector + +from iree.compiler.api import driver + +with ir.Context() as ctx: + chlo.register_chlo_dialect(ctx) + mhlo.register_mhlo_dialect(ctx) + iree_dialect.register_iree_dialect(ctx) + + input_module = ir.Module.parse(r""" + builtin.module { + builtin.func @fabs(%arg0: tensor<1x4xf32>, %arg1: tensor<4x1xf32>) -> tensor<4x4xf32> { + %0 = chlo.broadcast_add %arg0, %arg1 : (tensor<1x4xf32>, tensor<4x1xf32>) -> tensor<4x4xf32> + %1 = "mhlo.abs"(%0) : (tensor<4x4xf32>) -> tensor<4x4xf32> + return %1 : tensor<4x4xf32> + } + } + """) + + options = driver.CompilerOptions() + options.set_input_dialect_mhlo() + options.add_target_backend("cpu") + pm = passmanager.PassManager() + driver.build_iree_vm_pass_pipeline(options, pm) + pm.run(input_module) + + print(input_module) + bytecode_io = io.BytesIO() + driver.translate_module_to_vm_bytecode(options, input_module, bytecode_io) + print(f"Bytecode module len = {len(bytecode_io.getbuffer())}")
diff --git a/llvm-projects/iree-compiler-api/include/iree-compiler-c/Compiler.h b/llvm-projects/iree-compiler-api/include/iree-compiler-c/Compiler.h new file mode 100644 index 0000000..5ff1284 --- /dev/null +++ b/llvm-projects/iree-compiler-api/include/iree-compiler-c/Compiler.h
@@ -0,0 +1,67 @@ +// Copyright 2021 The IREE Authors +// +// Licensed under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + +#ifndef IREE_LLVM_PROJECTS_IREE_COMPILER_API_COMPILER_H +#define IREE_LLVM_PROJECTS_IREE_COMPILER_API_COMPILER_H + +#include "mlir-c/Pass.h" +#include "mlir-c/Support.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define DEFINE_C_API_STRUCT(name, storage) \ + struct name { \ + storage *ptr; \ + }; \ + typedef struct name name + +DEFINE_C_API_STRUCT(IreeCompilerOptions, void); + +//===----------------------------------------------------------------------===// +// Registration. +//===----------------------------------------------------------------------===// + +MLIR_CAPI_EXPORTED void ireeCompilerRegisterTargetBackends(); + +//===----------------------------------------------------------------------===// +// Compiler options. +//===----------------------------------------------------------------------===// + +// Creates and destroys a compiler options structure. +MLIR_CAPI_EXPORTED IreeCompilerOptions ireeCompilerOptionsCreate(); +MLIR_CAPI_EXPORTED void ireeCompilerOptionsDestroy(IreeCompilerOptions options); + +MLIR_CAPI_EXPORTED void ireeCompilerOptionsSetInputDialectMHLO( + IreeCompilerOptions options); +MLIR_CAPI_EXPORTED void ireeCompilerOptionsSetInputDialectTOSA( + IreeCompilerOptions options); +MLIR_CAPI_EXPORTED void ireeCompilerOptionsAddTargetBackend( + IreeCompilerOptions options, const char *targetBackend); + +//===----------------------------------------------------------------------===// +// Compiler stages. +//===----------------------------------------------------------------------===// + +// Builds a pass manager for transforming from an input module op to the IREE VM +// dialect. This represents the primary compilation stage with serialization to +// specific formats following. 
+MLIR_CAPI_EXPORTED void ireeCompilerBuildIREEVMPassPipeline( + IreeCompilerOptions options, MlirOpPassManager passManager); + +// Translates a module op derived from the ireeCompilerBuildIREEVMPassPipeline +// to serialized bytecode. The module op may either be an outer builtin ModuleOp +// wrapping a VM::ModuleOp or a VM::ModuleOp. +MLIR_CAPI_EXPORTED MlirLogicalResult ireeCompilerTranslateModuletoVMBytecode( + IreeCompilerOptions options, MlirOperation moduleOp, + MlirStringCallback dataCallback, void *dataUserObject); + +#ifdef __cplusplus +} +#endif + +#endif // IREE_LLVM_PROJECTS_IREE_COMPILER_API_COMPILER_H
diff --git a/llvm-projects/iree-compiler-api/lib/CAPI/CMakeLists.txt b/llvm-projects/iree-compiler-api/lib/CAPI/CMakeLists.txt new file mode 100644 index 0000000..e5fcc0d --- /dev/null +++ b/llvm-projects/iree-compiler-api/lib/CAPI/CMakeLists.txt
@@ -0,0 +1,19 @@ +add_mlir_public_c_api_library(IREECompilerAPICompilerCAPI + Compiler.cpp + # TODO: If installing, complains about IREEVM not being in any export set. + DISABLE_INSTALL + LINK_COMPONENTS + Support + LINK_LIBS PUBLIC + MLIRIR + iree::compiler::Dialect::VM::IR::IR + iree::compiler::Dialect::VM::Target::Bytecode::Bytecode + iree::compiler::Translation::IREEVM + + # All HAL Targets. + iree::tools::init_targets +) + +# TODO: Fix upstream so there is a way to know what the actual compile target +# is (versus prefixing with "obj." which is conditional). +iree_compiler_target_includes(obj.IREECompilerAPICompilerCAPI)
diff --git a/llvm-projects/iree-compiler-api/lib/CAPI/Compiler.cpp b/llvm-projects/iree-compiler-api/lib/CAPI/Compiler.cpp new file mode 100644 index 0000000..3b6ae8d --- /dev/null +++ b/llvm-projects/iree-compiler-api/lib/CAPI/Compiler.cpp
@@ -0,0 +1,102 @@ +// Copyright 2021 The IREE Authors +// +// Licensed under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + +#include "iree-compiler-c/Compiler.h" + +#include "iree/compiler/Dialect/VM/IR/VMOps.h" +#include "iree/compiler/Dialect/VM/Target/Bytecode/BytecodeModuleTarget.h" +#include "iree/compiler/Translation/IREEVM.h" +#include "iree/tools/init_targets.h" +#include "mlir/CAPI/IR.h" +#include "mlir/CAPI/Pass.h" +#include "mlir/CAPI/Support.h" +#include "mlir/CAPI/Utils.h" +#include "mlir/CAPI/Wrap.h" +#include "mlir/IR/BuiltinOps.h" + +using namespace mlir; +using namespace mlir::iree_compiler; + +// TODO: There is a loose ::IREE namespace somewhere which means that we +// have to fully qualify from the unnamed namespace. +using HALTargetOptions = mlir::iree_compiler::IREE::HAL::TargetOptions; +using VMTargetOptions = mlir::iree_compiler::IREE::VM::TargetOptions; +using VMBytecodeTargetOptions = + mlir::iree_compiler::IREE::VM::BytecodeTargetOptions; + +namespace { +// We have one composite options struct for everything. Not all components +// are applicable to every translation. 
+struct CompilerOptions { + BindingOptions bindingOptions; + InputDialectOptions inputDialectOptions; + HALTargetOptions executableOptions; + VMTargetOptions vmTargetOptions; + VMBytecodeTargetOptions vmBytecodeTargetOptions; +}; +} // namespace + +DEFINE_C_API_PTR_METHODS(IreeCompilerOptions, CompilerOptions) + +void ireeCompilerRegisterTargetBackends() { registerHALTargetBackends(); } + +IreeCompilerOptions ireeCompilerOptionsCreate() { + return wrap(new CompilerOptions); +} + +void ireeCompilerOptionsDestroy(IreeCompilerOptions options) { + delete unwrap(options); +} + +void ireeCompilerOptionsAddTargetBackend(IreeCompilerOptions options, + const char *targetBackend) { + unwrap(options)->executableOptions.targets.push_back( + std::string(targetBackend)); +} + +void ireeCompilerOptionsSetInputDialectMHLO(IreeCompilerOptions options) { + unwrap(options)->inputDialectOptions.type = InputDialectOptions::Type::mhlo; +} + +void ireeCompilerOptionsSetInputDialectTOSA(IreeCompilerOptions options) { + unwrap(options)->inputDialectOptions.type = InputDialectOptions::Type::tosa; +} + +void ireeCompilerBuildIREEVMPassPipeline(IreeCompilerOptions options, + MlirOpPassManager passManager) { + auto *optionsCpp = unwrap(options); + auto *passManagerCpp = unwrap(passManager); + buildIREEVMTransformPassPipeline( + optionsCpp->bindingOptions, optionsCpp->inputDialectOptions, + optionsCpp->executableOptions, optionsCpp->vmTargetOptions, + *passManagerCpp); +} + +// Translates a module op derived from the ireeCompilerBuildIREEVMPassPipeline +// to serialized bytecode. The module op may either be an outer builtin ModuleOp +// wrapping a VM::ModuleOp or a VM::ModuleOp. 
+MlirLogicalResult ireeCompilerTranslateModuletoVMBytecode( + IreeCompilerOptions options, MlirOperation moduleOp, + MlirStringCallback dataCallback, void *dataUserObject) { + auto *optionsCpp = unwrap(options); + Operation *moduleOpCpp = unwrap(moduleOp); + LogicalResult result = failure(); + + mlir::detail::CallbackOstream output(dataCallback, dataUserObject); + if (auto op = llvm::dyn_cast<mlir::ModuleOp>(moduleOpCpp)) { + result = iree_compiler::IREE::VM::translateModuleToBytecode( + op, optionsCpp->vmBytecodeTargetOptions, output); + } else if (auto op = llvm::dyn_cast<iree_compiler::IREE::VM::ModuleOp>( + moduleOpCpp)) { + result = iree_compiler::IREE::VM::translateModuleToBytecode( + op, optionsCpp->vmBytecodeTargetOptions, output); + } else { + emitError(moduleOpCpp->getLoc()) << "expected a supported module operation"; + result = failure(); + } + + return wrap(result); +}
diff --git a/llvm-projects/iree-compiler-api/lib/CMakeLists.txt b/llvm-projects/iree-compiler-api/lib/CMakeLists.txt new file mode 100644 index 0000000..a5bbf9e --- /dev/null +++ b/llvm-projects/iree-compiler-api/lib/CMakeLists.txt
@@ -0,0 +1 @@ +add_subdirectory(CAPI)
diff --git a/llvm-projects/iree-compiler-api/pyproject.toml b/llvm-projects/iree-compiler-api/pyproject.toml new file mode 100644 index 0000000..bafc162 --- /dev/null +++ b/llvm-projects/iree-compiler-api/pyproject.toml
@@ -0,0 +1,14 @@ +[build-system] +requires = [ + "setuptools>=42", + "wheel", + # There is no fundamental reason to pin this CMake version, beyond + # build stability. + "cmake==3.18", + # MLIR build depends. + "numpy", + # Version 2.7.0 excluded: https://github.com/pybind/pybind11/issues/3136 + "pybind11>=2.6.0,!=2.7.0", + "PyYAML", +] +build-backend = "setuptools.build_meta"
diff --git a/llvm-projects/iree-compiler-api/python/CMakeLists.txt b/llvm-projects/iree-compiler-api/python/CMakeLists.txt new file mode 100644 index 0000000..dc01e86 --- /dev/null +++ b/llvm-projects/iree-compiler-api/python/CMakeLists.txt
@@ -0,0 +1,77 @@ +include(AddMLIRPython) + +################################################################################ +# Sources +################################################################################ + +declare_mlir_python_sources(IREECompilerAPIPythonSources + ROOT_DIR "${CMAKE_CURRENT_SOURCE_DIR}/iree/compiler" + SOURCES + api/driver.py + version.py +) +declare_mlir_python_sources(IREECompilerAPIPythonExtensions) + +################################################################################ +# Extensions +################################################################################ + +declare_mlir_python_extension(IREECompilerAPIPythonExtensions.CompilerDriver + MODULE_NAME _ireeCompilerDriver + ADD_TO_PARENT IREECompilerAPIPythonExtensions + SOURCES + ${CMAKE_CURRENT_SOURCE_DIR}/CompilerModule.cpp + EMBED_CAPI_LINK_LIBS + IREECompilerAPICompilerCAPI + PRIVATE_LINK_LIBS + LLVMSupport +) + +################################################################################ +# Generate packages and shared library +################################################################################ + +set(_source_components + # Local sources. + IREECompilerAPIPythonSources + IREECompilerAPIPythonExtensions + + # TODO: Core is now implicitly building/registering all dialects, increasing + # build burden by ~5x. Make it stop. + MLIRPythonSources.Core + + # Core dialects (constrained to IREE input dialects). + MLIRPythonSources.Dialects.builtin + MLIRPythonSources.Dialects.linalg + MLIRPythonSources.Dialects.math + MLIRPythonSources.Dialects.memref + MLIRPythonSources.Dialects.shape + MLIRPythonSources.Dialects.std + MLIRPythonSources.Dialects.tensor + MLIRPythonSources.Dialects.tosa + MLIRPythonSources.Dialects.vector + + # mhlo project. + MLIRHLOPythonSources + MLIRHLOPythonExtensions + + # iree-dialects project. 
+ IREEDialectsPythonSources + IREEDialectsPythonExtensions +) + +add_mlir_python_common_capi_library(IREECompilerAggregateCAPI + INSTALL_COMPONENT IREECompilerPythonModules + INSTALL_DESTINATION python_package/iree/compiler/_mlir_libs + OUTPUT_DIRECTORY "${IREE_COMPILER_API_BINARY_DIR}/python_package/iree/compiler/_mlir_libs" + RELATIVE_INSTALL_ROOT "../../../.." + DECLARED_SOURCES ${_source_components} +) + +add_mlir_python_modules(IREECompilerPythonModules + ROOT_PREFIX "${IREE_COMPILER_API_BINARY_DIR}/python_package/iree/compiler" + INSTALL_PREFIX "python_package/iree/compiler" + DECLARED_SOURCES ${_source_components} + COMMON_CAPI_LINK_LIBS + IREECompilerAggregateCAPI + )
diff --git a/llvm-projects/iree-compiler-api/python/CompilerModule.cpp b/llvm-projects/iree-compiler-api/python/CompilerModule.cpp new file mode 100644 index 0000000..d7d5ebb --- /dev/null +++ b/llvm-projects/iree-compiler-api/python/CompilerModule.cpp
@@ -0,0 +1,104 @@ +// Copyright 2021 The IREE Authors +// +// Licensed under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + +#include "iree-compiler-c/Compiler.h" +#include "mlir-c/Bindings/Python/Interop.h" +#include "mlir/Bindings/Python/PybindAdaptors.h" + +namespace py = pybind11; + +namespace { + +struct PyCompilerOptions { + PyCompilerOptions() : options(ireeCompilerOptionsCreate()) {} + PyCompilerOptions(const PyCompilerOptions &) = delete; + void operator=(const PyCompilerOptions &) = delete; + PyCompilerOptions(PyCompilerOptions &&other) : options(other.options) { + other.options = {nullptr}; + } + ~PyCompilerOptions() { + if (options.ptr) ireeCompilerOptionsDestroy(options); + } + IreeCompilerOptions options; +}; + +/// Accumulates int a python file-like object, either writing text (default) +/// or binary. +class PyFileAccumulator { + public: + PyFileAccumulator(pybind11::object fileObject, bool binary) + : pyWriteFunction(fileObject.attr("write")), binary(binary) {} + + void *getUserData() { return this; } + + MlirStringCallback getCallback() { + return [](MlirStringRef part, void *userData) { + pybind11::gil_scoped_acquire(); + PyFileAccumulator *accum = static_cast<PyFileAccumulator *>(userData); + if (accum->binary) { + // Note: Still has to copy and not avoidable with this API. + pybind11::bytes pyBytes(part.data, part.length); + accum->pyWriteFunction(pyBytes); + } else { + pybind11::str pyStr(part.data, + part.length); // Decodes as UTF-8 by default. 
+ accum->pyWriteFunction(pyStr); + } + }; + } + + private: + pybind11::object pyWriteFunction; + bool binary; +}; + +} // namespace + +PYBIND11_MODULE(_ireeCompilerDriver, m) { + m.doc() = "iree-compiler driver api"; + ireeCompilerRegisterTargetBackends(); + + py::class_<PyCompilerOptions>(m, "CompilerOptions") + .def(py::init<>()) + .def("set_input_dialect_mhlo", + [](PyCompilerOptions &self) { + ireeCompilerOptionsSetInputDialectMHLO(self.options); + }) + .def("set_input_dialect_tosa", + [](PyCompilerOptions &self) { + ireeCompilerOptionsSetInputDialectTOSA(self.options); + }) + .def( + "add_target_backend", + [](PyCompilerOptions &self, const std::string &targetBackend) { + ireeCompilerOptionsAddTargetBackend(self.options, + targetBackend.c_str()); + }, + py::arg("target_backend")); + + m.def( + "build_iree_vm_pass_pipeline", + [](PyCompilerOptions &options, MlirPassManager passManager) { + MlirOpPassManager opPassManager = + mlirPassManagerGetAsOpPassManager(passManager); + ireeCompilerBuildIREEVMPassPipeline(options.options, opPassManager); + }, + py::arg("options"), py::arg("pass_manager")); + + m.def( + "translate_module_to_vm_bytecode", + [](PyCompilerOptions &options, MlirModule module, py::object file) { + PyFileAccumulator accum(file, /*binary=*/true); + MlirOperation operation = mlirModuleGetOperation(module); + auto result = ireeCompilerTranslateModuletoVMBytecode( + options.options, operation, accum.getCallback(), + accum.getUserData()); + if (mlirLogicalResultIsFailure(result)) { + throw std::runtime_error("failure translating module to bytecode"); + } + }, + py::arg("options"), py::arg("module"), py::arg("file")); +}
diff --git a/llvm-projects/iree-compiler-api/python/iree/compiler/api/driver.py b/llvm-projects/iree-compiler-api/python/iree/compiler/api/driver.py new file mode 100644 index 0000000..e8bcd96 --- /dev/null +++ b/llvm-projects/iree-compiler-api/python/iree/compiler/api/driver.py
@@ -0,0 +1,7 @@ +# Copyright 2021 The IREE Authors +# +# Licensed under the Apache License v2.0 with LLVM Exceptions. +# See https://llvm.org/LICENSE.txt for license information. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + +from .._mlir_libs._ireeCompilerDriver import *
diff --git a/llvm-projects/iree-compiler-api/python/iree/compiler/version.py b/llvm-projects/iree-compiler-api/python/iree/compiler/version.py new file mode 100644 index 0000000..b89f55a --- /dev/null +++ b/llvm-projects/iree-compiler-api/python/iree/compiler/version.py
@@ -0,0 +1,7 @@ +# Copyright 2021 The IREE Authors +# +# Licensed under the Apache License v2.0 with LLVM Exceptions. +# See https://llvm.org/LICENSE.txt for license information. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + +# TODO: Populate this.
diff --git a/llvm-projects/iree-compiler-api/setup.py b/llvm-projects/iree-compiler-api/setup.py new file mode 100644 index 0000000..910a5e9 --- /dev/null +++ b/llvm-projects/iree-compiler-api/setup.py
@@ -0,0 +1,145 @@ +# Copyright 2021 The IREE Authors +# +# Licensed under the Apache License v2.0 with LLVM Exceptions. +# See https://llvm.org/LICENSE.txt for license information. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + +# Build/install the iree-compiler-backend python package. +# Note that this includes a relatively large build of LLVM (~2400 C++ files) +# and can take a considerable amount of time, especially with defaults. +# To install: +# pip install . --use-feature=in-tree-build +# To build a wheel: +# pip wheel . --use-feature=in-tree-build +# +# It is recommended to build with Ninja and ccache. To do so, set environment +# variables by prefixing to above invocations: +# CMAKE_GENERATOR=Ninja CMAKE_C_COMPILER_LAUNCHER=ccache CMAKE_CXX_COMPILER_LAUNCHER=ccache +# +# On CIs, it is often advantageous to re-use/control the CMake build directory. +# This can be set with the IREE_COMPILER_API_CMAKE_BUILD_DIR env var. +import os +import shutil +import subprocess +import sys +import sysconfig + +from distutils.command.build import build as _build +from setuptools import find_namespace_packages, setup, Extension +from setuptools.command.build_ext import build_ext as _build_ext +from setuptools.command.build_py import build_py as _build_py + + +class CustomBuild(_build): + + def run(self): + self.run_command("build_py") + self.run_command("build_ext") + self.run_command("build_scripts") + + +class CMakeExtension(Extension): + + def __init__(self, name, sourcedir=""): + Extension.__init__(self, name, sources=[]) + self.sourcedir = os.path.abspath(sourcedir) + + +class CMakeBuildPy(_build_py): + + def run(self): + subprocess.check_call(["cmake", "--version"]) + + target_dir = self.build_lib + cmake_build_dir = os.getenv("IREE_COMPILER_API_CMAKE_BUILD_DIR") + if not cmake_build_dir: + cmake_build_dir = os.path.join(target_dir, "..", "cmake_build") + os.makedirs(cmake_build_dir, exist_ok=True) + cmake_build_dir = os.path.abspath(cmake_build_dir) + 
cmake_install_dir = os.path.abspath( + os.path.join(target_dir, "..", "cmake_install")) + src_dir = os.path.abspath(os.path.dirname(__file__)) + cfg = "Release" + cmake_args = [ + "-DCMAKE_INSTALL_PREFIX={}".format(cmake_install_dir), + "-DPython3_EXECUTABLE={}".format(sys.executable), + "-DPython3_INCLUDE_DIRS={}".format(sysconfig.get_path("include")), + "-DIREE_VERSION_INFO={}".format(self.distribution.get_version()), + "-DCMAKE_BUILD_TYPE={}".format(cfg), + ] + # HACK: CMake fails to auto-detect static linked Python installations, which + # happens to be what exists on manylinux. We detect this and give it a dummy + # library file to reference (which is checks exists but never gets + # used). + python_libdir = sysconfig.get_config_var('LIBDIR') + python_library = sysconfig.get_config_var('LIBRARY') + if python_libdir and not os.path.isabs(python_library): + python_library = os.path.join(python_libdir, python_library) + if python_library and not os.path.exists(python_library): + print("Detected static linked python. 
Faking a library for cmake.") + fake_libdir = os.path.join(cmake_build_dir, "fake_python", "lib") + os.makedirs(fake_libdir, exist_ok=True) + fake_library = os.path.join(fake_libdir, + sysconfig.get_config_var('LIBRARY')) + with open(fake_library, "wb"): + pass + cmake_args.append("-DPython3_LIBRARY:PATH={}".format(fake_library)) + + build_args = [] + if os.path.exists(cmake_install_dir): + shutil.rmtree(cmake_install_dir) + cmake_cache_file = os.path.join(cmake_build_dir, "CMakeCache.txt") + if os.path.exists(cmake_cache_file): + os.remove(cmake_cache_file) + print(f"Configuring with: {cmake_args}") + subprocess.check_call(["cmake", src_dir] + cmake_args, cwd=cmake_build_dir) + subprocess.check_call( + ["cmake", "--build", ".", "--target", "install/strip"] + build_args, + cwd=cmake_build_dir) + print("Build complete.") + if os.path.exists(target_dir): + shutil.rmtree(target_dir) + print("Copying install to target.") + shutil.copytree(os.path.join(cmake_install_dir, "python_package"), + target_dir, + symlinks=False) + print("Target populated.") + + +class NoopBuildExtension(_build_ext): + + def __init__(self, *args, **kwargs): + assert False + + def build_extension(self, ext): + pass + + +setup( + name="iree-compiler-api", + version="0.0.1", + author="IREE Authors", + author_email="iree-discuss@googlegroups.com", + description="IREE Compiler API", + long_description="", + ext_modules=[ + CMakeExtension("iree.compiler._mlir_libs._mlir"), + CMakeExtension("iree.compiler._mlir_libs._ireeDialects"), + CMakeExtension("iree.compiler._mlir_libs._mlirHlo"), + CMakeExtension("iree.compiler._mlir_libs._mlirLinalgPasses"), + ], + cmdclass={ + "build": CustomBuild, + "built_ext": NoopBuildExtension, + "build_py": CMakeBuildPy, + }, + zip_safe=False, + packages=find_namespace_packages(include=[ + "iree.compiler", + "iree.compiler.*", + ],), + install_requires=[ + "numpy", + "PyYAML", + ], +)
diff --git a/llvm-projects/iree-dialects/.gitignore b/llvm-projects/iree-dialects/.gitignore new file mode 100644 index 0000000..84c048a --- /dev/null +++ b/llvm-projects/iree-dialects/.gitignore
@@ -0,0 +1 @@ +/build/
diff --git a/llvm-projects/iree-dialects/CMakeLists.txt b/llvm-projects/iree-dialects/CMakeLists.txt new file mode 100644 index 0000000..945b878 --- /dev/null +++ b/llvm-projects/iree-dialects/CMakeLists.txt
@@ -0,0 +1,61 @@ +if(CMAKE_SOURCE_DIR STREQUAL CMAKE_CURRENT_SOURCE_DIR) +message(FATAL_ERROR + "This project is intended to be built as part of LLVM via " + "-DLLVM_EXTERNAL_PROJECTS=iree-dialects " + "-DLLVM_EXTERNAL_IREE_DIALECTS_SOURCE_DIR=${CMAKE_CURRENT_SOURCE_DIR}") +endif() + +option(MLIR_ENABLE_BINDINGS_PYTHON "Enables MLIR Python Bindings" OFF) + +set(IREE_DIALECTS_SOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}") +set(IREE_DIALECTS_BINARY_DIR "${CMAKE_CURRENT_BINARY_DIR}") +message(STATUS "Building iree-dialects project at ${IREE_DIALECTS_SOURCE_DIR} (into ${IREE_DIALECTS_BINARY_DIR})") + +# TODO: Fix this upstream so that global include directories are not needed. +set(MLIR_MAIN_SRC_DIR ${LLVM_MAIN_SRC_DIR}/../mlir) +set(MLIR_INCLUDE_DIR ${LLVM_MAIN_SRC_DIR}/../mlir/include) +set(MLIR_GENERATED_INCLUDE_DIR ${CMAKE_BINARY_DIR}/tools/mlir/include) + +# TODO: Needed for tablegen. Remove. +include_directories(SYSTEM ${MLIR_INCLUDE_DIR}) +include_directories(SYSTEM ${MLIR_GENERATED_INCLUDE_DIR}) +include_directories(SYSTEM ${IREE_DIALECTS_SOURCE_DIR}/include) + +function(iree_dialects_target_includes target) + target_include_directories(${target} PUBLIC + $<BUILD_INTERFACE:${MLIR_INCLUDE_DIR}> + $<BUILD_INTERFACE:${MLIR_GENERATED_INCLUDE_DIR}> + $<BUILD_INTERFACE:${IREE_DIALECTS_SOURCE_DIR}/include> + $<BUILD_INTERFACE:${IREE_DIALECTS_BINARY_DIR}/include> + ) +endfunction() + +# Configure CMake and tablegen. +list(APPEND CMAKE_MODULE_PATH ${MLIR_MAIN_SRC_DIR}/cmake/modules) +list(APPEND CMAKE_MODULE_PATH ${LLVM_MAIN_SRC_DIR}/cmake) +set(MLIR_TABLEGEN_EXE mlir-tblgen) + +include(TableGen) +include(AddLLVM) +include(AddMLIR) + +################################################################################ +# Setup python. +# TODO: Make one upstream macro to do this. 
+################################################################################ + +if(MLIR_ENABLE_BINDINGS_PYTHON) + include(MLIRDetectPythonEnv) + mlir_detect_pybind11_install() + find_package(Python3 ${LLVM_MINIMUM_PYTHON_VERSION} + COMPONENTS Interpreter Development NumPy REQUIRED) + find_package(pybind11 2.6 CONFIG REQUIRED) +endif() + +add_subdirectory(include) +add_subdirectory(lib) +add_subdirectory(test) + +if(MLIR_ENABLE_BINDINGS_PYTHON) + add_subdirectory(python) +endif()
diff --git a/llvm-projects/iree-dialects/README.md b/llvm-projects/iree-dialects/README.md new file mode 100644 index 0000000..90b89dc --- /dev/null +++ b/llvm-projects/iree-dialects/README.md
@@ -0,0 +1,11 @@ +# IREE Dialects Project + +Sources for IREE's public dialects (containing ops/types/attributes that are +unique to IREE and can appear in compiler inputs). + +This project is intended to be used via LLVM's external projects setup: + +* `-DLLVM_EXTERNAL_PROJECTS=iree-dialects` +* `-DLLVM_EXTERNAL_IREE_DIALECTS_SOURCE_DIR={this_directory}` + +It depends on the `mlir` project.
diff --git a/llvm-projects/iree-dialects/build_tools/build_standalone.sh b/llvm-projects/iree-dialects/build_tools/build_standalone.sh new file mode 100644 index 0000000..6200473 --- /dev/null +++ b/llvm-projects/iree-dialects/build_tools/build_standalone.sh
@@ -0,0 +1,27 @@ +#!/bin/bash +# Copyright 2021 The IREE Authors +# +# Licensed under the Apache License v2.0 with LLVM Exceptions. +# See https://llvm.org/LICENSE.txt for license information. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + +# Simple script that does a CMake configure of this project as an external +# LLVM project so it can be tested in isolation to larger assemblies. +# This is meant for CI's and project maintainers. + +set -eu -o errtrace + +project_dir="$(cd $(dirname $0)/.. && pwd)" +repo_root="$(cd "$project_dir"/../.. && pwd)" +llvm_project_dir="$repo_root/third_party/llvm-project" +build_dir="$project_dir/build" + +cmake -GNinja -B"$build_dir" "$llvm_project_dir/llvm" \ + -DCMAKE_BUILD_TYPE=Release \ + -DLLVM_ENABLE_PROJECTS=mlir \ + -DLLVM_EXTERNAL_PROJECTS=iree-dialects \ + -DLLVM_EXTERNAL_IREE_DIALECTS_SOURCE_DIR="$project_dir" \ + -DMLIR_ENABLE_BINDINGS_PYTHON=ON + +cd "$build_dir" +ninja tools/iree-dialects/all
diff --git a/llvm-projects/iree-dialects/include/CMakeLists.txt b/llvm-projects/iree-dialects/include/CMakeLists.txt new file mode 100644 index 0000000..aa87602 --- /dev/null +++ b/llvm-projects/iree-dialects/include/CMakeLists.txt
@@ -0,0 +1 @@ +add_subdirectory(iree-dialects)
diff --git a/llvm-projects/iree-dialects/include/iree-dialects-c/Dialects.h b/llvm-projects/iree-dialects/include/iree-dialects-c/Dialects.h new file mode 100644 index 0000000..f425260 --- /dev/null +++ b/llvm-projects/iree-dialects/include/iree-dialects-c/Dialects.h
@@ -0,0 +1,22 @@ +// Copyright 2021 The IREE Authors +// +// Licensed under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + +#ifndef IREE_LLVM_PROJECTS_IREE_DIALECTS_C_DIALECTS_H +#define IREE_LLVM_PROJECTS_IREE_DIALECTS_C_DIALECTS_H + +#include "mlir-c/Registration.h" + +#ifdef __cplusplus +extern "C" { +#endif + +MLIR_DECLARE_CAPI_DIALECT_REGISTRATION(IREE, iree); + +#ifdef __cplusplus +} +#endif + +#endif // IREE_LLVM_PROJECTS_IREE_DIALECTS_C_DIALECTS_H
diff --git a/llvm-projects/iree-dialects/include/iree-dialects/CMakeLists.txt b/llvm-projects/iree-dialects/include/iree-dialects/CMakeLists.txt new file mode 100644 index 0000000..7315b80 --- /dev/null +++ b/llvm-projects/iree-dialects/include/iree-dialects/CMakeLists.txt
@@ -0,0 +1 @@ +add_subdirectory(Dialects)
diff --git a/llvm-projects/iree-dialects/include/iree-dialects/Dialects/CMakeLists.txt b/llvm-projects/iree-dialects/include/iree-dialects/Dialects/CMakeLists.txt new file mode 100644 index 0000000..c931a54 --- /dev/null +++ b/llvm-projects/iree-dialects/include/iree-dialects/Dialects/CMakeLists.txt
@@ -0,0 +1 @@ +add_subdirectory(iree)
diff --git a/llvm-projects/iree-dialects/include/iree-dialects/Dialects/iree/CMakeLists.txt b/llvm-projects/iree-dialects/include/iree-dialects/Dialects/iree/CMakeLists.txt new file mode 100644 index 0000000..219c6d9 --- /dev/null +++ b/llvm-projects/iree-dialects/include/iree-dialects/Dialects/iree/CMakeLists.txt
@@ -0,0 +1,3 @@ +add_mlir_dialect(IREEOps iree) +add_mlir_doc(IREEDialect IREEDialect IREE/ -gen-dialect-doc) +add_mlir_doc(IREEOps IREEOps IREE/ -gen-op-doc)
diff --git a/llvm-projects/iree-dialects/include/iree-dialects/Dialects/iree/IREEDialect.h b/llvm-projects/iree-dialects/include/iree-dialects/Dialects/iree/IREEDialect.h new file mode 100644 index 0000000..7f9c0ac --- /dev/null +++ b/llvm-projects/iree-dialects/include/iree-dialects/Dialects/iree/IREEDialect.h
@@ -0,0 +1,16 @@ +// Copyright 2021 The IREE Authors +// +// Licensed under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + +#ifndef IREE_LLVM_PROJECTS_IREE_DIALECTS_DIALECTS_IREE_IREE_DIALECT_H +#define IREE_LLVM_PROJECTS_IREE_DIALECTS_DIALECTS_IREE_IREE_DIALECT_H + +#include "mlir/IR/Dialect.h" + +// Include generated dialect code (this comment blocks clang-format from +// clobbering order). +#include "iree-dialects/Dialects/iree/IREEOpsDialect.h.inc" + +#endif // IREE_LLVM_PROJECTS_IREE_DIALECTS_DIALECTS_IREE_IREE_DIALECT_H
diff --git a/llvm-projects/iree-dialects/include/iree-dialects/Dialects/iree/IREEDialect.td b/llvm-projects/iree-dialects/include/iree-dialects/Dialects/iree/IREEDialect.td new file mode 100644 index 0000000..d5bc802 --- /dev/null +++ b/llvm-projects/iree-dialects/include/iree-dialects/Dialects/iree/IREEDialect.td
@@ -0,0 +1,33 @@ +// Copyright 2021 The IREE Authors +// +// Licensed under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + +#ifndef IREE_LLVM_PROJECTS_IREE_DIALECTS_DIALECTS_IREE_IREE_DIALECT_TD +#define IREE_LLVM_PROJECTS_IREE_DIALECTS_DIALECTS_IREE_IREE_DIALECT_TD + +include "mlir/IR/OpBase.td" + +def IREE_Dialect : Dialect { + let name = "iree"; + let summary = "Public ops/type/attributes legal for input to IREE's compiler"; + let description = [{ + IREE's compiler allows as input a number of common dialects. This dialect + contains structural and unique ops that do not exist elsewhere or that IREE + has an interest in maintaining as a stable set. + + The contents of this dialect often mirror various constructs in IREE's + internal implementation. The focus here is on simplicity and stability + over time. Generally, this dialect does not use "advanced" features and + should be broadly source compatible over a range of LLVM versions. There + are of course, limits, and source-compatibility is not guaranteed, since + LLVM/MLIR's API surface is itself unstable. + }]; + let cppNamespace = "::mlir::iree"; +} + +class IREE_Op<string mnemonic, list<OpTrait> traits = []> : + Op<IREE_Dialect, mnemonic, traits>; + +#endif // IREE_LLVM_PROJECTS_IREE_DIALECTS_DIALECTS_IREE_IREE_DIALECT_TD
diff --git a/llvm-projects/iree-dialects/include/iree-dialects/Dialects/iree/IREEOps.h b/llvm-projects/iree-dialects/include/iree-dialects/Dialects/iree/IREEOps.h new file mode 100644 index 0000000..3d2d9cf --- /dev/null +++ b/llvm-projects/iree-dialects/include/iree-dialects/Dialects/iree/IREEOps.h
@@ -0,0 +1,18 @@ +// Copyright 2021 The IREE Authors +// +// Licensed under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + +#ifndef IREE_LLVM_PROJECTS_IREE_DIALECTS_DIALECTS_IREE_IREE_OPS_H +#define IREE_LLVM_PROJECTS_IREE_DIALECTS_DIALECTS_IREE_IREE_OPS_H + +#include "mlir/IR/BuiltinTypes.h" +#include "mlir/IR/Dialect.h" +#include "mlir/IR/OpDefinition.h" +#include "mlir/Interfaces/SideEffectInterfaces.h" + +#define GET_OP_CLASSES +#include "iree-dialects/Dialects/iree/IREEOps.h.inc" + +#endif // IREE_LLVM_PROJECTS_IREE_DIALECTS_DIALECTS_IREE_IREE_OPS_H
diff --git a/llvm-projects/iree-dialects/include/iree-dialects/Dialects/iree/IREEOps.td b/llvm-projects/iree-dialects/include/iree-dialects/Dialects/iree/IREEOps.td new file mode 100644 index 0000000..e219180 --- /dev/null +++ b/llvm-projects/iree-dialects/include/iree-dialects/Dialects/iree/IREEOps.td
@@ -0,0 +1,19 @@ +// Copyright 2021 The IREE Authors +// +// Licensed under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + +#ifndef IREE_LLVM_PROJECTS_IREE_DIALECTS_DIALECTS_IREE_IREE_OPS_TD +#define IREE_LLVM_PROJECTS_IREE_DIALECTS_DIALECTS_IREE_IREE_OPS_TD + +include "iree-dialects/Dialects/iree/IREEDialect.td" +include "mlir/Interfaces/SideEffectInterfaces.td" + +def IREE_DummyOp : IREE_Op<"dummy", [NoSideEffect]> { + let summary = "Dummy op to be removed once dialect is populated properly."; + let arguments = (ins); + let results = (outs); +} + +#endif // IREE_LLVM_PROJECTS_IREE_DIALECTS_DIALECTS_IREE_IREE_OPS_TD
diff --git a/llvm-projects/iree-dialects/lib/CAPI/CMakeLists.txt b/llvm-projects/iree-dialects/lib/CAPI/CMakeLists.txt new file mode 100644 index 0000000..ade7f8c --- /dev/null +++ b/llvm-projects/iree-dialects/lib/CAPI/CMakeLists.txt
@@ -0,0 +1,9 @@ +add_mlir_public_c_api_library(IREEDialectsCAPI + Dialects.cpp + LINK_LIBS PUBLIC + IREEDialectsIREEDialect +) + +# TODO: Fix upstream so there is a way to know what the actual compile target +# is (versus prefixing with "obj." which is conditional). +iree_dialects_target_includes(obj.IREEDialectsCAPI)
diff --git a/llvm-projects/iree-dialects/lib/CAPI/Dialects.cpp b/llvm-projects/iree-dialects/lib/CAPI/Dialects.cpp new file mode 100644 index 0000000..62e7e72 --- /dev/null +++ b/llvm-projects/iree-dialects/lib/CAPI/Dialects.cpp
@@ -0,0 +1,12 @@ +// Copyright 2021 The IREE Authors +// +// Licensed under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + +#include "iree-dialects-c/Dialects.h" + +#include "iree-dialects/Dialects/iree/IREEDialect.h" +#include "mlir/CAPI/Registration.h" + +MLIR_DEFINE_CAPI_DIALECT_REGISTRATION(IREE, iree, mlir::iree::IREEDialect)
diff --git a/llvm-projects/iree-dialects/lib/CMakeLists.txt b/llvm-projects/iree-dialects/lib/CMakeLists.txt new file mode 100644 index 0000000..23e377d --- /dev/null +++ b/llvm-projects/iree-dialects/lib/CMakeLists.txt
@@ -0,0 +1,2 @@ +add_subdirectory(CAPI) +add_subdirectory(Dialects)
diff --git a/llvm-projects/iree-dialects/lib/Dialects/CMakeLists.txt b/llvm-projects/iree-dialects/lib/Dialects/CMakeLists.txt new file mode 100644 index 0000000..c931a54 --- /dev/null +++ b/llvm-projects/iree-dialects/lib/Dialects/CMakeLists.txt
@@ -0,0 +1 @@ +add_subdirectory(iree)
diff --git a/llvm-projects/iree-dialects/lib/Dialects/iree/CMakeLists.txt b/llvm-projects/iree-dialects/lib/Dialects/iree/CMakeLists.txt new file mode 100644 index 0000000..05777a3 --- /dev/null +++ b/llvm-projects/iree-dialects/lib/Dialects/iree/CMakeLists.txt
@@ -0,0 +1,18 @@ +add_mlir_library(IREEDialectsIREEDialect + IREEDialect.cpp + IREEOps.cpp + + ADDITIONAL_HEADER_DIRS + ${IREE_DIALECTS_SOURCE_DIR}/include + + DEPENDS + MLIRIREEOpsIncGen + + LINK_LIBS PUBLIC + MLIRIR + MLIRSideEffectInterfaces +) + +# TODO: Fix upstream so there is a way to know what the actual compile target +# is (versus prefixing with "obj." which is conditional). +iree_dialects_target_includes(obj.IREEDialectsIREEDialect)
diff --git a/llvm-projects/iree-dialects/lib/Dialects/iree/IREEDialect.cpp b/llvm-projects/iree-dialects/lib/Dialects/iree/IREEDialect.cpp new file mode 100644 index 0000000..c964b3b --- /dev/null +++ b/llvm-projects/iree-dialects/lib/Dialects/iree/IREEDialect.cpp
@@ -0,0 +1,21 @@ +// Copyright 2021 The IREE Authors +// +// Licensed under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + +#include "iree-dialects/Dialects/iree/IREEDialect.h" + +#include "iree-dialects/Dialects/iree/IREEOps.h" + +using namespace mlir; +using namespace mlir::iree; + +#include "iree-dialects/Dialects/iree/IREEOpsDialect.cpp.inc" + +void IREEDialect::initialize() { + addOperations< +#define GET_OP_LIST +#include "iree-dialects/Dialects/iree/IREEOps.cpp.inc" + >(); +}
diff --git a/llvm-projects/iree-dialects/lib/Dialects/iree/IREEOps.cpp b/llvm-projects/iree-dialects/lib/Dialects/iree/IREEOps.cpp new file mode 100644 index 0000000..39c4dff --- /dev/null +++ b/llvm-projects/iree-dialects/lib/Dialects/iree/IREEOps.cpp
@@ -0,0 +1,13 @@ +// Copyright 2021 The IREE Authors +// +// Licensed under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + +#include "iree-dialects/Dialects/iree/IREEOps.h" + +#include "iree-dialects/Dialects/iree/IREEDialect.h" +#include "mlir/IR/OpImplementation.h" + +#define GET_OP_CLASSES +#include "iree-dialects/Dialects/iree/IREEOps.cpp.inc"
diff --git a/llvm-projects/iree-dialects/python/CMakeLists.txt b/llvm-projects/iree-dialects/python/CMakeLists.txt new file mode 100644 index 0000000..bfc56ef --- /dev/null +++ b/llvm-projects/iree-dialects/python/CMakeLists.txt
@@ -0,0 +1,65 @@ +include(AddMLIRPython) + +################################################################################ +# Sources +################################################################################ + +declare_mlir_python_sources(IREEDialectsPythonSources) +declare_mlir_python_sources(IREEDialectsPythonExtensions) + +declare_mlir_python_sources(IREEDialectsPythonSources.Dialects + ADD_TO_PARENT IREEDialectsPythonSources +) + +declare_mlir_dialect_python_bindings( + ADD_TO_PARENT IREEDialectsPythonSources.Dialects + ROOT_DIR "${CMAKE_CURRENT_SOURCE_DIR}/mlir" + TD_FILE dialects/IreeBinding.td + SOURCES dialects/iree.py + DIALECT_NAME iree +) + +################################################################################ +# Extensions +################################################################################ + +declare_mlir_python_extension(IREEDialectsPythonExtensions.Main + MODULE_NAME _ireeDialects + ADD_TO_PARENT IREEDialectsPythonExtensions + SOURCES + ${CMAKE_CURRENT_SOURCE_DIR}/IREEDialectsModule.cpp + EMBED_CAPI_LINK_LIBS + IREEDialectsCAPI + PRIVATE_LINK_LIBS + LLVMSupport +) + +################################################################################ +# Generate packages and shared library +# Downstreams typically will not use these, but they are useful for local +# testing. +################################################################################ + +set(_source_components + # TODO: Core is now implicitly building/registering all dialects, increasing + # build burden by ~5x. Make it stop. + MLIRPythonSources.Core + IREEDialectsPythonSources + IREEDialectsPythonExtensions +) + +add_mlir_python_common_capi_library(IREEDialectsAggregateCAPI + INSTALL_COMPONENT IREEDialectsPythonModules + INSTALL_DESTINATION python_packages/iree_dialects/mlir/_mlir_libs + OUTPUT_DIRECTORY "${IREE_DIALECTS_BINARY_DIR}/python_packages/iree_dialects/mlir/_mlir_libs" + RELATIVE_INSTALL_ROOT "../../../.." 
+ DECLARED_SOURCES ${_source_components} +) + +add_mlir_python_modules(IREEDialectsPythonModules + ROOT_PREFIX "${IREE_DIALECTS_BINARY_DIR}/python_packages/iree_dialects/mlir" + INSTALL_PREFIX "python_packages/iree_dialects/mlir" + DECLARED_SOURCES ${_source_components} + COMMON_CAPI_LINK_LIBS + IREEDialectsAggregateCAPI + )
diff --git a/llvm-projects/iree-dialects/python/IREEDialectsModule.cpp b/llvm-projects/iree-dialects/python/IREEDialectsModule.cpp new file mode 100644 index 0000000..743605c --- /dev/null +++ b/llvm-projects/iree-dialects/python/IREEDialectsModule.cpp
@@ -0,0 +1,27 @@ +// Copyright 2021 The IREE Authors +// +// Licensed under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + +#include "iree-dialects-c/Dialects.h" +#include "mlir-c/Bindings/Python/Interop.h" +#include "mlir-c/Registration.h" +#include "mlir/Bindings/Python/PybindAdaptors.h" + +namespace py = pybind11; + +PYBIND11_MODULE(_ireeDialects, m) { + m.doc() = "iree-dialects main python extension"; + + m.def( + "register_iree_dialect", + [](MlirContext context, bool load) { + MlirDialectHandle handle = mlirGetDialectHandle__iree__(); + mlirDialectHandleRegisterDialect(handle, context); + if (load) { + mlirDialectHandleLoadDialect(handle, context); + } + }, + py::arg("context"), py::arg("load") = true); +}
diff --git a/llvm-projects/iree-dialects/python/mlir/dialects/IreeBinding.td b/llvm-projects/iree-dialects/python/mlir/dialects/IreeBinding.td new file mode 100644 index 0000000..f03fcee --- /dev/null +++ b/llvm-projects/iree-dialects/python/mlir/dialects/IreeBinding.td
@@ -0,0 +1,13 @@ +// Copyright 2021 The IREE Authors +// +// Licensed under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + +#ifndef PYTHON_BINDINGS_IREE_OPS +#define PYTHON_BINDINGS_IREE_OPS + +include "mlir/Bindings/Python/Attributes.td" +include "iree-dialects/Dialects/iree/IREEOps.td" + +#endif // PYTHON_BINDINGS_IREE_OPS
diff --git a/llvm-projects/iree-dialects/python/mlir/dialects/iree.py b/llvm-projects/iree-dialects/python/mlir/dialects/iree.py new file mode 100644 index 0000000..8e51281 --- /dev/null +++ b/llvm-projects/iree-dialects/python/mlir/dialects/iree.py
@@ -0,0 +1,8 @@ +# Copyright 2021 The IREE Authors +# +# Licensed under the Apache License v2.0 with LLVM Exceptions. +# See https://llvm.org/LICENSE.txt for license information. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + +from ._iree_ops_gen import * +from .._mlir_libs._ireeDialects import register_iree_dialect
diff --git a/llvm-projects/iree-dialects/test/CMakeLists.txt b/llvm-projects/iree-dialects/test/CMakeLists.txt new file mode 100644 index 0000000..45f1daa --- /dev/null +++ b/llvm-projects/iree-dialects/test/CMakeLists.txt
@@ -0,0 +1,28 @@ +llvm_canonicalize_cmake_booleans( + MLIR_ENABLE_BINDINGS_PYTHON +) + +configure_lit_site_cfg( + ${CMAKE_CURRENT_SOURCE_DIR}/lit.site.cfg.py.in + ${CMAKE_CURRENT_BINARY_DIR}/lit.site.cfg.py + MAIN_CONFIG + ${CMAKE_CURRENT_SOURCE_DIR}/lit.cfg.py +) + +set(IREE_DIALECTS_TEST_DEPENDS + FileCheck count not + ) + +if(MLIR_ENABLE_BINDINGS_PYTHON) + list(APPEND IREE_DIALECTS_TEST_DEPENDS + IREEDialectsPythonModules + ) +endif() + +add_lit_testsuite(check-iree-dialects "Running the iree-dialects regression tests" + ${CMAKE_CURRENT_BINARY_DIR} + DEPENDS ${IREE_DIALECTS_TEST_DEPENDS} + ) +set_target_properties(check-iree-dialects PROPERTIES FOLDER "Tests") + +add_lit_testsuites(IREE_DIALECTS ${CMAKE_CURRENT_SOURCE_DIR} DEPENDS ${IREE_DIALECTS_TEST_DEPENDS})
diff --git a/llvm-projects/iree-dialects/test/dummy.py b/llvm-projects/iree-dialects/test/dummy.py new file mode 100644 index 0000000..0aca93d --- /dev/null +++ b/llvm-projects/iree-dialects/test/dummy.py
@@ -0,0 +1,4 @@ +# RUN: %PYTHON %s + +# This test does nothing. It is just here so that if python bindings tests +# are excluded, the test suite is not empty.
diff --git a/llvm-projects/iree-dialects/test/lit.cfg.py b/llvm-projects/iree-dialects/test/lit.cfg.py new file mode 100644 index 0000000..91817a9 --- /dev/null +++ b/llvm-projects/iree-dialects/test/lit.cfg.py
@@ -0,0 +1,74 @@ +# -*- Python -*- +# Copyright 2021 The IREE Authors +# +# Licensed under the Apache License v2.0 with LLVM Exceptions. +# See https://llvm.org/LICENSE.txt for license information. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + +import os +import platform +import re +import subprocess +import tempfile + +import lit.formats +import lit.util + +from lit.llvm import llvm_config +from lit.llvm.subst import ToolSubst +from lit.llvm.subst import FindTool + +# Configuration file for the 'lit' test runner. + +# name: The name of this test suite. +config.name = 'IREE_DIALECTS' + +config.test_format = lit.formats.ShTest(not llvm_config.use_lit_shell) + +# suffixes: A list of file extensions to treat as test files. +config.suffixes = ['.mlir', '.py'] + +# test_source_root: The root path where tests are located. +config.test_source_root = os.path.dirname(__file__) + +# test_exec_root: The root path where tests should be run. +config.test_exec_root = os.path.join(config.iree_dialects_obj_root, 'test') + +config.substitutions.append(('%PATH%', config.environment['PATH'])) +config.substitutions.append(('%shlibext', config.llvm_shlib_ext)) + +llvm_config.with_system_environment(['HOME', 'INCLUDE', 'LIB', 'TMP', 'TEMP']) + +#llvm_config.use_default_substitutions() + +# excludes: A list of directories to exclude from the testsuite. The 'Inputs' +# subdirectories contain auxiliary inputs for various tests in their parent +# directories. +config.excludes = [ + 'Inputs', 'Examples', 'CMakeLists.txt', 'README.txt', 'LICENSE.txt', + 'lit.cfg.py', 'lit.site.cfg.py' +] + +# test_source_root: The root path where tests are located. +config.test_source_root = os.path.dirname(__file__) + +# test_exec_root: The root path where tests should be run. +config.test_exec_root = os.path.join(config.iree_dialects_obj_root, 'test') +config.standalone_tools_dir = os.path.join(config.iree_dialects_obj_root, 'bin') + +# Tweak the PATH to include the tools dir. 
+llvm_config.with_environment('PATH', config.llvm_tools_dir, append_path=True) + +tool_dirs = [config.llvm_tools_dir] +tools = [ + ToolSubst('%PYTHON', config.python_executable, unresolved='ignore'), +] + +llvm_config.add_tool_substitutions(tools, tool_dirs) + +if config.enable_bindings_python: + llvm_config.with_environment('PYTHONPATH', [ + os.path.join(config.iree_dialects_obj_root, 'python_packages', + 'iree_dialects'), + ], + append_path=True)
diff --git a/llvm-projects/iree-dialects/test/lit.site.cfg.py.in b/llvm-projects/iree-dialects/test/lit.site.cfg.py.in new file mode 100644 index 0000000..9573f03 --- /dev/null +++ b/llvm-projects/iree-dialects/test/lit.site.cfg.py.in
@@ -0,0 +1,21 @@ +@LIT_SITE_CFG_IN_HEADER@ + +import sys + +config.enable_bindings_python = @MLIR_ENABLE_BINDINGS_PYTHON@ +config.iree_dialects_obj_root = "@IREE_DIALECTS_BINARY_DIR@" +config.llvm_src_root = "@LLVM_SOURCE_DIR@" +config.llvm_obj_root = "@LLVM_BINARY_DIR@" +config.llvm_tools_dir = "@LLVM_TOOLS_DIR@" +config.llvm_lib_dir = "@LLVM_LIBS_DIR@" +config.llvm_shlib_dir = "@SHLIBDIR@" +config.llvm_shlib_ext = "@SHLIBEXT@" +config.llvm_exe_ext = "@EXEEXT@" +config.lit_tools_dir = "@LLVM_LIT_TOOLS_DIR@" +config.python_executable = sys.executable + +import lit.llvm +lit.llvm.initialize(lit_config, config) + +# Let the main config do the real work. +lit_config.load_config(config, "@IREE_DIALECTS_SOURCE_DIR@/test/lit.cfg.py")
diff --git a/llvm-projects/iree-dialects/test/python/lit.local.cfg b/llvm-projects/iree-dialects/test/python/lit.local.cfg new file mode 100644 index 0000000..4cfe043 --- /dev/null +++ b/llvm-projects/iree-dialects/test/python/lit.local.cfg
@@ -0,0 +1,2 @@ +if not config.enable_bindings_python: + config.unsupported = True
diff --git a/llvm-projects/iree-dialects/test/python/smoketest.py b/llvm-projects/iree-dialects/test/python/smoketest.py new file mode 100644 index 0000000..82f0262 --- /dev/null +++ b/llvm-projects/iree-dialects/test/python/smoketest.py
@@ -0,0 +1,7 @@ +# RUN: %PYTHON %s + +import mlir.ir +from mlir.dialects import iree + +with mlir.ir.Context() as ctx: + iree.register_iree_dialect(ctx)